mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
scheduler: Replace __get_cpu_var with this_cpu_ptr
Convert all uses of __get_cpu_var for address calculation to use
this_cpu_ptr instead.

[Uses of __get_cpu_var with cpumask_var_t are no longer handled
by this patch]

Cc: Peter Zijlstra <peterz@infradead.org>
Acked-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
parent dc5df73b3a
commit 4a32fea9d7
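The commit applies a small set of mechanical conversions: taking the address of the local CPU's instance with &__get_cpu_var(var) becomes this_cpu_ptr(&var), plain reads and writes of a per-CPU scalar become __this_cpu_read(var) and __this_cpu_write(var, val), and &__raw_get_cpu_var(var) becomes raw_cpu_ptr(&var). The sketch below illustrates those rules on a hypothetical per-CPU variable and function that are not part of this patch; the preempt_disable()/preempt_enable() pair stands in for the preemption- or interrupt-disabled contexts the real call sites already run in.

/*
 * Minimal sketch of the conversion pattern (hypothetical example, not
 * taken from the patch):
 *
 *   &__get_cpu_var(var)       ->  this_cpu_ptr(&var)
 *   x = __get_cpu_var(var)    ->  x = __this_cpu_read(var)
 *   __get_cpu_var(var) = x    ->  __this_cpu_write(var, x)
 *   &__raw_get_cpu_var(var)   ->  raw_cpu_ptr(&var)
 */
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/types.h>

struct example_state {			/* hypothetical per-CPU struct */
	unsigned long nr_events;
};

static DEFINE_PER_CPU(struct example_state, example_state);
static DEFINE_PER_CPU(u64, example_len);	/* hypothetical per-CPU scalar */

static void example_update(u64 delta)
{
	struct example_state *st;
	u64 len;

	preempt_disable();		/* stay on one CPU for the whole update */

	/* Address calculation: old form was st = &__get_cpu_var(example_state) */
	st = this_cpu_ptr(&example_state);
	st->nr_events++;

	/* Scalar access: old form read and wrote __get_cpu_var(example_len) */
	len = __this_cpu_read(example_len);
	len += delta;
	__this_cpu_write(example_len, len);

	preempt_enable();
}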
@@ -44,8 +44,8 @@ DECLARE_PER_CPU(struct kernel_stat, kstat);
 DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
 
 /* Must have preemption disabled for this to be meaningful. */
-#define kstat_this_cpu (&__get_cpu_var(kstat))
-#define kcpustat_this_cpu (&__get_cpu_var(kernel_cpustat))
+#define kstat_this_cpu this_cpu_ptr(&kstat)
+#define kcpustat_this_cpu this_cpu_ptr(&kernel_cpustat)
 #define kstat_cpu(cpu) per_cpu(kstat, cpu)
 #define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)
 
@@ -137,7 +137,7 @@ static struct perf_callchain_entry *get_callchain_entry(int *rctx)
 	int cpu;
 	struct callchain_cpus_entries *entries;
 
-	*rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
+	*rctx = get_recursion_context(this_cpu_ptr(callchain_recursion));
 	if (*rctx == -1)
 		return NULL;
 
@@ -153,7 +153,7 @@ static struct perf_callchain_entry *get_callchain_entry(int *rctx)
 static void
 put_callchain_entry(int rctx)
 {
-	put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
+	put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
 }
 
 struct perf_callchain_entry *
@@ -239,7 +239,7 @@ static void perf_duration_warn(struct irq_work *w)
 	u64 avg_local_sample_len;
 	u64 local_samples_len;
 
-	local_samples_len = __get_cpu_var(running_sample_length);
+	local_samples_len = __this_cpu_read(running_sample_length);
 	avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;
 
 	printk_ratelimited(KERN_WARNING
@@ -261,10 +261,10 @@ void perf_sample_event_took(u64 sample_len_ns)
 		return;
 
 	/* decay the counter by 1 average sample */
-	local_samples_len = __get_cpu_var(running_sample_length);
+	local_samples_len = __this_cpu_read(running_sample_length);
 	local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES;
 	local_samples_len += sample_len_ns;
-	__get_cpu_var(running_sample_length) = local_samples_len;
+	__this_cpu_write(running_sample_length, local_samples_len);
 
 	/*
 	 * note: this will be biased artifically low until we have
@@ -877,7 +877,7 @@ static DEFINE_PER_CPU(struct list_head, rotation_list);
 static void perf_pmu_rotate_start(struct pmu *pmu)
 {
 	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
-	struct list_head *head = &__get_cpu_var(rotation_list);
+	struct list_head *head = this_cpu_ptr(&rotation_list);
 
 	WARN_ON(!irqs_disabled());
 
@@ -2389,7 +2389,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
 	 * to check if we have to switch out PMU state.
 	 * cgroup event are system-wide mode only
 	 */
-	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
+	if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
 		perf_cgroup_sched_out(task, next);
 }
 
@@ -2632,11 +2632,11 @@ void __perf_event_task_sched_in(struct task_struct *prev,
 	 * to check if we have to switch in PMU state.
 	 * cgroup event are system-wide mode only
 	 */
-	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
+	if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
 		perf_cgroup_sched_in(prev, task);
 
 	/* check for system-wide branch_stack events */
-	if (atomic_read(&__get_cpu_var(perf_branch_stack_events)))
+	if (atomic_read(this_cpu_ptr(&perf_branch_stack_events)))
 		perf_branch_stack_sched_in(prev, task);
 }
 
@@ -2891,7 +2891,7 @@ bool perf_event_can_stop_tick(void)
 
 void perf_event_task_tick(void)
 {
-	struct list_head *head = &__get_cpu_var(rotation_list);
+	struct list_head *head = this_cpu_ptr(&rotation_list);
 	struct perf_cpu_context *cpuctx, *tmp;
 	struct perf_event_context *ctx;
 	int throttled;
@@ -5671,7 +5671,7 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
 			    struct perf_sample_data *data,
 			    struct pt_regs *regs)
 {
-	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
+	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
 	struct perf_event *event;
 	struct hlist_head *head;
 
@@ -5690,7 +5690,7 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
 
 int perf_swevent_get_recursion_context(void)
 {
-	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
+	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
 
 	return get_recursion_context(swhash->recursion);
 }
@@ -5698,7 +5698,7 @@ EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
 
 inline void perf_swevent_put_recursion_context(int rctx)
 {
-	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
+	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
 
 	put_recursion_context(swhash->recursion, rctx);
 }
@@ -5727,7 +5727,7 @@ static void perf_swevent_read(struct perf_event *event)
 
 static int perf_swevent_add(struct perf_event *event, int flags)
 {
-	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
+	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
 	struct hw_perf_event *hwc = &event->hw;
 	struct hlist_head *head;
 
@@ -650,10 +650,10 @@ static inline int cpu_of(struct rq *rq)
 DECLARE_PER_CPU(struct rq, runqueues);
 
 #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
-#define this_rq() (&__get_cpu_var(runqueues))
+#define this_rq() this_cpu_ptr(&runqueues)
 #define task_rq(p) cpu_rq(task_cpu(p))
 #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
-#define raw_rq() (&__raw_get_cpu_var(runqueues))
+#define raw_rq() raw_cpu_ptr(&runqueues)
 
 static inline u64 rq_clock(struct rq *rq)
 {
@@ -638,7 +638,7 @@ void taskstats_exit(struct task_struct *tsk, int group_dead)
 		fill_tgid_exit(tsk);
 	}
 
-	listeners = __this_cpu_ptr(&listener_array);
+	listeners = raw_cpu_ptr(&listener_array);
 	if (list_empty(&listeners->list))
 		return;
 
@@ -924,7 +924,7 @@ static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
  */
 void tick_nohz_idle_exit(void)
 {
-	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 	ktime_t now;
 
 	local_irq_disable();
@@ -1041,7 +1041,7 @@ static void tick_nohz_kick_tick(struct tick_sched *ts, ktime_t now)
 
 static inline void tick_nohz_irq_enter(void)
 {
-	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 	ktime_t now;
 
 	if (!ts->idle_active && !ts->tick_stopped)
@@ -14,7 +14,7 @@ static DEFINE_PER_CPU(struct hlist_head, return_notifier_list);
 void user_return_notifier_register(struct user_return_notifier *urn)
 {
 	set_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY);
-	hlist_add_head(&urn->link, &__get_cpu_var(return_notifier_list));
+	hlist_add_head(&urn->link, this_cpu_ptr(&return_notifier_list));
 }
 EXPORT_SYMBOL_GPL(user_return_notifier_register);
 
@@ -25,7 +25,7 @@ EXPORT_SYMBOL_GPL(user_return_notifier_register);
 void user_return_notifier_unregister(struct user_return_notifier *urn)
 {
 	hlist_del(&urn->link);
-	if (hlist_empty(&__get_cpu_var(return_notifier_list)))
+	if (hlist_empty(this_cpu_ptr(&return_notifier_list)))
 		clear_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY);
 }
 EXPORT_SYMBOL_GPL(user_return_notifier_unregister);