mm: replace __get_cpu_var uses with this_cpu_ptr
Replace places where __get_cpu_var() is used for an address calculation with this_cpu_ptr().

Signed-off-by: Christoph Lameter <cl@linux.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent dc6f6c97f1
commit 7c8e0181e6
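Every hunk below makes the same substitution: __get_cpu_var() was being used only to compute the address of a per-CPU variable, and this_cpu_ptr() expresses that address calculation directly. A minimal sketch of the idiom, assuming a hypothetical per-CPU counter and helper that do not appear in this commit:

#include <linux/percpu.h>
#include <linux/preempt.h>

/* Hypothetical per-CPU statistics object, for illustration only. */
struct pkt_stats {
        unsigned long seen;
};

static DEFINE_PER_CPU(struct pkt_stats, pkt_stats);

static void pkt_stats_inc(void)
{
        struct pkt_stats *stats;

        preempt_disable();      /* stay on this CPU while we touch its copy */

        /* Old idiom: take the address of the lvalue the macro yields. */
        /* stats = &__get_cpu_var(pkt_stats); */

        /* New idiom: ask for this CPU's pointer directly. */
        stats = this_cpu_ptr(&pkt_stats);
        stats->seen++;

        preempt_enable();
}

Both spellings yield the same pointer; the new one states the intent (an address calculation) instead of dereferencing and re-taking the address.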
@@ -194,7 +194,7 @@ radix_tree_node_alloc(struct radix_tree_root *root)
                 * succeed in getting a node here (and never reach
                 * kmem_cache_alloc)
                 */
-               rtp = &__get_cpu_var(radix_tree_preloads);
+               rtp = this_cpu_ptr(&radix_tree_preloads);
                if (rtp->nr) {
                        ret = rtp->nodes[rtp->nr - 1];
                        rtp->nodes[rtp->nr - 1] = NULL;
@@ -250,14 +250,14 @@ static int __radix_tree_preload(gfp_t gfp_mask)
        int ret = -ENOMEM;

        preempt_disable();
-       rtp = &__get_cpu_var(radix_tree_preloads);
+       rtp = this_cpu_ptr(&radix_tree_preloads);
        while (rtp->nr < ARRAY_SIZE(rtp->nodes)) {
                preempt_enable();
                node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
                if (node == NULL)
                        goto out;
                preempt_disable();
-               rtp = &__get_cpu_var(radix_tree_preloads);
+               rtp = this_cpu_ptr(&radix_tree_preloads);
                if (rtp->nr < ARRAY_SIZE(rtp->nodes))
                        rtp->nodes[rtp->nr++] = node;
                else
@@ -2436,7 +2436,7 @@ static void drain_stock(struct memcg_stock_pcp *stock)
  */
 static void drain_local_stock(struct work_struct *dummy)
 {
-       struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
+       struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock);
        drain_stock(stock);
        clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 }
@@ -1298,7 +1298,7 @@ static void memory_failure_work_func(struct work_struct *work)
        unsigned long proc_flags;
        int gotten;

-       mf_cpu = &__get_cpu_var(memory_failure_cpu);
+       mf_cpu = this_cpu_ptr(&memory_failure_cpu);
        for (;;) {
                spin_lock_irqsave(&mf_cpu->lock, proc_flags);
                gotten = kfifo_get(&mf_cpu->fifo, &entry);
@@ -1623,7 +1623,7 @@ void balance_dirty_pages_ratelimited(struct address_space *mapping)
         * 1000+ tasks, all of them start dirtying pages at exactly the same
         * time, hence all honoured too large initial task->nr_dirtied_pause.
         */
-       p = &__get_cpu_var(bdp_ratelimits);
+       p = this_cpu_ptr(&bdp_ratelimits);
        if (unlikely(current->nr_dirtied >= ratelimit))
                *p = 0;
        else if (unlikely(*p >= ratelimit_pages)) {
@@ -1635,7 +1635,7 @@ void balance_dirty_pages_ratelimited(struct address_space *mapping)
         * short-lived tasks (eg. gcc invocations in a kernel build) escaping
         * the dirty throttling and livelock other long-run dirtiers.
         */
-       p = &__get_cpu_var(dirty_throttle_leaks);
+       p = this_cpu_ptr(&dirty_throttle_leaks);
        if (*p > 0 && current->nr_dirtied < ratelimit) {
                unsigned long nr_pages_dirtied;
                nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
@@ -2209,7 +2209,7 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,

        page = new_slab(s, flags, node);
        if (page) {
-               c = __this_cpu_ptr(s->cpu_slab);
+               c = raw_cpu_ptr(s->cpu_slab);
                if (c->page)
                        flush_slab(s, c);

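new_slab_objects() is the one site in this patch that is not converted to this_cpu_ptr(): __this_cpu_ptr() was the unchecked form of the address calculation, and raw_cpu_ptr() is its direct replacement, skipping the preemption sanity check that this_cpu_ptr() can perform when CONFIG_DEBUG_PREEMPT is enabled. A rough sketch of the distinction, using a made-up per-CPU variable rather than s->cpu_slab:

#include <linux/percpu.h>

static DEFINE_PER_CPU(int, demo_count);         /* illustrative only */

static void demo(void)
{
        int *p;

        /*
         * Checked form: with CONFIG_DEBUG_PREEMPT the CPU lookup goes
         * through the debug smp_processor_id() path, which complains if
         * the caller could still migrate to another CPU.
         */
        p = this_cpu_ptr(&demo_count);

        /*
         * Raw form: the same address calculation with no such check, for
         * callers that already know (or can tolerate) the context.
         */
        p = raw_cpu_ptr(&demo_count);

        (void)p;
}

The other two slub.c hunks below sit directly after preempt_disable(), so they can take the checked this_cpu_ptr() instead.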
@@ -2425,7 +2425,7 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
         * and the retrieval of the tid.
         */
        preempt_disable();
-       c = __this_cpu_ptr(s->cpu_slab);
+       c = this_cpu_ptr(s->cpu_slab);

        /*
         * The transaction ids are globally unique per cpu and per operation on
@@ -2681,7 +2681,7 @@ static __always_inline void slab_free(struct kmem_cache *s,
         * during the cmpxchg then the free will succedd.
         */
        preempt_disable();
-       c = __this_cpu_ptr(s->cpu_slab);
+       c = this_cpu_ptr(s->cpu_slab);

        tid = c->tid;
        preempt_enable();
@@ -441,7 +441,7 @@ void rotate_reclaimable_page(struct page *page)

                page_cache_get(page);
                local_irq_save(flags);
-               pvec = &__get_cpu_var(lru_rotate_pvecs);
+               pvec = this_cpu_ptr(&lru_rotate_pvecs);
                if (!pagevec_add(pvec, page))
                        pagevec_move_tail(pvec);
                local_irq_restore(flags);
@@ -1496,7 +1496,7 @@ void vfree(const void *addr)
        if (!addr)
                return;
        if (unlikely(in_interrupt())) {
-               struct vfree_deferred *p = &__get_cpu_var(vfree_deferred);
+               struct vfree_deferred *p = this_cpu_ptr(&vfree_deferred);
                if (llist_add((struct llist_node *)addr, &p->list))
                        schedule_work(&p->wq);
        } else
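For context on the hunk above: the per-CPU vfree_deferred object pairs a lock-free llist with a work item so that vfree() called from interrupt context can hand the address off to a worker. A sketch of such a structure (not necessarily the exact declaration in mm/vmalloc.c, but the field names match the hunk):

#include <linux/llist.h>
#include <linux/percpu.h>
#include <linux/workqueue.h>

struct vfree_deferred {
        struct llist_head list;         /* addresses queued from interrupt context */
        struct work_struct wq;          /* worker that performs the real free later */
};

static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

llist_add() returns true only when the list was empty beforehand, which is why the hunk schedules the work item just once per batch of deferred frees.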
@@ -489,7 +489,7 @@ static void refresh_cpu_vm_stats(void)
                        continue;

                if (__this_cpu_read(p->pcp.count))
-                       drain_zone_pages(zone, __this_cpu_ptr(&p->pcp));
+                       drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
 #endif
        }
        fold_diff(global_diff);
@@ -1230,7 +1230,7 @@ int sysctl_stat_interval __read_mostly = HZ;
 static void vmstat_update(struct work_struct *w)
 {
        refresh_cpu_vm_stats();
-       schedule_delayed_work(&__get_cpu_var(vmstat_work),
+       schedule_delayed_work(this_cpu_ptr(&vmstat_work),
                round_jiffies_relative(sysctl_stat_interval));
 }

@@ -1082,7 +1082,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
        class = &pool->size_class[class_idx];
        off = obj_idx_to_offset(page, obj_idx, class->size);

-       area = &__get_cpu_var(zs_map_area);
+       area = this_cpu_ptr(&zs_map_area);
        if (off + class->size <= PAGE_SIZE)
                kunmap_atomic(area->vm_addr);
        else {