mm: replace __get_cpu_var uses with this_cpu_ptr
author     Christoph Lameter <cl@linux.com>
           Wed, 4 Jun 2014 23:07:56 +0000 (16:07 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 4 Jun 2014 23:54:03 +0000 (16:54 -0700)
Replace places where __get_cpu_var() is used for an address calculation
with this_cpu_ptr().
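
For illustration only, a minimal sketch of the substitution applied
throughout this patch (the per-CPU variable "foo_stat" and the helper
"foo_inc" are hypothetical, not part of this change): __get_cpu_var()
produced a per-CPU lvalue whose address then had to be taken, while
this_cpu_ptr() returns the current CPU's pointer directly.

    #include <linux/percpu.h>
    #include <linux/preempt.h>

    /* Hypothetical per-CPU counter, used only for this sketch. */
    struct foo_stat {
            unsigned long count;
    };
    static DEFINE_PER_CPU(struct foo_stat, foo_stat);

    static void foo_inc(void)
    {
            struct foo_stat *p;

            preempt_disable();
            /* Old idiom: take the address of the __get_cpu_var() lvalue. */
            /* p = &__get_cpu_var(foo_stat); */
            /* New idiom: ask for this CPU's pointer directly. */
            p = this_cpu_ptr(&foo_stat);
            p->count++;
            preempt_enable();
    }

raw_cpu_ptr(), used below for s->cpu_slab in mm/slub.c, performs the same
address calculation without the preemption checks, for callers that
serialize by other means.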

Signed-off-by: Christoph Lameter <cl@linux.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
lib/radix-tree.c
mm/memcontrol.c
mm/memory-failure.c
mm/page-writeback.c
mm/slub.c
mm/swap.c
mm/vmalloc.c
mm/vmstat.c
mm/zsmalloc.c

index 9599aa72d7a024795b300750e55147a60f58ed19..55f7a9c2731248ba9b281fff5de7a98e868a5d5d 100644 (file)
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -194,7 +194,7 @@ radix_tree_node_alloc(struct radix_tree_root *root)
                 * succeed in getting a node here (and never reach
                 * kmem_cache_alloc)
                 */
-               rtp = &__get_cpu_var(radix_tree_preloads);
+               rtp = this_cpu_ptr(&radix_tree_preloads);
                if (rtp->nr) {
                        ret = rtp->nodes[rtp->nr - 1];
                        rtp->nodes[rtp->nr - 1] = NULL;
@@ -250,14 +250,14 @@ static int __radix_tree_preload(gfp_t gfp_mask)
        int ret = -ENOMEM;
 
        preempt_disable();
-       rtp = &__get_cpu_var(radix_tree_preloads);
+       rtp = this_cpu_ptr(&radix_tree_preloads);
        while (rtp->nr < ARRAY_SIZE(rtp->nodes)) {
                preempt_enable();
                node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
                if (node == NULL)
                        goto out;
                preempt_disable();
-               rtp = &__get_cpu_var(radix_tree_preloads);
+               rtp = this_cpu_ptr(&radix_tree_preloads);
                if (rtp->nr < ARRAY_SIZE(rtp->nodes))
                        rtp->nodes[rtp->nr++] = node;
                else
index 6b448881422b387f525b7b2399233eec1d73c14e..14326935800de602ae6a0c915bb1358c7b193886 100644 (file)
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2436,7 +2436,7 @@ static void drain_stock(struct memcg_stock_pcp *stock)
  */
 static void drain_local_stock(struct work_struct *dummy)
 {
-       struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
+       struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock);
        drain_stock(stock);
        clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 }
index 6917f799412b18f16179c6110e990421056bb4c3..d50f17fb9be2c2998c275a8b807316833513cb43 100644 (file)
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1298,7 +1298,7 @@ static void memory_failure_work_func(struct work_struct *work)
        unsigned long proc_flags;
        int gotten;
 
-       mf_cpu = &__get_cpu_var(memory_failure_cpu);
+       mf_cpu = this_cpu_ptr(&memory_failure_cpu);
        for (;;) {
                spin_lock_irqsave(&mf_cpu->lock, proc_flags);
                gotten = kfifo_get(&mf_cpu->fifo, &entry);
index a4317da60532be3eccb1c1e604362b3923271f65..b9b8e82046280451ed418e58de53dd42d8aa3cd0 100644 (file)
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1623,7 +1623,7 @@ void balance_dirty_pages_ratelimited(struct address_space *mapping)
         * 1000+ tasks, all of them start dirtying pages at exactly the same
         * time, hence all honoured too large initial task->nr_dirtied_pause.
         */
-       p =  &__get_cpu_var(bdp_ratelimits);
+       p =  this_cpu_ptr(&bdp_ratelimits);
        if (unlikely(current->nr_dirtied >= ratelimit))
                *p = 0;
        else if (unlikely(*p >= ratelimit_pages)) {
@@ -1635,7 +1635,7 @@ void balance_dirty_pages_ratelimited(struct address_space *mapping)
         * short-lived tasks (eg. gcc invocations in a kernel build) escaping
         * the dirty throttling and livelock other long-run dirtiers.
         */
-       p = &__get_cpu_var(dirty_throttle_leaks);
+       p = this_cpu_ptr(&dirty_throttle_leaks);
        if (*p > 0 && current->nr_dirtied < ratelimit) {
                unsigned long nr_pages_dirtied;
                nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
index 9e288d7c5e6afc8996d789f3c3e7734502cd3bac..fdf0fe4da9a948e3e1f3f7676508080a5492eb86 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2209,7 +2209,7 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
 
        page = new_slab(s, flags, node);
        if (page) {
-               c = __this_cpu_ptr(s->cpu_slab);
+               c = raw_cpu_ptr(s->cpu_slab);
                if (c->page)
                        flush_slab(s, c);
 
@@ -2425,7 +2425,7 @@ redo:
         * and the retrieval of the tid.
         */
        preempt_disable();
-       c = __this_cpu_ptr(s->cpu_slab);
+       c = this_cpu_ptr(s->cpu_slab);
 
        /*
         * The transaction ids are globally unique per cpu and per operation on
@@ -2681,7 +2681,7 @@ redo:
         * during the cmpxchg then the free will succeed.
         */
        preempt_disable();
-       c = __this_cpu_ptr(s->cpu_slab);
+       c = this_cpu_ptr(s->cpu_slab);
 
        tid = c->tid;
        preempt_enable();
index c0ed4d65438f78427bc769a24b0739aea8bcb888..913b99dfbea5985918917b1b82e05bf324e86fad 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -441,7 +441,7 @@ void rotate_reclaimable_page(struct page *page)
 
                page_cache_get(page);
                local_irq_save(flags);
-               pvec = &__get_cpu_var(lru_rotate_pvecs);
+               pvec = this_cpu_ptr(&lru_rotate_pvecs);
                if (!pagevec_add(pvec, page))
                        pagevec_move_tail(pvec);
                local_irq_restore(flags);
index bf233b283319cea03fcf76f2d81dce5b9933d536..ddaf70b21b59b0487293111dc5a7a81bfd9860b5 100644 (file)
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1496,7 +1496,7 @@ void vfree(const void *addr)
        if (!addr)
                return;
        if (unlikely(in_interrupt())) {
-               struct vfree_deferred *p = &__get_cpu_var(vfree_deferred);
+               struct vfree_deferred *p = this_cpu_ptr(&vfree_deferred);
                if (llist_add((struct llist_node *)addr, &p->list))
                        schedule_work(&p->wq);
        } else
index 82ce17ce58c4d6912a8b0e7189deb3f23891d8a0..376bd2d214824789842fb65f315c607d91ae3e3e 100644 (file)
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -489,7 +489,7 @@ static void refresh_cpu_vm_stats(void)
                        continue;
 
                if (__this_cpu_read(p->pcp.count))
-                       drain_zone_pages(zone, __this_cpu_ptr(&p->pcp));
+                       drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
 #endif
        }
        fold_diff(global_diff);
@@ -1230,7 +1230,7 @@ int sysctl_stat_interval __read_mostly = HZ;
 static void vmstat_update(struct work_struct *w)
 {
        refresh_cpu_vm_stats();
-       schedule_delayed_work(&__get_cpu_var(vmstat_work),
+       schedule_delayed_work(this_cpu_ptr(&vmstat_work),
                round_jiffies_relative(sysctl_stat_interval));
 }
 
index 36b4591a7a2d3b2eca7111b26bee3efa19c7c5c5..5ae5d85b629d3a56f520ae7f4211c3024b0191f3 100644 (file)
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1082,7 +1082,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
        class = &pool->size_class[class_idx];
        off = obj_idx_to_offset(page, obj_idx, class->size);
 
-       area = &__get_cpu_var(zs_map_area);
+       area = this_cpu_ptr(&zs_map_area);
        if (off + class->size <= PAGE_SIZE)
                kunmap_atomic(area->vm_addr);
        else {