diff --git a/mm/slub.c b/mm/slub.c
index ebdae0c77ce60504edcef4d531139d66a66ffefb..7002af0f013f92d27652325a082945152fe06f13 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -259,7 +259,7 @@ static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
         * freepointer to be restored incorrectly.
         */
        return (void *)((unsigned long)ptr ^ s->random ^
-                       (unsigned long)kasan_reset_tag((void *)ptr_addr));
+                       swab((unsigned long)kasan_reset_tag((void *)ptr_addr)));
 #else
        return ptr;
 #endif
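
Under CONFIG_SLAB_FREELIST_HARDENED the stored free pointer is XORed with a per-cache secret (s->random) and with the address it is stored at. Because ptr and ptr_addr are usually close in the address space, a plain XOR cancels most of the high bits; swab() byte-swaps the address first so its high-entropy bits land on the low bits of the stored value. A minimal userspace sketch of the round trip, with illustrative constants and swab64() standing in for the kernel's swab():

#include <stdint.h>
#include <stdio.h>

static inline uint64_t swab64(uint64_t x)	/* stand-in for the kernel's swab() */
{
	return __builtin_bswap64(x);
}

/* XOR is an involution, so one function both encodes and decodes. */
static uint64_t freelist_xform(uint64_t secret, uint64_t ptr, uint64_t ptr_addr)
{
	return ptr ^ secret ^ swab64(ptr_addr);
}

int main(void)
{
	uint64_t secret = 0xdeadbeefcafef00dULL;	/* plays s->random */
	uint64_t next   = 0xffff888012345678ULL;	/* next free object */
	uint64_t addr   = 0xffff888012345640ULL;	/* where it is stored */

	uint64_t stored = freelist_xform(secret, next, addr);
	printf("round trip ok: %d\n", freelist_xform(secret, stored, addr) == next);
	return 0;
}
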
@@ -449,6 +449,7 @@ static DEFINE_SPINLOCK(object_map_lock);
  * not vanish from under us.
  */
 static unsigned long *get_map(struct kmem_cache *s, struct page *page)
+       __acquires(&object_map_lock)
 {
        void *p;
        void *addr = page_address(page);
@@ -465,7 +466,7 @@ static unsigned long *get_map(struct kmem_cache *s, struct page *page)
        return object_map;
 }
 
-static void put_map(unsigned long *map)
+static void put_map(unsigned long *map) __releases(&object_map_lock)
 {
        VM_BUG_ON(map != object_map);
        lockdep_assert_held(&object_map_lock);
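
__acquires() and __releases() are sparse-only annotations: they declare that get_map() returns with object_map_lock held and that put_map() releases it, so `make C=1` no longer warns about context imbalance between the two halves. Simplified from include/linux/compiler_types.h, they expand to sparse "context" attributes and to nothing for a regular compile:

#ifdef __CHECKER__
# define __acquires(x)	__attribute__((context(x, 0, 1)))	/* lock count 0 -> 1 */
# define __releases(x)	__attribute__((context(x, 1, 0)))	/* lock count 1 -> 0 */
#else
# define __acquires(x)
# define __releases(x)
#endif
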
@@ -1973,8 +1974,6 @@ static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
 
        if (node == NUMA_NO_NODE)
                searchnode = numa_mem_id();
-       else if (!node_present_pages(node))
-               searchnode = node_to_mem_node(node);
 
        object = get_partial_node(s, get_node(s, searchnode), c, flags);
        if (object || node != NUMA_NO_NODE)
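
The dropped fallback is not lost: handling of memoryless or offline nodes moves up into ___slab_alloc(), which now clears the node constraint before get_partial() is ever called (see the ___slab_alloc() hunk below). This keeps get_partial() from being redirected at a node whose kmem_cache_node may not exist.
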
@@ -2207,11 +2206,11 @@ static void unfreeze_partials(struct kmem_cache *s,
        struct kmem_cache_node *n = NULL, *n2 = NULL;
        struct page *page, *discard_page = NULL;
 
-       while ((page = c->partial)) {
+       while ((page = slub_percpu_partial(c))) {
                struct page new;
                struct page old;
 
-               c->partial = page->next;
+               slub_set_percpu_partial(c, page);
 
                n2 = get_node(s, page_to_nid(page));
                if (n != n2) {
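
Note that slub_set_percpu_partial(c, page) is not a plain assignment of page: as defined in include/linux/slub_def.h at the time of this series, the macro stores page->next, so the conversion above is behavior-preserving, and both accessors degrade gracefully when CONFIG_SLUB_CPU_PARTIAL is off:

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)		((c)->partial)

#define slub_set_percpu_partial(c, p)		\
({						\
	slub_percpu_partial(c) = (p)->next;	\
})
#else
#define slub_percpu_partial(c)		NULL

#define slub_set_percpu_partial(c, p)
#endif /* CONFIG_SLUB_CPU_PARTIAL */
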
@@ -2284,7 +2283,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
                if (oldpage) {
                        pobjects = oldpage->pobjects;
                        pages = oldpage->pages;
-                       if (drain && pobjects > s->cpu_partial) {
+                       if (drain && pobjects > slub_cpu_partial(s)) {
                                unsigned long flags;
                                /*
                                 * partial array is full. Move the existing
@@ -2309,7 +2308,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 
        } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
                                                                != oldpage);
-       if (unlikely(!s->cpu_partial)) {
+       if (unlikely(!slub_cpu_partial(s))) {
                unsigned long flags;
 
                local_irq_save(flags);
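
slub_cpu_partial() follows the same pattern: it reads s->cpu_partial when CONFIG_SLUB_CPU_PARTIAL is enabled and is a constant 0 otherwise, so tests like the two above compile sensibly in both configurations without an #ifdef at each call site. Roughly, from include/linux/slub_def.h:

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_cpu_partial(s)		((s)->cpu_partial)
#define slub_set_cpu_partial(s, n)	\
({					\
	slub_cpu_partial(s) = (n);	\
})
#else
#define slub_cpu_partial(s)		(0)
#define slub_set_cpu_partial(s, n)
#endif /* CONFIG_SLUB_CPU_PARTIAL */
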
@@ -2563,17 +2562,27 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
        struct page *page;
 
        page = c->page;
-       if (!page)
+       if (!page) {
+               /*
+                * if the node is not online or has no normal memory, just
+                * ignore the node constraint
+                */
+               if (unlikely(node != NUMA_NO_NODE &&
+                            !node_state(node, N_NORMAL_MEMORY)))
+                       node = NUMA_NO_NODE;
                goto new_slab;
+       }
 redo:
 
        if (unlikely(!node_match(page, node))) {
-               int searchnode = node;
-
-               if (node != NUMA_NO_NODE && !node_present_pages(node))
-                       searchnode = node_to_mem_node(node);
-
-               if (unlikely(!node_match(page, searchnode))) {
+               /*
+                * same as above but node_match() being false already
+                * implies node != NUMA_NO_NODE
+                */
+               if (!node_state(node, N_NORMAL_MEMORY)) {
+                       node = NUMA_NO_NODE;
+                       goto redo;
+               } else {
                        stat(s, ALLOC_NODE_MISMATCH);
                        deactivate_slab(s, page, c->freelist, c);
                        goto new_slab;
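
node_state(node, N_NORMAL_MEMORY) tests the global nodemask of nodes that have normal (non-highmem) memory, so both offline and memoryless nodes fail the check and the allocation falls back to NUMA_NO_NODE, rather than being redirected to a "nearby" node whose kmem_cache_node may never have been allocated, which is the crash the removed node_to_mem_node() path could trigger. The helper is roughly, from include/linux/nodemask.h:

/* Simplified from include/linux/nodemask.h: */
extern nodemask_t node_states[NR_NODE_STATES];

static inline int node_state(int node, enum node_states state)
{
	return node_isset(node, node_states[state]);
}
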
@@ -2997,11 +3006,13 @@ redo:
        barrier();
 
        if (likely(page == c->page)) {
-               set_freepointer(s, tail_obj, c->freelist);
+               void **freelist = READ_ONCE(c->freelist);
+
+               set_freepointer(s, tail_obj, freelist);
 
                if (unlikely(!this_cpu_cmpxchg_double(
                                s->cpu_slab->freelist, s->cpu_slab->tid,
-                               c->freelist, tid,
+                               freelist, tid,
                                head, next_tid(tid)))) {
 
                        note_cmpxchg_failure("slab_free", s, tid);
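
The READ_ONCE() matters because c->freelist was previously read twice: once for set_freepointer() and once as the cmpxchg "expected" value. If the compiler reloads it and an interrupt frees an object in between, the cmpxchg can succeed while tail_obj's free pointer references a stale chain. Reading once and reusing the snapshot closes that window. The same read-once-then-CAS shape in portable C11, as a minimal lock-free LIFO push (all names illustrative):

#include <stdatomic.h>

struct node { struct node *next; };

/*
 * Read the head exactly once, then reuse that same snapshot both as
 * the value linked into the new node and as the CAS "expected"
 * argument, so the two can never diverge (mirroring the READ_ONCE()
 * plus cmpxchg pairing above).
 */
static void push(_Atomic(struct node *) *head, struct node *n)
{
	struct node *old = atomic_load_explicit(head, memory_order_relaxed);

	do {
		n->next = old;	/* on CAS failure, old is refreshed */
	} while (!atomic_compare_exchange_weak_explicit(
			head, &old, n,
			memory_order_release, memory_order_relaxed));
}
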
@@ -3174,6 +3185,15 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
                void *object = c->freelist;
 
                if (unlikely(!object)) {
+                       /*
+                        * We may have removed an object from c->freelist using
+                        * the fastpath in the previous iteration; in that case,
+                        * c->tid has not been bumped yet.
+                        * Since ___slab_alloc() may reenable interrupts while
+                        * allocating memory, we should bump c->tid now.
+                        */
+                       c->tid = next_tid(c->tid);
+
                        /*
                         * Invoking the slow path likely has the side effect
                         * of re-populating the per-CPU c->freelist
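
The fastpath pairs (freelist, tid) in a cmpxchg_double, so any slow path that may touch the freelist has to bump the transaction id first; a concurrent fastpath that sampled the old pair then fails its cmpxchg and retries instead of corrupting the list. The tid helpers are plain counters, simplified from mm/slub.c (with preemption the step is sized so tids of different cpus never collide):

#ifdef CONFIG_PREEMPTION
/*
 * Interleave the tids of different cpus so a task migrated between
 * cpus can never see a matching (freelist, tid) pair by accident.
 */
#define TID_STEP	roundup_pow_of_two(CONFIG_NR_CPUS)
#else
#define TID_STEP	1
#endif

static inline unsigned long next_tid(unsigned long tid)
{
	return tid + TID_STEP;
}
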
@@ -3493,15 +3513,15 @@ static void set_cpu_partial(struct kmem_cache *s)
         *    50% to keep some capacity around for frees.
         */
        if (!kmem_cache_has_cpu_partial(s))
-               s->cpu_partial = 0;
+               slub_set_cpu_partial(s, 0);
        else if (s->size >= PAGE_SIZE)
-               s->cpu_partial = 2;
+               slub_set_cpu_partial(s, 2);
        else if (s->size >= 1024)
-               s->cpu_partial = 6;
+               slub_set_cpu_partial(s, 6);
        else if (s->size >= 256)
-               s->cpu_partial = 13;
+               slub_set_cpu_partial(s, 13);
        else
-               s->cpu_partial = 30;
+               slub_set_cpu_partial(s, 30);
 #endif
 }
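
The thresholds above cap s->cpu_partial, the number of objects' worth of pages allowed on each cpu's partial list, trading per-cpu memory held in reserve against lock-free allocation and free capacity; bigger objects get smaller caps. A userspace restatement of the policy, assuming a 4096-byte PAGE_SIZE for illustration:

/* Userspace restatement of set_cpu_partial()'s thresholds. */
static unsigned int cpu_partial_cap(unsigned int size, int has_cpu_partial)
{
	if (!has_cpu_partial)
		return 0;	/* e.g. debug caches keep no per-cpu partials */
	if (size >= 4096)	/* PAGE_SIZE on most configurations */
		return 2;
	if (size >= 1024)
		return 6;
	if (size >= 256)
		return 13;
	return 30;
}
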
 
@@ -3562,6 +3582,13 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
                 */
                s->offset = size;
                size += sizeof(void *);
+       } else if (size > sizeof(void *)) {
+               /*
+                * Store freelist pointer near middle of object to keep
+                * it away from the edges of the object to avoid small
+                * sized over/underflows from neighboring allocations.
+                */
+               s->offset = ALIGN(size / 2, sizeof(void *));
        }
 
 #ifdef CONFIG_SLUB_DEBUG
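
For caches that keep the free pointer inside the object (no constructor, no SLAB_TYPESAFE_BY_RCU, no poisoning), the pointer now sits near the middle of the object instead of at offset 0, so a small linear overflow from the previous object or underflow from the next one no longer lands on freelist metadata. A worked example of the placement, assuming 8-byte pointers and ALIGN() as in the kernel:

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned int sizes[] = { 16, 96, 136, 512 };

	/* ALIGN(size / 2, sizeof(void *)) as in calculate_sizes() above. */
	for (int i = 0; i < 4; i++)
		printf("object size %3u -> freepointer at offset %3lu\n",
		       sizes[i], ALIGN(sizes[i] / 2, sizeof(void *)));
	return 0;
}
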