Merge branch 'for-4.20' of git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu
authorLinus Torvalds <torvalds@linux-foundation.org>
Thu, 1 Nov 2018 16:27:57 +0000 (09:27 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Thu, 1 Nov 2018 16:27:57 +0000 (09:27 -0700)
Pull percpu fixes from Dennis Zhou:
 "Two small things for v4.20.

  The first fixes a clang uninitialized-variable warning for arm64 in
  the default case, whose path calls BUILD_BUG(). The second removes an
  unnecessary unlikely() in a WARN_ON() use"

* 'for-4.20' of git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu:
  arm64: percpu: Initialize ret in the default case
  mm: percpu: remove unnecessary unlikely()

1  2 
mm/percpu.c

diff --combined mm/percpu.c
index a6b74c6fe0becd3ef42284aa643933ae4fb7f0cf,f5c2796fe63e5c5c0e3a8a89b079528bbb8390ab..db86282fd024580cbf5c41f01cb6d5447a9e1791
@@@ -65,7 -65,7 +65,7 @@@
  #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  
  #include <linux/bitmap.h>
 -#include <linux/bootmem.h>
 +#include <linux/memblock.h>
  #include <linux/err.h>
  #include <linux/lcm.h>
  #include <linux/list.h>
@@@ -1101,9 -1101,9 +1101,9 @@@ static struct pcpu_chunk * __init pcpu_
        region_size = ALIGN(start_offset + map_size, lcm_align);
  
        /* allocate chunk */
 -      chunk = memblock_virt_alloc(sizeof(struct pcpu_chunk) +
 -                                  BITS_TO_LONGS(region_size >> PAGE_SHIFT),
 -                                  0);
 +      chunk = memblock_alloc(sizeof(struct pcpu_chunk) +
 +                             BITS_TO_LONGS(region_size >> PAGE_SHIFT),
 +                             SMP_CACHE_BYTES);
  
        INIT_LIST_HEAD(&chunk->list);
  
        chunk->nr_pages = region_size >> PAGE_SHIFT;
        region_bits = pcpu_chunk_map_bits(chunk);
  
 -      chunk->alloc_map = memblock_virt_alloc(BITS_TO_LONGS(region_bits) *
 -                                             sizeof(chunk->alloc_map[0]), 0);
 -      chunk->bound_map = memblock_virt_alloc(BITS_TO_LONGS(region_bits + 1) *
 -                                             sizeof(chunk->bound_map[0]), 0);
 -      chunk->md_blocks = memblock_virt_alloc(pcpu_chunk_nr_blocks(chunk) *
 -                                             sizeof(chunk->md_blocks[0]), 0);
 +      chunk->alloc_map = memblock_alloc(BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]),
 +                                        SMP_CACHE_BYTES);
 +      chunk->bound_map = memblock_alloc(BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]),
 +                                        SMP_CACHE_BYTES);
 +      chunk->md_blocks = memblock_alloc(pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]),
 +                                        SMP_CACHE_BYTES);
        pcpu_init_md_blocks(chunk);
  
        /* manage populated page bitmap */
@@@ -1212,7 -1212,6 +1212,7 @@@ static void pcpu_free_chunk(struct pcpu
  {
        if (!chunk)
                return;
 +      pcpu_mem_free(chunk->md_blocks);
        pcpu_mem_free(chunk->bound_map);
        pcpu_mem_free(chunk->alloc_map);
        pcpu_mem_free(chunk);
@@@ -1888,7 -1887,7 +1888,7 @@@ struct pcpu_alloc_info * __init pcpu_al
                          __alignof__(ai->groups[0].cpu_map[0]));
        ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
  
 -      ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), PAGE_SIZE);
 +      ptr = memblock_alloc_nopanic(PFN_ALIGN(ai_size), PAGE_SIZE);
        if (!ptr)
                return NULL;
        ai = ptr;
@@@ -2075,14 -2074,12 +2075,14 @@@ int __init pcpu_setup_first_chunk(cons
        PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
  
        /* process group information and build config tables accordingly */
 -      group_offsets = memblock_virt_alloc(ai->nr_groups *
 -                                           sizeof(group_offsets[0]), 0);
 -      group_sizes = memblock_virt_alloc(ai->nr_groups *
 -                                         sizeof(group_sizes[0]), 0);
 -      unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0);
 -      unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0);
 +      group_offsets = memblock_alloc(ai->nr_groups * sizeof(group_offsets[0]),
 +                                     SMP_CACHE_BYTES);
 +      group_sizes = memblock_alloc(ai->nr_groups * sizeof(group_sizes[0]),
 +                                   SMP_CACHE_BYTES);
 +      unit_map = memblock_alloc(nr_cpu_ids * sizeof(unit_map[0]),
 +                                SMP_CACHE_BYTES);
 +      unit_off = memblock_alloc(nr_cpu_ids * sizeof(unit_off[0]),
 +                                SMP_CACHE_BYTES);
  
        for (cpu = 0; cpu < nr_cpu_ids; cpu++)
                unit_map[cpu] = UINT_MAX;
         * empty chunks.
         */
        pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
 -      pcpu_slot = memblock_virt_alloc(
 -                      pcpu_nr_slots * sizeof(pcpu_slot[0]), 0);
 +      pcpu_slot = memblock_alloc(pcpu_nr_slots * sizeof(pcpu_slot[0]),
 +                                 SMP_CACHE_BYTES);
        for (i = 0; i < pcpu_nr_slots; i++)
                INIT_LIST_HEAD(&pcpu_slot[i]);
  
@@@ -2460,7 -2457,7 +2460,7 @@@ int __init pcpu_embed_first_chunk(size_
        size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
        areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
  
 -      areas = memblock_virt_alloc_nopanic(areas_size, 0);
 +      areas = memblock_alloc_nopanic(areas_size, SMP_CACHE_BYTES);
        if (!areas) {
                rc = -ENOMEM;
                goto out_free;
@@@ -2591,7 -2588,7 +2591,7 @@@ int __init pcpu_page_first_chunk(size_
        BUG_ON(ai->nr_groups != 1);
        upa = ai->alloc_size/ai->unit_size;
        nr_g0_units = roundup(num_possible_cpus(), upa);
-       if (unlikely(WARN_ON(ai->groups[0].nr_units != nr_g0_units))) {
+       if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) {
                pcpu_free_alloc_info(ai);
                return -EINVAL;
        }
        /* unaligned allocations can't be freed, round up to page size */
        pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
                               sizeof(pages[0]));
 -      pages = memblock_virt_alloc(pages_size, 0);
 +      pages = memblock_alloc(pages_size, SMP_CACHE_BYTES);
  
        /* allocate pages */
        j = 0;
@@@ -2690,7 -2687,7 +2690,7 @@@ EXPORT_SYMBOL(__per_cpu_offset)
  static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
                                       size_t align)
  {
 -      return  memblock_virt_alloc_from_nopanic(
 +      return  memblock_alloc_from_nopanic(
                        size, align, __pa(MAX_DMA_ADDRESS));
  }
  
@@@ -2739,7 -2736,7 +2739,7 @@@ void __init setup_per_cpu_areas(void
        void *fc;
  
        ai = pcpu_alloc_alloc_info(1, 1);
 -      fc = memblock_virt_alloc_from_nopanic(unit_size,
 +      fc = memblock_alloc_from_nopanic(unit_size,
                                              PAGE_SIZE,
                                              __pa(MAX_DMA_ADDRESS));
        if (!ai || !fc)