arch: use memblock_alloc() instead of memblock_alloc_from(size, align, 0)
Author:     Mike Rapoport <rppt@linux.ibm.com>
AuthorDate: Tue, 12 Mar 2019 06:29:50 +0000 (23:29 -0700)
Commit:     Linus Torvalds <torvalds@linux-foundation.org>
CommitDate: Tue, 12 Mar 2019 17:04:01 +0000 (10:04 -0700)
The last parameter of memblock_alloc_from() is the lower limit for the
memory allocation.  When it is 0, the call is equivalent to
memblock_alloc().
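
For reference, both helpers are thin wrappers around
memblock_alloc_try_nid().  The sketch below is an illustration of how
they are defined in include/linux/memblock.h at this point (not part of
this patch) and shows why a zero lower limit makes the two calls
identical:

	static inline void * __init memblock_alloc(phys_addr_t size,
						    phys_addr_t align)
	{
		return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
					      MEMBLOCK_ALLOC_ACCESSIBLE,
					      NUMA_NO_NODE);
	}

	static inline void * __init memblock_alloc_from(phys_addr_t size,
							phys_addr_t align,
							phys_addr_t min_addr)
	{
		return memblock_alloc_try_nid(size, align, min_addr,
					      MEMBLOCK_ALLOC_ACCESSIBLE,
					      NUMA_NO_NODE);
	}

	/* MEMBLOCK_LOW_LIMIT is 0, i.e. "no lower bound", so
	 * memblock_alloc_from(size, align, 0) is the same request as
	 * memblock_alloc(size, align).
	 */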

Link: http://lkml.kernel.org/r/1548057848-15136-13-git-send-email-rppt@linux.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Acked-by: Paul Burton <paul.burton@mips.com> # MIPS part
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@c-s.fr>
Cc: Christoph Hellwig <hch@lst.de>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Guo Ren <guoren@kernel.org>
Cc: Guo Ren <ren_guo@c-sky.com> [c-sky]
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Juergen Gross <jgross@suse.com> [Xen]
Cc: Mark Salter <msalter@redhat.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Rob Herring <robh+dt@kernel.org>
Cc: Rob Herring <robh@kernel.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/alpha/kernel/core_cia.c
arch/alpha/kernel/pci_iommu.c
arch/alpha/kernel/setup.c
arch/ia64/kernel/mca.c
arch/mips/kernel/traps.c
arch/sparc/kernel/prom_32.c
arch/sparc/mm/init_32.c
arch/sparc/mm/srmmu.c

diff --git a/arch/alpha/kernel/core_cia.c b/arch/alpha/kernel/core_cia.c
index 867e8730b0c5c4819a19983efa19522a0984f760..466cd44d8b36745cd7e717302ab2a853b1ff1e83 100644
--- a/arch/alpha/kernel/core_cia.c
+++ b/arch/alpha/kernel/core_cia.c
@@ -331,7 +331,7 @@ cia_prepare_tbia_workaround(int window)
        long i;
 
        /* Use minimal 1K map. */
-       ppte = memblock_alloc_from(CIA_BROKEN_TBIA_SIZE, 32768, 0);
+       ppte = memblock_alloc(CIA_BROKEN_TBIA_SIZE, 32768);
        pte = (virt_to_phys(ppte) >> (PAGE_SHIFT - 1)) | 1;
 
        for (i = 0; i < CIA_BROKEN_TBIA_SIZE / sizeof(unsigned long); ++i)
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index aa0f50d0f8237f73ec921239a509f51a6d20e167..e4cf77b07742e0a1e6150fc6c7e32fdbf9374ff5 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -87,13 +87,13 @@ iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
                printk("%s: couldn't allocate arena ptes from node %d\n"
                       "    falling back to system-wide allocation\n",
                       __func__, nid);
-               arena->ptes = memblock_alloc_from(mem_size, align, 0);
+               arena->ptes = memblock_alloc(mem_size, align);
        }
 
 #else /* CONFIG_DISCONTIGMEM */
 
        arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
-       arena->ptes = memblock_alloc_from(mem_size, align, 0);
+       arena->ptes = memblock_alloc(mem_size, align);
 
 #endif /* CONFIG_DISCONTIGMEM */
 
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index 4b5b1b244f86a108bdda89f33316e84c77950df3..5d4c76a77a9f3edd9d8769f91c9425bb293bbbbe 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -293,7 +293,7 @@ move_initrd(unsigned long mem_limit)
        unsigned long size;
 
        size = initrd_end - initrd_start;
-       start = memblock_alloc_from(PAGE_ALIGN(size), PAGE_SIZE, 0);
+       start = memblock_alloc(PAGE_ALIGN(size), PAGE_SIZE);
        if (!start || __pa(start) + size > mem_limit) {
                initrd_start = initrd_end = 0;
                return NULL;
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 91bd1e129379db09a7cc0ac60c41d5d94d04fc70..74d148bd48765ea5e0cf70678748a0b89fc65888 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1835,8 +1835,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
 /* Caller prevents this from being called after init */
 static void * __ref mca_bootmem(void)
 {
-       return memblock_alloc_from(sizeof(struct ia64_mca_cpu),
-                                  KERNEL_STACK_SIZE, 0);
+       return memblock_alloc(sizeof(struct ia64_mca_cpu), KERNEL_STACK_SIZE);
 }
 
 /* Do per-CPU MCA-related initialization.  */
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 42d411125690fa7c31950d6618c76c0d5798e59c..fc511ecefec6f238d62867d126d80ba7bb3f4ce7 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -2293,7 +2293,7 @@ void __init trap_init(void)
                phys_addr_t ebase_pa;
 
                ebase = (unsigned long)
-                       memblock_alloc_from(size, 1 << fls(size), 0);
+                       memblock_alloc(size, 1 << fls(size));
 
                /*
                 * Try to ensure ebase resides in KSeg0 if possible.
diff --git a/arch/sparc/kernel/prom_32.c b/arch/sparc/kernel/prom_32.c
index 42d7f2a7da6d080d138c931a1589b39e95735415..38940afaa696537f29c4d29d06934b033e30fab7 100644
--- a/arch/sparc/kernel/prom_32.c
+++ b/arch/sparc/kernel/prom_32.c
@@ -32,7 +32,7 @@ void * __init prom_early_alloc(unsigned long size)
 {
        void *ret;
 
-       ret = memblock_alloc_from(size, SMP_CACHE_BYTES, 0UL);
+       ret = memblock_alloc(size, SMP_CACHE_BYTES);
        if (ret != NULL)
                memset(ret, 0, size);
 
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index d900952bfc5f3b0da140e722025f9aea21bfcef4..a8ff29821bdbc44466481b310fdedf3729c50047 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -264,7 +264,7 @@ void __init mem_init(void)
        i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5);
        i += 1;
        sparc_valid_addr_bitmap = (unsigned long *)
-               memblock_alloc_from(i << 2, SMP_CACHE_BYTES, 0UL);
+               memblock_alloc(i << 2, SMP_CACHE_BYTES);
 
        if (sparc_valid_addr_bitmap == NULL) {
                prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index b609362e846fcce0ceceba0795598cad03358ec7..a400ec31956476d7ed459474245c3de633fbd989 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -303,13 +303,13 @@ static void __init srmmu_nocache_init(void)
 
        bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;
 
-       srmmu_nocache_pool = memblock_alloc_from(srmmu_nocache_size,
-                                                SRMMU_NOCACHE_ALIGN_MAX, 0UL);
+       srmmu_nocache_pool = memblock_alloc(srmmu_nocache_size,
+                                           SRMMU_NOCACHE_ALIGN_MAX);
        memset(srmmu_nocache_pool, 0, srmmu_nocache_size);
 
        srmmu_nocache_bitmap =
-               memblock_alloc_from(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
-                                   SMP_CACHE_BYTES, 0UL);
+               memblock_alloc(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
+                              SMP_CACHE_BYTES);
        bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);
 
        srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
@@ -467,7 +467,7 @@ static void __init sparc_context_init(int numctx)
        unsigned long size;
 
        size = numctx * sizeof(struct ctx_list);
-       ctx_list_pool = memblock_alloc_from(size, SMP_CACHE_BYTES, 0UL);
+       ctx_list_pool = memblock_alloc(size, SMP_CACHE_BYTES);
 
        for (ctx = 0; ctx < numctx; ctx++) {
                struct ctx_list *clist;