percpu: use pcpu_free_slot instead of pcpu_nr_slots - 1
authorDennis Zhou <dennis@kernel.org>
Sun, 18 Apr 2021 22:44:16 +0000 (22:44 +0000)
committerDennis Zhou <dennis@kernel.org>
Wed, 21 Apr 2021 18:17:40 +0000 (18:17 +0000)
This prepares for adding a to_depopulate list and a sidelined list after
the free slot in the set of lists in pcpu_slot.

Signed-off-by: Dennis Zhou <dennis@kernel.org>
Acked-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Dennis Zhou <dennis@kernel.org>
mm/percpu.c

index 5edc7bd8813306981220eca83929bb33777018b9..d462222f4adc2136b7fde4915fe2ed2db9a25849 100644 (file)
@@ -135,6 +135,7 @@ static int pcpu_unit_size __ro_after_init;
 static int pcpu_nr_units __ro_after_init;
 static int pcpu_atom_size __ro_after_init;
 int pcpu_nr_slots __ro_after_init;
+int pcpu_free_slot __ro_after_init;
 static size_t pcpu_chunk_struct_size __ro_after_init;
 
 /* cpus with the lowest and highest unit addresses */
@@ -237,7 +238,7 @@ static int __pcpu_size_to_slot(int size)
 static int pcpu_size_to_slot(int size)
 {
        if (size == pcpu_unit_size)
-               return pcpu_nr_slots - 1;
+               return pcpu_free_slot;
        return __pcpu_size_to_slot(size);
 }
 
@@ -1806,7 +1807,7 @@ restart:
                goto fail;
        }
 
-       if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
+       if (list_empty(&pcpu_slot[pcpu_free_slot])) {
                chunk = pcpu_create_chunk(type, pcpu_gfp);
                if (!chunk) {
                        err = "failed to allocate new chunk";
@@ -1958,7 +1959,7 @@ static void pcpu_balance_free(enum pcpu_chunk_type type)
 {
        LIST_HEAD(to_free);
        struct list_head *pcpu_slot = pcpu_chunk_list(type);
-       struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
+       struct list_head *free_head = &pcpu_slot[pcpu_free_slot];
        struct pcpu_chunk *chunk, *next;
 
        /*
@@ -2033,7 +2034,7 @@ retry_pop:
                                  0, PCPU_EMPTY_POP_PAGES_HIGH);
        }
 
-       for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) {
+       for (slot = pcpu_size_to_slot(PAGE_SIZE); slot <= pcpu_free_slot; slot++) {
                unsigned int nr_unpop = 0, rs, re;
 
                if (!nr_to_pop)
@@ -2140,7 +2141,7 @@ void free_percpu(void __percpu *ptr)
        if (chunk->free_bytes == pcpu_unit_size) {
                struct pcpu_chunk *pos;
 
-               list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
+               list_for_each_entry(pos, &pcpu_slot[pcpu_free_slot], list)
                        if (pos != chunk) {
                                need_balance = true;
                                break;
@@ -2562,7 +2563,8 @@ void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
         * Allocate chunk slots.  The additional last slot is for
         * empty chunks.
         */
-       pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
+       pcpu_free_slot = __pcpu_size_to_slot(pcpu_unit_size) + 1;
+       pcpu_nr_slots = pcpu_free_slot + 1;
        pcpu_chunk_lists = memblock_alloc(pcpu_nr_slots *
                                          sizeof(pcpu_chunk_lists[0]) *
                                          PCPU_NR_CHUNK_TYPES,