1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * pseries Memory Hotplug infrastructure.
5 * Copyright (C) 2008 Badari Pulavarty, IBM Corporation
8 #define pr_fmt(fmt) "pseries-hotplug-mem: " fmt
11 #include <linux/of_address.h>
12 #include <linux/memblock.h>
13 #include <linux/memory.h>
14 #include <linux/memory_hotplug.h>
15 #include <linux/slab.h>
17 #include <asm/firmware.h>
18 #include <asm/machdep.h>
19 #include <asm/sparsemem.h>
20 #include <asm/fadump.h>
21 #include <asm/drmem.h>
/*
 * Free a property allocated by dlpar_clone_property().
 * NOTE(review): the body (original lines 25-30) is missing from this
 * excerpt; presumably it frees prop->name, prop->value and prop itself
 * — confirm against the full source.
 */
24 static void dlpar_free_property(struct property *prop)
/*
 * Clone an existing device-tree property into a freshly allocated one,
 * with the value buffer grown to @prop_size bytes (the extra space is
 * zeroed by kzalloc so a caller can append to it).  Marks the clone
 * OF_DYNAMIC so the OF core knows it is dynamically allocated.
 * NOTE(review): several lines (allocation-failure returns, closing
 * braces) are missing from this excerpt — verify against full source.
 */
31 static struct property *dlpar_clone_property(struct property *prop,
34 struct property *new_prop;
36 new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
/* duplicate the name and allocate the (possibly larger) value buffer */
40 new_prop->name = kstrdup(prop->name, GFP_KERNEL);
41 new_prop->value = kzalloc(prop_size, GFP_KERNEL);
42 if (!new_prop->name || !new_prop->value) {
/* partial allocation: release whatever was obtained */
43 dlpar_free_property(new_prop);
/* copy only the original length; the tail stays zeroed */
47 memcpy(new_prop->value, prop->value, prop->length);
48 new_prop->length = prop_size;
50 of_property_set_flag(new_prop, OF_DYNAMIC);
/*
 * Look up (or create) the associativity-lookup-arrays index matching
 * @lmb_assoc.  On a hit, *aa_index is set to the matching array's
 * position.  On a miss, the ibm,associativity-lookup-arrays property is
 * cloned with room for one more array, the new associativity is
 * appended, and the property is updated in the device tree.
 * NOTE(review): loop body, match handling and returns are missing from
 * this excerpt — confirm control flow against the full source.
 */
54 static bool find_aa_index(struct device_node *dr_node,
55 struct property *ala_prop,
56 const u32 *lmb_assoc, u32 *aa_index)
58 u32 *assoc_arrays, new_prop_size;
59 struct property *new_prop;
60 int aa_arrays, aa_array_entries, aa_array_sz;
64 * The ibm,associativity-lookup-arrays property is defined to be
65 * a 32-bit value specifying the number of associativity arrays
66 * followed by a 32-bit value specifying the number of entries per
67 * array, followed by the associativity arrays.
/* property values are big-endian on pseries */
69 assoc_arrays = ala_prop->value;
71 aa_arrays = be32_to_cpu(assoc_arrays[0]);
72 aa_array_entries = be32_to_cpu(assoc_arrays[1]);
73 aa_array_sz = aa_array_entries * sizeof(u32);
/* scan existing arrays for one matching lmb_assoc[1..] */
75 for (i = 0; i < aa_arrays; i++) {
/* +2 skips the two header words (count, entries-per-array) */
76 index = (i * aa_array_entries) + 2;
78 if (memcmp(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz))
/* no match: grow the property by one array and append */
85 new_prop_size = ala_prop->length + aa_array_sz;
86 new_prop = dlpar_clone_property(ala_prop, new_prop_size);
90 assoc_arrays = new_prop->value;
92 /* increment the number of entries in the lookup array */
93 assoc_arrays[0] = cpu_to_be32(aa_arrays + 1);
95 /* copy the new associativity into the lookup array */
96 index = aa_arrays * aa_array_entries + 2;
97 memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz);
99 of_update_property(dr_node, new_prop);
102 * The associativity lookup array index for this lmb is
103 * number of entries - 1 since we added its associativity
104 * to the end of the lookup array.
106 *aa_index = be32_to_cpu(assoc_arrays[0]) - 1;
/*
 * Resolve and store the associativity lookup-array index for @lmb.
 * Configures the LMB's DRC connector to obtain its ibm,associativity,
 * updates NUMA distance info, then finds/creates the matching entry in
 * the dynamic-reconfiguration-memory node's lookup arrays.
 * NOTE(review): error-check lines and returns are missing from this
 * excerpt; each failure path appears to drop node references —
 * confirm against the full source.
 */
110 static int update_lmb_associativity_index(struct drmem_lmb *lmb)
112 struct device_node *parent, *lmb_node, *dr_node;
113 struct property *ala_prop;
114 const u32 *lmb_assoc;
118 parent = of_find_node_by_path("/");
/* build a device node for this LMB from the DRC index */
122 lmb_node = dlpar_configure_connector(cpu_to_be32(lmb->drc_index),
128 lmb_assoc = of_get_property(lmb_node, "ibm,associativity", NULL);
/* cleanup of the configured-connector nodes on this path */
130 dlpar_free_cc_nodes(lmb_node);
134 update_numa_distance(lmb_node);
136 dr_node = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
138 dlpar_free_cc_nodes(lmb_node);
142 ala_prop = of_find_property(dr_node, "ibm,associativity-lookup-arrays",
145 of_node_put(dr_node);
146 dlpar_free_cc_nodes(lmb_node);
150 found = find_aa_index(dr_node, ala_prop, lmb_assoc, &aa_index);
152 of_node_put(dr_node);
153 dlpar_free_cc_nodes(lmb_node);
156 pr_err("Could not find LMB associativity\n");
/* cache the resolved index on the LMB for later DT updates */
160 lmb->aa_index = aa_index;
/*
 * Map an LMB's base address to its memory_block device.
 * NOTE(review): the returned block holds a device reference that the
 * callers visible below release with put_device() — confirm.
 */
164 static struct memory_block *lmb_to_memblock(struct drmem_lmb *lmb)
166 unsigned long section_nr;
167 struct memory_block *mem_block;
/* convert physical base address -> pfn -> memory section number */
169 section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
171 mem_block = find_memory_block(section_nr);
/*
 * Locate the contiguous run of @n_lmbs drmem LMBs starting at the LMB
 * whose DRC index is @drc_index, returning the [start, end) pair via
 * the out parameters.  @limit bounds end against the drmem array.
 * NOTE(review): the validation against @limit and the return values are
 * missing from this excerpt — confirm against the full source.
 */
175 static int get_lmb_range(u32 drc_index, int n_lmbs,
176 struct drmem_lmb **start_lmb,
177 struct drmem_lmb **end_lmb)
179 struct drmem_lmb *lmb, *start, *end;
180 struct drmem_lmb *limit;
/* linear scan of the drmem LMB array for the starting DRC index */
183 for_each_drmem_lmb(lmb) {
184 if (lmb->drc_index == drc_index) {
/* end is exclusive: one past the last requested LMB */
193 end = &start[n_lmbs];
195 limit = &drmem_info->lmbs[drmem_info->n_lmbs];
/*
 * Online or offline the memory_block backing @lmb.  A request that
 * matches the block's current state is a no-op.  Drops the device
 * reference taken by lmb_to_memblock().
 */
204 static int dlpar_change_lmb_state(struct drmem_lmb *lmb, bool online)
206 struct memory_block *mem_block;
209 mem_block = lmb_to_memblock(lmb);
/* only transition when the current state differs from the request */
213 if (online && mem_block->dev.offline)
214 rc = device_online(&mem_block->dev);
215 else if (!online && !mem_block->dev.offline)
216 rc = device_offline(&mem_block->dev);
/* release the reference obtained via lmb_to_memblock() */
220 put_device(&mem_block->dev);
/* Convenience wrapper: bring the LMB's memory block online. */
225 static int dlpar_online_lmb(struct drmem_lmb *lmb)
227 return dlpar_change_lmb_state(lmb, true);
230 #ifdef CONFIG_MEMORY_HOTREMOVE
/* Convenience wrapper: take the LMB's memory block offline. */
231 static int dlpar_offline_lmb(struct drmem_lmb *lmb)
233 return dlpar_change_lmb_state(lmb, false);
/*
 * Remove @memblock_size bytes of memory starting at @base, one
 * MIN_MEMORY_BLOCK_SIZE section at a time, then drop the range from
 * the memblock allocator.  Runs under the device-hotplug lock.
 * NOTE(review): returns and the pfn_valid bail-out path are missing
 * from this excerpt.  Also note memblock_remove() is called with the
 * post-loop (advanced) value of @base — in the upstream source the
 * loop-advanced address is not the removal base; confirm this is not
 * an extraction artifact.
 */
236 static int pseries_remove_memblock(unsigned long base, unsigned long memblock_size)
238 unsigned long start_pfn;
239 int sections_per_block;
242 start_pfn = base >> PAGE_SHIFT;
244 lock_device_hotplug();
246 if (!pfn_valid(start_pfn))
249 sections_per_block = memory_block_size / MIN_MEMORY_BLOCK_SIZE;
/* tear down the range in minimum-sized memory blocks */
251 for (i = 0; i < sections_per_block; i++) {
252 __remove_memory(base, MIN_MEMORY_BLOCK_SIZE);
253 base += MIN_MEMORY_BLOCK_SIZE;
257 /* Update memory regions for memory remove */
258 memblock_remove(base, memblock_size);
259 unlock_device_hotplug();
/*
 * OF-reconfig helper: when a "memory" device-tree node is detached,
 * remove the memory range it describes.  Non-memory nodes and address
 * translation failures are ignored (error paths elided in excerpt).
 */
263 static int pseries_remove_mem_node(struct device_node *np)
269 * Check to see if we are actually removing memory
271 if (!of_node_is_type(np, "memory"))
275 * Find the base address and size of the memblock
277 ret = of_address_to_resource(np, 0, &res);
281 pseries_remove_memblock(res.start, resource_size(&res));
/*
 * Report whether @lmb is a candidate for hot-removal: it must be
 * assigned, not reserved, and (with fadump) not overlap the firmware-
 * assisted dump boot/preserved memory area.  The final word rests with
 * device_offline(), which this function does not call.
 */
287 static bool lmb_is_removable(struct drmem_lmb *lmb)
/* reserved or unassigned LMBs can never be removed */
288 if ((lmb->flags & DRCONF_MEM_RESERVED) ||
288 !(lmb->flags & DRCONF_MEM_ASSIGNED))
291 #ifdef CONFIG_FA_DUMP
293 * Don't hot-remove memory that falls in fadump boot memory area
294 * and memory that is reserved for capturing old kernel memory.
296 if (is_fadump_memory_area(lmb->base_addr, memory_block_size_bytes()))
299 /* device_offline() will determine if we can actually remove this lmb */
303 static int dlpar_add_lmb(struct drmem_lmb *);
/*
 * Fully hot-remove one LMB: offline its memory block, remove the
 * memory from the kernel and memblock, invalidate its associativity
 * index, and clear DRCONF_MEM_ASSIGNED.
 * NOTE(review): the error-return lines between steps are missing from
 * this excerpt — confirm ordering and cleanup against the full source.
 */
305 static int dlpar_remove_lmb(struct drmem_lmb *lmb)
307 struct memory_block *mem_block;
310 if (!lmb_is_removable(lmb))
313 mem_block = lmb_to_memblock(lmb);
314 if (mem_block == NULL)
317 rc = dlpar_offline_lmb(lmb);
/* offline failed: drop the device ref (early-exit path) */
319 put_device(&mem_block->dev);
323 __remove_memory(lmb->base_addr, memory_block_size);
324 put_device(&mem_block->dev);
326 /* Update memory regions for memory remove */
327 memblock_remove(lmb->base_addr, memory_block_size);
/* LMB no longer owned by this partition */
329 invalidate_lmb_associativity_index(lmb);
330 lmb->flags &= ~DRCONF_MEM_ASSIGNED;
/*
 * Hot-remove @lmbs_to_remove LMBs, chosen from any removable LMBs.
 * Two-phase: first validate enough candidates exist, then remove and
 * mark each as reserved.  If the full count cannot be removed, all
 * removed LMBs are added back; otherwise the DRCs are released.
 * NOTE(review): braces, returns and some error handling are missing
 * from this excerpt.
 */
335 static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
337 struct drmem_lmb *lmb;
338 int lmbs_reserved = 0;
339 int lmbs_available = 0;
342 pr_info("Attempting to hot-remove %d LMB(s)\n", lmbs_to_remove);
344 if (lmbs_to_remove == 0)
347 /* Validate that there are enough LMBs to satisfy the request */
348 for_each_drmem_lmb(lmb) {
349 if (lmb_is_removable(lmb))
/* stop counting once the request can be satisfied */
352 if (lmbs_available == lmbs_to_remove)
356 if (lmbs_available < lmbs_to_remove) {
357 pr_info("Not enough LMBs available (%d of %d) to satisfy request\n",
358 lmbs_available, lmbs_to_remove);
/* phase 2: actually remove LMBs until the count is reached */
362 for_each_drmem_lmb(lmb) {
363 rc = dlpar_remove_lmb(lmb);
367 /* Mark this lmb so we can add it later if all of the
368 * requested LMBs cannot be removed.
370 drmem_mark_lmb_reserved(lmb);
373 if (lmbs_reserved == lmbs_to_remove)
/* rollback: re-add everything we removed if the count fell short */
377 if (lmbs_reserved != lmbs_to_remove) {
378 pr_err("Memory hot-remove failed, adding LMB's back\n");
380 for_each_drmem_lmb(lmb) {
381 if (!drmem_lmb_reserved(lmb))
384 rc = dlpar_add_lmb(lmb);
386 pr_err("Failed to add LMB back, drc index %x\n",
389 drmem_remove_lmb_reservation(lmb);
392 if (lmbs_reserved == 0)
/* success: release the DRCs back to firmware */
398 for_each_drmem_lmb(lmb) {
399 if (!drmem_lmb_reserved(lmb))
402 dlpar_release_drc(lmb->drc_index);
403 pr_info("Memory at %llx was hot-removed\n",
406 drmem_remove_lmb_reservation(lmb);
409 if (lmbs_reserved == 0)
/*
 * Hot-remove the single LMB identified by @drc_index, releasing its
 * DRC on success.  (Not-found and return paths elided in excerpt.)
 */
418 static int dlpar_memory_remove_by_index(u32 drc_index)
420 struct drmem_lmb *lmb;
424 pr_debug("Attempting to hot-remove LMB, drc index %x\n", drc_index);
/* find the LMB with the matching DRC index */
427 for_each_drmem_lmb(lmb) {
428 if (lmb->drc_index == drc_index) {
430 rc = dlpar_remove_lmb(lmb);
432 dlpar_release_drc(lmb->drc_index);
442 pr_debug("Failed to hot-remove memory at %llx\n",
445 pr_debug("Memory at %llx was hot-removed\n", lmb->base_addr);
/*
 * Hot-remove a contiguous range of @lmbs_to_remove LMBs starting at
 * @drc_index ("indexed-count" form).  Unlike remove_by_count(),
 * already-!ASSIGNED LMBs in the range are tolerated as no-ops; only
 * RESERVED LMBs abort up front.  On partial failure every removed LMB
 * is added back (with an UNISOLATE hint to the hypervisor).
 * NOTE(review): braces, returns and some error handling are missing
 * from this excerpt.
 */
450 static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
452 struct drmem_lmb *lmb, *start_lmb, *end_lmb;
455 pr_info("Attempting to hot-remove %u LMB(s) at %x\n",
456 lmbs_to_remove, drc_index);
458 if (lmbs_to_remove == 0)
461 rc = get_lmb_range(drc_index, lmbs_to_remove, &start_lmb, &end_lmb);
466 * Validate that all LMBs in range are not reserved. Note that it
467 * is ok if they are !ASSIGNED since our goal here is to remove the
468 * LMB range, regardless of whether some LMBs were already removed
469 * by any other reason.
471 * This is a contrast to what is done in remove_by_count() where we
472 * check for both RESERVED and !ASSIGNED (via lmb_is_removable()),
473 * because we want to remove a fixed amount of LMBs in that function.
475 for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
476 if (lmb->flags & DRCONF_MEM_RESERVED) {
477 pr_err("Memory at %llx (drc index %x) is reserved\n",
478 lmb->base_addr, lmb->drc_index);
/* removal pass over the validated range */
483 for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
485 * dlpar_remove_lmb() will error out if the LMB is already
486 * !ASSIGNED, but this case is a no-op for us.
488 if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
491 rc = dlpar_remove_lmb(lmb);
495 drmem_mark_lmb_reserved(lmb);
/* rollback path: re-add removed LMBs after a mid-range failure */
499 pr_err("Memory indexed-count-remove failed, adding any removed LMBs\n");
502 for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
503 if (!drmem_lmb_reserved(lmb))
507 * Setting the isolation state of an UNISOLATED/CONFIGURED
508 * device to UNISOLATE is a no-op, but the hypervisor can
509 * use it as a hint that the LMB removal failed.
511 dlpar_unisolate_drc(lmb->drc_index);
513 rc = dlpar_add_lmb(lmb);
515 pr_err("Failed to add LMB, drc index %x\n",
518 drmem_remove_lmb_reservation(lmb);
/* success path: release DRCs for everything removed */
522 for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
523 if (!drmem_lmb_reserved(lmb))
526 dlpar_release_drc(lmb->drc_index);
527 pr_info("Memory at %llx (drc index %x) was hot-removed\n",
528 lmb->base_addr, lmb->drc_index);
530 drmem_remove_lmb_reservation(lmb);
/*
 * !CONFIG_MEMORY_HOTREMOVE stubs: hot-remove entry points compiled out.
 * NOTE(review): stub bodies (presumably returning -EOPNOTSUPP or
 * similar) are missing from this excerpt — confirm return values
 * against the full source.
 */
538 static inline int pseries_remove_memblock(unsigned long base,
539 unsigned long memblock_size)
543 static inline int pseries_remove_mem_node(struct device_node *np)
547 static int dlpar_remove_lmb(struct drmem_lmb *lmb)
551 static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
555 static int dlpar_memory_remove_by_index(u32 drc_index)
560 static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
564 #endif /* CONFIG_MEMORY_HOTREMOVE */
/*
 * Fully hot-add one LMB: resolve its associativity index, pick a NUMA
 * node, add and online the memory, then mark it DRCONF_MEM_ASSIGNED.
 * On failure after __add_memory(), the memory is removed again and the
 * associativity index invalidated.
 * NOTE(review): several error-return lines are elided in this excerpt.
 */
566 static int dlpar_add_lmb(struct drmem_lmb *lmb)
568 unsigned long block_sz;
/* already owned: nothing to do */
571 if (lmb->flags & DRCONF_MEM_ASSIGNED)
574 rc = update_lmb_associativity_index(lmb);
/* associativity lookup failed: give the DRC back to firmware */
576 dlpar_release_drc(lmb->drc_index);
580 block_sz = memory_block_size_bytes();
582 /* Find the node id for this LMB. Fake one if necessary. */
583 nid = of_drconf_to_nid_single(lmb);
584 if (nid < 0 || !node_possible(nid))
585 nid = first_online_node;
588 rc = __add_memory(nid, lmb->base_addr, block_sz, MHP_MEMMAP_ON_MEMORY);
590 invalidate_lmb_associativity_index(lmb);
594 rc = dlpar_online_lmb(lmb);
/* online failed: unwind the __add_memory() above */
596 __remove_memory(lmb->base_addr, block_sz);
597 invalidate_lmb_associativity_index(lmb);
599 lmb->flags |= DRCONF_MEM_ASSIGNED;
/*
 * Hot-add @lmbs_to_add LMBs, chosen from any unassigned, unreserved
 * LMBs.  Mirror image of dlpar_memory_remove_by_count(): validate
 * availability, acquire DRC + add each LMB, and on a shortfall remove
 * everything that was added.
 * NOTE(review): braces and returns are missing from this excerpt.
 */
605 static int dlpar_memory_add_by_count(u32 lmbs_to_add)
607 struct drmem_lmb *lmb;
608 int lmbs_available = 0;
609 int lmbs_reserved = 0;
612 pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);
614 if (lmbs_to_add == 0)
617 /* Validate that there are enough LMBs to satisfy the request */
618 for_each_drmem_lmb(lmb) {
619 if (lmb->flags & DRCONF_MEM_RESERVED)
/* only LMBs not already assigned count as available */
622 if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
625 if (lmbs_available == lmbs_to_add)
629 if (lmbs_available < lmbs_to_add)
/* acquisition pass: take the DRC, then add the memory */
632 for_each_drmem_lmb(lmb) {
633 if (lmb->flags & DRCONF_MEM_ASSIGNED)
636 rc = dlpar_acquire_drc(lmb->drc_index);
640 rc = dlpar_add_lmb(lmb);
642 dlpar_release_drc(lmb->drc_index);
646 /* Mark this lmb so we can remove it later if all of the
647 * requested LMBs cannot be added.
649 drmem_mark_lmb_reserved(lmb);
651 if (lmbs_reserved == lmbs_to_add)
/* rollback: strip out every LMB added so far */
655 if (lmbs_reserved != lmbs_to_add) {
656 pr_err("Memory hot-add failed, removing any added LMBs\n");
658 for_each_drmem_lmb(lmb) {
659 if (!drmem_lmb_reserved(lmb))
662 rc = dlpar_remove_lmb(lmb);
664 pr_err("Failed to remove LMB, drc index %x\n",
667 dlpar_release_drc(lmb->drc_index);
669 drmem_remove_lmb_reservation(lmb);
672 if (lmbs_reserved == 0)
/* success: clear reservations and report each added LMB */
677 for_each_drmem_lmb(lmb) {
678 if (!drmem_lmb_reserved(lmb))
681 pr_debug("Memory at %llx (drc index %x) was hot-added\n",
682 lmb->base_addr, lmb->drc_index);
683 drmem_remove_lmb_reservation(lmb);
686 if (lmbs_reserved == 0)
/*
 * Hot-add the single LMB identified by @drc_index: acquire its DRC,
 * add the memory, release the DRC on add failure.  (Not-found and
 * return paths elided in excerpt.)
 */
695 static int dlpar_memory_add_by_index(u32 drc_index)
697 struct drmem_lmb *lmb;
700 pr_info("Attempting to hot-add LMB, drc index %x\n", drc_index);
703 for_each_drmem_lmb(lmb) {
704 if (lmb->drc_index == drc_index) {
706 rc = dlpar_acquire_drc(lmb->drc_index);
708 rc = dlpar_add_lmb(lmb);
/* add failed: return the DRC to firmware */
710 dlpar_release_drc(lmb->drc_index);
721 pr_info("Failed to hot-add memory, drc index %x\n", drc_index);
723 pr_info("Memory at %llx (drc index %x) was hot-added\n",
724 lmb->base_addr, drc_index);
/*
 * Hot-add a contiguous range of @lmbs_to_add LMBs starting at
 * @drc_index ("indexed-count" form).  The whole range must be free of
 * RESERVED LMBs up front; already-ASSIGNED LMBs in range are skipped.
 * On a mid-range failure every LMB added here is removed again.
 * NOTE(review): braces, returns and some error handling are missing
 * from this excerpt.
 */
729 static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 drc_index)
731 struct drmem_lmb *lmb, *start_lmb, *end_lmb;
734 pr_info("Attempting to hot-add %u LMB(s) at index %x\n",
735 lmbs_to_add, drc_index);
737 if (lmbs_to_add == 0)
740 rc = get_lmb_range(drc_index, lmbs_to_add, &start_lmb, &end_lmb);
744 /* Validate that the LMBs in this range are not reserved */
745 for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
746 /* Fail immediately if the whole range can't be hot-added */
747 if (lmb->flags & DRCONF_MEM_RESERVED) {
748 pr_err("Memory at %llx (drc index %x) is reserved\n",
749 lmb->base_addr, lmb->drc_index);
/* add pass over the validated range */
754 for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
755 if (lmb->flags & DRCONF_MEM_ASSIGNED)
758 rc = dlpar_acquire_drc(lmb->drc_index);
762 rc = dlpar_add_lmb(lmb);
764 dlpar_release_drc(lmb->drc_index);
768 drmem_mark_lmb_reserved(lmb);
/* rollback path: remove anything added before the failure */
772 pr_err("Memory indexed-count-add failed, removing any added LMBs\n");
774 for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
775 if (!drmem_lmb_reserved(lmb))
778 rc = dlpar_remove_lmb(lmb);
780 pr_err("Failed to remove LMB, drc index %x\n",
783 dlpar_release_drc(lmb->drc_index);
785 drmem_remove_lmb_reservation(lmb);
/* success path: clear reservations and report */
789 for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
790 if (!drmem_lmb_reserved(lmb))
793 pr_info("Memory at %llx (drc index %x) was hot-added\n",
794 lmb->base_addr, lmb->drc_index);
795 drmem_remove_lmb_reservation(lmb);
/*
 * Top-level DLPAR memory entry point: dispatch a hotplug error-log
 * request (add/remove, by count/index/indexed-count) to the matching
 * handler, then rewrite the drmem device-tree property.  Serialized
 * by the device-hotplug lock.
 * NOTE(review): break statements and the final return are elided in
 * this excerpt; drmem_update_dt() appears to run on the common exit
 * path — confirm whether it is skipped on failure in the full source.
 */
802 int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
804 u32 count, drc_index;
807 lock_device_hotplug();
809 switch (hp_elog->action) {
810 case PSERIES_HP_ELOG_ACTION_ADD:
811 switch (hp_elog->id_type) {
812 case PSERIES_HP_ELOG_ID_DRC_COUNT:
813 count = hp_elog->_drc_u.drc_count;
814 rc = dlpar_memory_add_by_count(count);
816 case PSERIES_HP_ELOG_ID_DRC_INDEX:
817 drc_index = hp_elog->_drc_u.drc_index;
818 rc = dlpar_memory_add_by_index(drc_index);
820 case PSERIES_HP_ELOG_ID_DRC_IC:
821 count = hp_elog->_drc_u.ic.count;
822 drc_index = hp_elog->_drc_u.ic.index;
823 rc = dlpar_memory_add_by_ic(count, drc_index);
831 case PSERIES_HP_ELOG_ACTION_REMOVE:
832 switch (hp_elog->id_type) {
833 case PSERIES_HP_ELOG_ID_DRC_COUNT:
834 count = hp_elog->_drc_u.drc_count;
835 rc = dlpar_memory_remove_by_count(count);
837 case PSERIES_HP_ELOG_ID_DRC_INDEX:
838 drc_index = hp_elog->_drc_u.drc_index;
839 rc = dlpar_memory_remove_by_index(drc_index);
841 case PSERIES_HP_ELOG_ID_DRC_IC:
842 count = hp_elog->_drc_u.ic.count;
843 drc_index = hp_elog->_drc_u.ic.index;
844 rc = dlpar_memory_remove_by_ic(count, drc_index);
853 pr_err("Invalid action (%d) specified\n", hp_elog->action);
/* push the updated LMB state back into the device tree */
859 rc = drmem_update_dt();
861 unlock_device_hotplug();
/*
 * OF-reconfig helper: when a "memory" device-tree node is attached,
 * register its range with the memblock allocator.  Returns 0 on
 * success, -EINVAL if memblock_add() fails.
 */
865 static int pseries_add_mem_node(struct device_node *np)
871 * Check to see if we are actually adding memory
873 if (!of_node_is_type(np, "memory"))
877 * Find the base and size of the memblock
879 ret = of_address_to_resource(np, 0, &res);
884 * Update memory region to represent the memory add
886 ret = memblock_add(res.start, resource_size(&res));
887 return (ret < 0) ? -EINVAL : 0;
/*
 * OF reconfiguration notifier: keep memblock and drmem state in sync
 * with device-tree node attach/detach and with updates to the
 * ibm,dynamic-reconfiguration-memory property.
 */
890 static int pseries_memory_notifier(struct notifier_block *nb,
891 unsigned long action, void *data)
893 struct of_reconfig_data *rd = data;
897 case OF_RECONFIG_ATTACH_NODE:
898 err = pseries_add_mem_node(rd->dn);
900 case OF_RECONFIG_DETACH_NODE:
901 err = pseries_remove_mem_node(rd->dn);
903 case OF_RECONFIG_UPDATE_PROPERTY:
/* drmem property changed: refresh the in-kernel LMB array */
904 if (!strcmp(rd->dn->name,
905 "ibm,dynamic-reconfiguration-memory"))
906 drmem_update_lmbs(rd->prop);
908 return notifier_from_errno(err);
/* Notifier block registered for OF reconfiguration events. */
911 static struct notifier_block pseries_mem_nb = {
912 .notifier_call = pseries_memory_notifier,
/*
 * Init: register the memory notifier, but only on LPAR (PAPR) systems
 * where dynamic reconfiguration is available.
 */
915 static int __init pseries_memory_hotplug_init(void)
917 if (firmware_has_feature(FW_FEATURE_LPAR))
918 of_reconfig_notifier_register(&pseries_mem_nb);
922 machine_device_initcall(pseries, pseries_memory_hotplug_init);