mm, memory_hotplug: add nid parameter to arch_remove_memory
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 61972da38d93cb54d6f1088df186ed20bb0f98bb..0718cf7427b20a1f788ea900654e73ab8fd8d3a0 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -586,6 +586,7 @@ int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
        for (i = 0; i < sections_to_remove; i++) {
                unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
 
+               cond_resched();
                ret = __remove_section(zone, __pfn_to_section(pfn), map_offset,
                                altmap);
                map_offset = 0;
@@ -1077,7 +1078,7 @@ static int online_memory_block(struct memory_block *mem, void *arg)
  *
  * we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG
  */
-int __ref add_memory_resource(int nid, struct resource *res, bool online)
+int __ref add_memory_resource(int nid, struct resource *res)
 {
        u64 start, size;
        bool new_node = false;
@@ -1132,7 +1133,7 @@ int __ref add_memory_resource(int nid, struct resource *res, bool online)
        mem_hotplug_done();
 
        /* online pages if requested */
-       if (online)
+       if (memhp_auto_online)
                walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1),
                                  NULL, online_memory_block);
 
@@ -1156,7 +1157,7 @@ int __ref __add_memory(int nid, u64 start, u64 size)
        if (IS_ERR(res))
                return PTR_ERR(res);
 
-       ret = add_memory_resource(nid, res, memhp_auto_online);
+       ret = add_memory_resource(nid, res);
        if (ret < 0)
                release_memory_resource(res);
        return ret;
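
The add_memory_resource() hunks above narrow its interface: the per-call 'online' flag is gone and the decision moves inside the function, driven by the global memhp_auto_online (the /sys/devices/system/memory/auto_online_blocks setting). A minimal sketch of a hypothetical external caller against the presumed resulting declaration; the helper name and surrounding setup are illustrative and not taken from this patch:

	/* presumed declaration after this change, inferred from the call sites above */
	int add_memory_resource(int nid, struct resource *res);

	/* hypothetical driver-side caller; the resource is assumed to be
	 * already allocated and registered in the usual way */
	static int example_add_ram(int nid, struct resource *res)
	{
		/*
		 * No 'online' argument any more: whether the new memory blocks
		 * are onlined right away now follows memhp_auto_online instead
		 * of a per-call flag.
		 */
		return add_memory_resource(nid, res);
	}
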
@@ -1225,7 +1226,7 @@ static bool is_pageblock_removable_nolock(struct page *page)
        if (!zone_spans_pfn(zone, pfn))
                return false;
 
-       return !has_unmovable_pages(zone, page, 0, MIGRATE_MOVABLE, true);
+       return !has_unmovable_pages(zone, page, 0, MIGRATE_MOVABLE, SKIP_HWPOISON);
 }
 
 /* Checks if this range of memory is likely to be hot-removable. */
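
In the hunk above (and again in the start_isolate_page_range() hunk further down), the final bool argument becomes a flags word: SKIP_HWPOISON keeps the old behaviour of tolerating hwpoisoned pages in the range, and REPORT_FAILURE additionally asks for the offending page to be reported when isolation fails. A sketch of what the flag definitions presumably look like, inferred from their use here rather than copied from include/linux/page-isolation.h:

	/* presumed isolation flags; values shown are illustrative */
	#define SKIP_HWPOISON	0x1	/* skip hwpoisoned pages during the check */
	#define REPORT_FAILURE	0x2	/* report the page that blocked isolation */
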
@@ -1387,10 +1388,8 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
                                                    page_is_file_cache(page));
 
                } else {
-#ifdef CONFIG_DEBUG_VM
-                       pr_alert("failed to isolate pfn %lx\n", pfn);
+                       pr_warn("failed to isolate pfn %lx\n", pfn);
                        dump_page(page, "isolation failed");
-#endif
                        put_page(page);
                        /* Because we don't have big zone->lock. we should
                           check this again here. */
@@ -1410,8 +1409,14 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
                /* Allocate a new page from the nearest neighbor node */
                ret = migrate_pages(&source, new_node_page, NULL, 0,
                                        MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
-               if (ret)
+               if (ret) {
+                       list_for_each_entry(page, &source, lru) {
+                               pr_warn("migrating pfn %lx failed ret:%d ",
+                                      page_to_pfn(page), ret);
+                               dump_page(page, "migration failure");
+                       }
                        putback_movable_pages(&source);
+               }
        }
 out:
        return ret;
@@ -1552,12 +1557,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
        unsigned long valid_start, valid_end;
        struct zone *zone;
        struct memory_notify arg;
-
-       /* at least, alignment against pageblock is necessary */
-       if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
-               return -EINVAL;
-       if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
-               return -EINVAL;
+       char *reason;
 
        mem_hotplug_begin();
 
@@ -1566,7 +1566,9 @@ static int __ref __offline_pages(unsigned long start_pfn,
        if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start,
                                  &valid_end)) {
                mem_hotplug_done();
-               return -EINVAL;
+               ret = -EINVAL;
+               reason = "multizone range";
+               goto failed_removal;
        }
 
        zone = page_zone(pfn_to_page(valid_start));
@@ -1575,10 +1577,12 @@ static int __ref __offline_pages(unsigned long start_pfn,
 
        /* set above range as isolated */
        ret = start_isolate_page_range(start_pfn, end_pfn,
-                                      MIGRATE_MOVABLE, true);
+                                      MIGRATE_MOVABLE,
+                                      SKIP_HWPOISON | REPORT_FAILURE);
        if (ret) {
                mem_hotplug_done();
-               return ret;
+               reason = "failure to isolate range";
+               goto failed_removal;
        }
 
        arg.start_pfn = start_pfn;
@@ -1587,15 +1591,19 @@ static int __ref __offline_pages(unsigned long start_pfn,
 
        ret = memory_notify(MEM_GOING_OFFLINE, &arg);
        ret = notifier_to_errno(ret);
-       if (ret)
-               goto failed_removal;
+       if (ret) {
+               reason = "notifier failure";
+               goto failed_removal_isolated;
+       }
 
        pfn = start_pfn;
 repeat:
        /* start memory hot removal */
        ret = -EINTR;
-       if (signal_pending(current))
-               goto failed_removal;
+       if (signal_pending(current)) {
+               reason = "signal backoff";
+               goto failed_removal_isolated;
+       }
 
        cond_resched();
        lru_add_drain_all();
@@ -1612,8 +1620,10 @@ repeat:
         * actually in order to make hugetlbfs's object counting consistent.
         */
        ret = dissolve_free_huge_pages(start_pfn, end_pfn);
-       if (ret)
-               goto failed_removal;
+       if (ret) {
+               reason = "failure to dissolve huge pages";
+               goto failed_removal_isolated;
+       }
        /* check again */
        offlined_pages = check_pages_isolated(start_pfn, end_pfn);
        if (offlined_pages < 0)
@@ -1653,13 +1663,15 @@ repeat:
        mem_hotplug_done();
        return 0;
 
+failed_removal_isolated:
+       undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 failed_removal:
-       pr_debug("memory offlining [mem %#010llx-%#010llx] failed\n",
+       pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n",
                 (unsigned long long) start_pfn << PAGE_SHIFT,
-                ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
+                ((unsigned long long) end_pfn << PAGE_SHIFT) - 1,
+                reason);
        memory_notify(MEM_CANCEL_OFFLINE, &arg);
        /* pushback to free area */
-       undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
        mem_hotplug_done();
        return ret;
 }
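
Taken together, the __offline_pages() hunks above funnel every failure through two labels and record a human-readable reason for the pr_debug message. A condensed view of the resulting error path, reconstructed only from the hunks above (context elided with "..."):

	failed_removal_isolated:
		/* only reached once start_isolate_page_range() has succeeded */
		undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
	failed_removal:
		pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n",
			 ..., reason);
		memory_notify(MEM_CANCEL_OFFLINE, &arg);
		mem_hotplug_done();
		return ret;

Exits taken before the range was successfully isolated ("multizone range", "failure to isolate range") go straight to failed_removal; later exits ("notifier failure", "signal backoff", "failure to dissolve huge pages") use failed_removal_isolated so the pageblocks are put back to MIGRATE_MOVABLE before the common cleanup runs.
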
@@ -1752,34 +1764,6 @@ static int check_cpu_on_node(pg_data_t *pgdat)
        return 0;
 }
 
-static void unmap_cpu_on_node(pg_data_t *pgdat)
-{
-#ifdef CONFIG_ACPI_NUMA
-       int cpu;
-
-       for_each_possible_cpu(cpu)
-               if (cpu_to_node(cpu) == pgdat->node_id)
-                       numa_clear_node(cpu);
-#endif
-}
-
-static int check_and_unmap_cpu_on_node(pg_data_t *pgdat)
-{
-       int ret;
-
-       ret = check_cpu_on_node(pgdat);
-       if (ret)
-               return ret;
-
-       /*
-        * the node will be offlined when we come here, so we can clear
-        * the cpu_to_node() now.
-        */
-
-       unmap_cpu_on_node(pgdat);
-       return 0;
-}
-
 /**
  * try_offline_node
  * @nid: the node ID
@@ -1812,7 +1796,7 @@ void try_offline_node(int nid)
                return;
        }
 
-       if (check_and_unmap_cpu_on_node(pgdat))
+       if (check_cpu_on_node(pgdat))
                return;
 
        /*
@@ -1857,7 +1841,7 @@ void __ref __remove_memory(int nid, u64 start, u64 size)
        memblock_free(start, size);
        memblock_remove(start, size);
 
-       arch_remove_memory(start, size, NULL);
+       arch_remove_memory(nid, start, size, NULL);
 
        try_offline_node(nid);
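
The last hunk is the change named in the subject line: __remove_memory() now hands the node id down to arch_remove_memory() explicitly. Inferred from that call site (not copied from include/linux/memory_hotplug.h), the declaration presumably becomes:

	/* sketch of the presumed prototype; only the nid argument is new */
	int arch_remove_memory(int nid, u64 start, u64 size,
			       struct vmem_altmap *altmap);

How each architecture's implementation uses the new argument is outside this file.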