[RTNL]: Validate hardware and broadcast address attribute for RTM_NEWLINK
[sfrench/cifs-2.6.git] / mm / memory_hotplug.c
index 091b9c6c252973dd1cf5e89ffcf73de1afba173c..7469c503580dcf6402000cc339359175a9b417fc 100644 (file)
@@ -39,7 +39,7 @@ static struct resource *register_memory_resource(u64 start, u64 size)
        res->name = "System RAM";
        res->start = start;
        res->end = start + size - 1;
-       res->flags = IORESOURCE_MEM;
+       res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
        if (request_resource(&iomem_resource, res) < 0) {
                printk("System RAM resource %llx - %llx cannot be added\n",
                (unsigned long long)res->start, (unsigned long long)res->end);
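
Boot-time RAM is registered in the iomem tree as IORESOURCE_MEM | IORESOURCE_BUSY, so adding IORESOURCE_BUSY here makes hot-added ranges behave like boot memory: request_mem_region() on an overlapping window now fails outright instead of nesting a child under a non-busy "System RAM" entry. A minimal caller-side sketch of that effect; the function and device name are illustrative and not part of this patch:

#include <linux/ioport.h>

/* Sketch: claiming an MMIO window that overlaps hot-added System RAM is
 * refused, exactly as it is for boot-time RAM, because the "System RAM"
 * resource is now marked IORESOURCE_BUSY. */
static int example_claim_window(unsigned long base, unsigned long len)
{
        if (!request_mem_region(base, len, "example-device"))
                return -EBUSY;          /* overlaps busy System RAM */

        /* ... ioremap() and use the window here ... */

        release_mem_region(base, len);
        return 0;
}
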
@@ -121,7 +121,7 @@ int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
                err = __add_section(zone, i << PFN_SECTION_SHIFT);
 
                /*
-                * EEXIST is finally dealed with by ioresource collision
+                * EEXIST is finally dealt with by ioresource collision
                 * check. see add_memory() => register_memory_resource()
                 * Warning will be printed if there is collision.
                 */
@@ -187,7 +187,24 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
        unsigned long onlined_pages = 0;
        struct zone *zone;
        int need_zonelists_rebuild = 0;
+       int nid;
+       int ret;
+       struct memory_notify arg;
+
+       arg.start_pfn = pfn;
+       arg.nr_pages = nr_pages;
+       arg.status_change_nid = -1;
+
+       nid = page_to_nid(pfn_to_page(pfn));
+       if (node_present_pages(nid) == 0)
+               arg.status_change_nid = nid;
 
+       ret = memory_notify(MEM_GOING_ONLINE, &arg);
+       ret = notifier_to_errno(ret);
+       if (ret) {
+               memory_notify(MEM_CANCEL_ONLINE, &arg);
+               return ret;
+       }
        /*
         * This doesn't need a lock to do pfn_to_page().
         * The section can't be removed here because of the
@@ -222,6 +239,10 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
                build_all_zonelists();
        vm_total_pages = nr_free_pagecache_pages();
        writeback_set_ratelimit();
+
+       if (onlined_pages)
+               memory_notify(MEM_ONLINE, &arg);
+
        return 0;
 }
 #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
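
online_pages() now raises MEM_GOING_ONLINE before it touches the range, MEM_CANCEL_ONLINE when a notifier refuses, and MEM_ONLINE once the pages are actually usable, handing consumers the start_pfn/nr_pages/status_change_nid triple filled in above. A hedged sketch of such a consumer, assuming the hotplug_memory_notifier() helper and struct memory_notify from <linux/memory.h>; the callback name and message are illustrative:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memory.h>
#include <linux/notifier.h>

/* Illustrative callback for the MEM_* online events raised above. */
static int example_mem_callback(struct notifier_block *self,
                                unsigned long action, void *arg)
{
        struct memory_notify *mn = arg;

        switch (action) {
        case MEM_GOING_ONLINE:
                /* Prepare per-node state; status_change_nid >= 0 means a
                 * previously empty node is about to gain memory.  An error
                 * returned here aborts the online operation. */
                break;
        case MEM_CANCEL_ONLINE:
                /* Undo whatever MEM_GOING_ONLINE set up. */
                break;
        case MEM_ONLINE:
                printk(KERN_INFO "example: onlined %lu pages from pfn %lx\n",
                       mn->nr_pages, mn->start_pfn);
                break;
        }
        return NOTIFY_OK;
}

static int __init example_notifier_init(void)
{
        hotplug_memory_notifier(example_mem_callback, 0);
        return 0;
}
device_initcall(example_notifier_init);
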
@@ -460,15 +481,14 @@ check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
        return offlined;
 }
 
-extern void drain_all_local_pages(void);
-
 int offline_pages(unsigned long start_pfn,
                  unsigned long end_pfn, unsigned long timeout)
 {
        unsigned long pfn, nr_pages, expire;
        long offlined_pages;
-       int ret, drain, retry_max;
+       int ret, drain, retry_max, node;
        struct zone *zone;
+       struct memory_notify arg;
 
        BUG_ON(start_pfn >= end_pfn);
        /* at least, alignment against pageblock is necessary */
@@ -480,11 +500,27 @@ int offline_pages(unsigned long start_pfn,
           we assume this for now. */
        if (!test_pages_in_a_zone(start_pfn, end_pfn))
                return -EINVAL;
+
+       zone = page_zone(pfn_to_page(start_pfn));
+       node = zone_to_nid(zone);
+       nr_pages = end_pfn - start_pfn;
+
        /* set above range as isolated */
        ret = start_isolate_page_range(start_pfn, end_pfn);
        if (ret)
                return ret;
-       nr_pages = end_pfn - start_pfn;
+
+       arg.start_pfn = start_pfn;
+       arg.nr_pages = nr_pages;
+       arg.status_change_nid = -1;
+       if (nr_pages >= node_present_pages(node))
+               arg.status_change_nid = node;
+
+       ret = memory_notify(MEM_GOING_OFFLINE, &arg);
+       ret = notifier_to_errno(ret);
+       if (ret)
+               goto failed_removal;
+
        pfn = start_pfn;
        expire = jiffies + timeout;
        drain = 0;
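
The offline path mirrors this: MEM_GOING_OFFLINE is the one point where another subsystem can still veto the removal, MEM_CANCEL_OFFLINE is sent from the failed_removal path below, and MEM_OFFLINE confirms the pages are gone. A sketch of a vetoing callback, using the same includes as the online sketch above; example_range_busy() is a purely hypothetical predicate, and the notifier_from_errno(-EBUSY) return is what offline_pages() converts back to an errno via notifier_to_errno():

/* Hypothetical check: does this subsystem still pin pages in the range? */
static bool example_range_busy(unsigned long start_pfn, unsigned long nr_pages)
{
        return false;   /* placeholder; a real user would consult its own state */
}

static int example_offline_callback(struct notifier_block *self,
                                    unsigned long action, void *arg)
{
        struct memory_notify *mn = arg;

        switch (action) {
        case MEM_GOING_OFFLINE:
                if (example_range_busy(mn->start_pfn, mn->nr_pages))
                        return notifier_from_errno(-EBUSY);     /* veto */
                break;
        case MEM_CANCEL_OFFLINE:
                /* Isolation or another notifier failed; roll back any
                 * MEM_GOING_OFFLINE bookkeeping. */
                break;
        case MEM_OFFLINE:
                /* Range is offline; status_change_nid >= 0 means the whole
                 * node is now empty. */
                break;
        }
        return NOTIFY_OK;
}
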
@@ -502,7 +538,7 @@ repeat:
                lru_add_drain_all();
                flush_scheduled_work();
                cond_resched();
-               drain_all_local_pages();
+               drain_all_pages();
        }
 
        pfn = scan_lru_pages(start_pfn, end_pfn);
@@ -525,7 +561,7 @@ repeat:
        flush_scheduled_work();
        yield();
        /* drain pcp pages, this is synchronous. */
-       drain_all_local_pages();
+       drain_all_pages();
        /* check again */
        offlined_pages = check_pages_isolated(start_pfn, end_pfn);
        if (offlined_pages < 0) {
@@ -536,23 +572,27 @@ repeat:
        /* Ok, all of our target is isolated.
           We cannot do rollback at this point. */
        offline_isolated_pages(start_pfn, end_pfn);
-       /* reset pagetype flags */
-       start_isolate_page_range(start_pfn, end_pfn);
+       /* reset pagetype flags and make the migratetype MOVABLE again */
+       undo_isolate_page_range(start_pfn, end_pfn);
        /* removal success */
-       zone = page_zone(pfn_to_page(start_pfn));
        zone->present_pages -= offlined_pages;
        zone->zone_pgdat->node_present_pages -= offlined_pages;
        totalram_pages -= offlined_pages;
        num_physpages -= offlined_pages;
+
        vm_total_pages = nr_free_pagecache_pages();
        writeback_set_ratelimit();
+
+       memory_notify(MEM_OFFLINE, &arg);
        return 0;
 
 failed_removal:
        printk(KERN_INFO "memory offlining %lx to %lx failed\n",
                start_pfn, end_pfn);
+       memory_notify(MEM_CANCEL_OFFLINE, &arg);
        /* pushback to free area */
        undo_isolate_page_range(start_pfn, end_pfn);
+
        return ret;
 }
 #else