/*
 * Copyright (c) 2012, Microsoft Corporation.
 *
 * Author:
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mman.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/percpu_counter.h>

#include <linux/hyperv.h>

#define CREATE_TRACE_POINTS
#include "hv_trace_balloon.h"

/*
 * We begin with definitions supporting the Dynamic Memory protocol
 * with the host.
 *
 * Begin protocol definitions.
 */


/*
 * Protocol versions. The low word is the minor version, the high word the
 * major version.
 *
 * History:
 * Initial version 1.0
 * Changed to 0.1 on 2009/03/25
 * Changed to 0.2 on 2009/05/14
 * Changed to 0.3 on 2009/12/03
 * Changed to 1.0 on 2011/04/05
 */

#define DYNMEM_MAKE_VERSION(Major, Minor) ((__u32)(((Major) << 16) | (Minor)))
#define DYNMEM_MAJOR_VERSION(Version) ((__u32)(Version) >> 16)
#define DYNMEM_MINOR_VERSION(Version) ((__u32)(Version) & 0xff)
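
/*
 * Illustrative example (derived purely from the macros above):
 * DYNMEM_MAKE_VERSION(2, 0) packs to 0x00020000, and
 * DYNMEM_MAJOR_VERSION(0x00020000) recovers 2. Note that
 * DYNMEM_MINOR_VERSION() masks only the low byte.
 */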

enum {
        DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
        DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),
        DYNMEM_PROTOCOL_VERSION_3 = DYNMEM_MAKE_VERSION(2, 0),

        DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
        DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,
        DYNMEM_PROTOCOL_VERSION_WIN10 = DYNMEM_PROTOCOL_VERSION_3,

        DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10
};
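
/*
 * Negotiation starts at the highest version we support and falls back
 * when the host rejects it; see version_resp() below, where WIN8 falls
 * back to WIN7 as the last attempt.
 */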


/*
 * Message Types
 */

enum dm_message_type {
        /*
         * Version 0.3
         */
        DM_ERROR                        = 0,
        DM_VERSION_REQUEST              = 1,
        DM_VERSION_RESPONSE             = 2,
        DM_CAPABILITIES_REPORT          = 3,
        DM_CAPABILITIES_RESPONSE        = 4,
        DM_STATUS_REPORT                = 5,
        DM_BALLOON_REQUEST              = 6,
        DM_BALLOON_RESPONSE             = 7,
        DM_UNBALLOON_REQUEST            = 8,
        DM_UNBALLOON_RESPONSE           = 9,
        DM_MEM_HOT_ADD_REQUEST          = 10,
        DM_MEM_HOT_ADD_RESPONSE         = 11,
        DM_VERSION_03_MAX               = 11,
        /*
         * Version 1.0.
         */
        DM_INFO_MESSAGE                 = 12,
        DM_VERSION_1_MAX                = 12
};


/*
 * Structures defining the dynamic memory management
 * protocol.
 */

union dm_version {
        struct {
                __u16 minor_version;
                __u16 major_version;
        };
        __u32 version;
} __packed;

union dm_caps {
        struct {
                __u64 balloon:1;
                __u64 hot_add:1;
                /*
                 * To support guests that may have alignment
                 * limitations on hot-add, the guest can specify
                 * its alignment requirements; a value of n
                 * represents an alignment of 2^n megabytes.
                 */
                __u64 hot_add_alignment:4;
                __u64 reservedz:58;
        } cap_bits;
        __u64 caps;
} __packed;
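
/*
 * For example (illustrative only): hot_add_alignment == 7 would declare
 * a 2^7 == 128 MB alignment requirement, which matches the 128 MB
 * hot-add granularity this driver uses (see HA_CHUNK below).
 */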

union dm_mem_page_range {
        struct  {
                /*
                 * The PFN of the first page in the range.
                 * 40 bits is the architectural limit of a PFN
                 * for AMD64.
                 */
                __u64 start_page:40;
                /*
                 * The number of pages in the range.
                 */
                __u64 page_cnt:24;
        } finfo;
        __u64  page_range;
} __packed;
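
/*
 * Packing note (arithmetic from the bit-fields above): start_page:40 and
 * page_cnt:24 share one __u64, and a 24-bit count with 4 KiB pages caps
 * a single range at 2^24 pages, i.e. 64 GiB.
 */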



/*
 * The header for all dynamic memory messages:
 *
 * type: Type of the message.
 * size: Size of the message in bytes, including the header.
 * trans_id: The guest is responsible for manufacturing this ID.
 */

struct dm_header {
        __u16 type;
        __u16 size;
        __u32 trans_id;
} __packed;
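
/*
 * Usage sketch (see post_status() below): a status report is sent with
 * hdr.type = DM_STATUS_REPORT, hdr.size = sizeof(struct dm_status) and a
 * fresh trans_id from atomic_inc_return(&trans_id).
 */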

/*
 * A generic message format for dynamic memory.
 * Specific message formats are defined later in the file.
 */

struct dm_message {
        struct dm_header hdr;
        __u8 data[]; /* enclosed message */
} __packed;


/*
 * Specific message types supporting the dynamic memory protocol.
 */

/*
 * Version negotiation message. Sent from the guest to the host.
 * The guest is free to try different versions until the host
 * accepts the version.
 *
 * dm_version: The protocol version requested.
 * is_last_attempt: If TRUE, this is the last version the guest will request.
 * reservedz: Reserved field, set to zero.
 */

struct dm_version_request {
        struct dm_header hdr;
        union dm_version version;
        __u32 is_last_attempt:1;
        __u32 reservedz:31;
} __packed;

/*
 * Version response message; sent from the host to the guest and
 * indicates whether the host has accepted the version sent by the guest.
 *
 * is_accepted: If TRUE, the host has accepted the version and the guest
 * should proceed to the next stage of the protocol. FALSE indicates that
 * the guest should retry with a different version.
 *
 * reservedz: Reserved field, set to zero.
 */

struct dm_version_response {
        struct dm_header hdr;
        __u64 is_accepted:1;
        __u64 reservedz:63;
} __packed;

/*
 * Message reporting capabilities. This is sent from the guest to the
 * host.
 */

struct dm_capabilities {
        struct dm_header hdr;
        union dm_caps caps;
        __u64 min_page_cnt;
        __u64 max_page_number;
} __packed;

/*
 * Response to the capabilities message. This is sent from the host to the
 * guest. This message indicates whether the host has accepted the guest's
 * capabilities. If the host has not accepted, the guest must shut down
 * the service.
 *
 * is_accepted: Indicates whether the host has accepted the guest's capabilities.
 * reservedz: Must be 0.
 */

struct dm_capabilities_resp_msg {
        struct dm_header hdr;
        __u64 is_accepted:1;
        __u64 reservedz:63;
} __packed;

/*
 * This message is used to report memory pressure from the guest.
 * This message is not part of any transaction and there is no
 * response to this message.
 *
 * num_avail: Available memory in pages.
 * num_committed: Committed memory in pages.
 * page_file_size: The accumulated size of all page files
 *                 in the system in pages.
 * zero_free: The number of zero and free pages.
 * page_file_writes: The writes to the page file in pages.
 * io_diff: An indicator of file cache efficiency or page file activity,
 *          calculated as File Cache Page Fault Count - Page Read Count.
 *          This value is in pages.
 *
 * Some of these metrics are Windows specific and fortunately
 * the algorithm on the host side that computes the guest memory
 * pressure only uses the num_committed value.
 */

struct dm_status {
        struct dm_header hdr;
        __u64 num_avail;
        __u64 num_committed;
        __u64 page_file_size;
        __u64 zero_free;
        __u32 page_file_writes;
        __u32 io_diff;
} __packed;


/*
 * Message to ask the guest to allocate memory - balloon up message.
 * This message is sent from the host to the guest. The guest may not be
 * able to allocate as much memory as requested.
 *
 * num_pages: number of pages to allocate.
 */

struct dm_balloon {
        struct dm_header hdr;
        __u32 num_pages;
        __u32 reservedz;
} __packed;


/*
 * Balloon response message; this message is sent from the guest
 * to the host in response to the balloon message.
 *
 * reservedz: Reserved; must be set to zero.
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the guest.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 *
 */

struct dm_balloon_response {
        struct dm_header hdr;
        __u32 reservedz;
        __u32 more_pages:1;
        __u32 range_count:31;
        union dm_mem_page_range range_array[];
} __packed;

/*
 * Un-balloon message; this message is sent from the host
 * to the guest to give the guest more memory.
 *
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the guest.
 *
 * reservedz: Reserved; must be set to zero.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 *
 */

struct dm_unballoon_request {
        struct dm_header hdr;
        __u32 more_pages:1;
        __u32 reservedz:31;
        __u32 range_count;
        union dm_mem_page_range range_array[];
} __packed;

/*
 * Un-balloon response message; this message is sent from the guest
 * to the host in response to an unballoon request.
 *
 */

struct dm_unballoon_response {
        struct dm_header hdr;
} __packed;


/*
 * Hot add request message. Message sent from the host to the guest.
 *
 * mem_range: Memory range to hot add.
 *
 * On Linux we currently don't support this since we cannot hot add
 * arbitrary granularity of memory.
 */

struct dm_hot_add {
        struct dm_header hdr;
        union dm_mem_page_range range;
} __packed;

/*
 * Hot add response message.
 * This message is sent by the guest to report the status of a hot add request.
 * If page_count is less than the requested page count, then the host should
 * assume all further hot add requests will fail, since this indicates that
 * the guest has hit an upper physical memory barrier.
 *
 * Hot adds may also fail due to low resources; in this case, the guest must
 * not complete this message until the hot add can succeed, and the host must
 * not send a new hot add request until the response is sent.
 * If the VSC fails to hot add memory DYNMEM_NUMBER_OF_UNSUCCESSFUL_HOTADD_ATTEMPTS
 * times, it fails the request.
 *
 *
 * page_count: number of pages that were successfully hot added.
 *
 * result: result of the operation; 1: success, 0: failure.
 *
 */

struct dm_hot_add_response {
        struct dm_header hdr;
        __u32 page_count;
        __u32 result;
} __packed;

/*
 * Types of information sent from host to the guest.
 */

enum dm_info_type {
        INFO_TYPE_MAX_PAGE_CNT = 0,
        MAX_INFO_TYPE
};


/*
 * Header for the information message.
 */

struct dm_info_header {
        enum dm_info_type type;
        __u32 data_size;
} __packed;

/*
 * This message is sent from the host to the guest to pass
 * some relevant information (win8 addition).
 *
 * reserved: not used.
 * info_size: size of the information blob.
 * info: information blob.
 */

struct dm_info_msg {
        struct dm_header hdr;
        __u32 reserved;
        __u32 info_size;
        __u8  info[];
};

/*
 * End protocol definitions.
 */

/*
 * State to manage hot adding memory into the guest.
 * The range start_pfn : end_pfn specifies the range
 * that the host has asked us to hot add. The range
 * start_pfn : ha_end_pfn specifies the range that we have
 * currently hot added. We hot add in multiples of 128M
 * chunks; it is possible that we may not be able to bring
 * online all the pages in the region. The range
 * covered_start_pfn:covered_end_pfn defines the pages that can
 * be brought online.
 */

struct hv_hotadd_state {
        struct list_head list;
        unsigned long start_pfn;
        unsigned long covered_start_pfn;
        unsigned long covered_end_pfn;
        unsigned long ha_end_pfn;
        unsigned long end_pfn;
        /*
         * A list of gaps.
         */
        struct list_head gap_list;
};

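/*
 * A gap [start_pfn, end_pfn) inside a hot-add region whose pages are not
 * backed by the host; has_pfn_is_backed() below refuses to online pages
 * that fall in such a gap.
 */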
struct hv_hotadd_gap {
        struct list_head list;
        unsigned long start_pfn;
        unsigned long end_pfn;
};

struct balloon_state {
        __u32 num_pages;
        struct work_struct wrk;
};

struct hot_add_wrk {
        union dm_mem_page_range ha_page_range;
        union dm_mem_page_range ha_region_range;
        struct work_struct wrk;
};

static bool hot_add = true;
static bool do_hot_add;
/*
 * Delay reporting memory pressure by
 * the specified number of seconds.
 */
static uint pressure_report_delay = 45;

/*
 * The last time we posted a pressure report to host.
 */
static unsigned long last_post_time;

module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");

module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
static atomic_t trans_id = ATOMIC_INIT(0);

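/*
 * Size of the VMBus ring buffers for the DM channel: five pages in each
 * direction (20 KiB with 4 KiB pages), passed to vmbus_open() in
 * balloon_probe() below.
 */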
static int dm_ring_size = (5 * PAGE_SIZE);

/*
 * Driver specific state.
 */

enum hv_dm_state {
        DM_INITIALIZING = 0,
        DM_INITIALIZED,
        DM_BALLOON_UP,
        DM_BALLOON_DOWN,
        DM_HOT_ADD,
        DM_INIT_ERROR
};


static __u8 recv_buffer[PAGE_SIZE];
static __u8 *send_buffer;
#define PAGES_IN_2M     512
#define HA_CHUNK (32 * 1024)
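
/*
 * Both constants are in units of basic pages (4 KiB): PAGES_IN_2M is
 * 2 MiB / 4 KiB = 512, and HA_CHUNK is 32 * 1024 pages, i.e. the 128 MiB
 * hot-add granularity described above.
 */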

struct hv_dynmem_device {
        struct hv_device *dev;
        enum hv_dm_state state;
        struct completion host_event;
        struct completion config_event;

        /*
         * Number of pages we have currently ballooned out.
         */
        unsigned int num_pages_ballooned;
        unsigned int num_pages_onlined;
        unsigned int num_pages_added;

        /*
         * State to manage the ballooning (up) operation.
         */
        struct balloon_state balloon_wrk;

        /*
         * State to execute the "hot-add" operation.
         */
        struct hot_add_wrk ha_wrk;

        /*
         * This state tracks if the host has specified a hot-add
         * region.
         */
        bool host_specified_ha_region;

        /*
         * State to synchronize hot-add.
         */
        struct completion  ol_waitevent;
        bool ha_waiting;
        /*
         * This thread handles hot-add
         * requests from the host as well as notifying
         * the host with regards to memory pressure in
         * the guest.
         */
        struct task_struct *thread;

        /*
         * Protects ha_region_list, num_pages_onlined counter and individual
         * regions from ha_region_list.
         */
        spinlock_t ha_lock;

        /*
         * A list of hot-add regions.
         */
        struct list_head ha_region_list;

        /*
         * We start with the highest version we can support
         * and downgrade based on the host; we save here the
         * next version to try.
         */
        __u32 next_version;

        /*
         * The negotiated version agreed by host.
         */
        __u32 version;
};

static struct hv_dynmem_device dm_device;

static void post_status(struct hv_dynmem_device *dm);

#ifdef CONFIG_MEMORY_HOTPLUG
static inline bool has_pfn_is_backed(struct hv_hotadd_state *has,
                                     unsigned long pfn)
{
        struct hv_hotadd_gap *gap;

        /* The page is not backed. */
        if ((pfn < has->covered_start_pfn) || (pfn >= has->covered_end_pfn))
                return false;

        /* Check for gaps. */
        list_for_each_entry(gap, &has->gap_list, list) {
                if ((pfn >= gap->start_pfn) && (pfn < gap->end_pfn))
                        return false;
        }

        return true;
}

static unsigned long hv_page_offline_check(unsigned long start_pfn,
                                           unsigned long nr_pages)
{
        unsigned long pfn = start_pfn, count = 0;
        struct hv_hotadd_state *has;
        bool found;

        while (pfn < start_pfn + nr_pages) {
                /*
                 * Search for HAS which covers the pfn and when we find one
                 * count how many consecutive PFNs are covered.
                 */
                found = false;
                list_for_each_entry(has, &dm_device.ha_region_list, list) {
                        while ((pfn >= has->start_pfn) &&
                               (pfn < has->end_pfn) &&
                               (pfn < start_pfn + nr_pages)) {
                                found = true;
                                if (has_pfn_is_backed(has, pfn))
                                        count++;
                                pfn++;
                        }
                }

                /*
                 * This PFN is not in any HAS (e.g. we're offlining a region
                 * which was present at boot), no need to account for it. Go
                 * to the next one.
                 */
                if (!found)
                        pfn++;
        }

        return count;
}

static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
                              void *v)
{
        struct memory_notify *mem = (struct memory_notify *)v;
        unsigned long flags, pfn_count;

        switch (val) {
        case MEM_ONLINE:
        case MEM_CANCEL_ONLINE:
                if (dm_device.ha_waiting) {
                        dm_device.ha_waiting = false;
                        complete(&dm_device.ol_waitevent);
                }
                break;

        case MEM_OFFLINE:
                spin_lock_irqsave(&dm_device.ha_lock, flags);
                pfn_count = hv_page_offline_check(mem->start_pfn,
                                                  mem->nr_pages);
                if (pfn_count <= dm_device.num_pages_onlined) {
                        dm_device.num_pages_onlined -= pfn_count;
                } else {
                        /*
                         * We're offlining more pages than we managed to online.
                         * This is unexpected. In any case don't let
                         * num_pages_onlined wrap around zero.
                         */
                        WARN_ON_ONCE(1);
                        dm_device.num_pages_onlined = 0;
                }
                spin_unlock_irqrestore(&dm_device.ha_lock, flags);
                break;
        case MEM_GOING_ONLINE:
        case MEM_GOING_OFFLINE:
        case MEM_CANCEL_OFFLINE:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block hv_memory_nb = {
        .notifier_call = hv_memory_notifier,
        .priority = 0
};

/* Check if the particular page is backed and can be onlined and online it. */
static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
{
        if (!has_pfn_is_backed(has, page_to_pfn(pg)))
                return;

        /* This frame is currently backed; online the page. */
        __online_page_set_limits(pg);
        __online_page_increment_counters(pg);
        __online_page_free(pg);

        lockdep_assert_held(&dm_device.ha_lock);
        dm_device.num_pages_onlined++;
}

static void hv_bring_pgs_online(struct hv_hotadd_state *has,
                                unsigned long start_pfn, unsigned long size)
{
        int i;

        pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
        for (i = 0; i < size; i++)
                hv_page_online_one(has, pfn_to_page(start_pfn + i));
}

static void hv_mem_hot_add(unsigned long start, unsigned long size,
                                unsigned long pfn_count,
                                struct hv_hotadd_state *has)
{
        int ret = 0;
        int i, nid;
        unsigned long start_pfn;
        unsigned long processed_pfn;
        unsigned long total_pfn = pfn_count;
        unsigned long flags;

        for (i = 0; i < (size/HA_CHUNK); i++) {
                start_pfn = start + (i * HA_CHUNK);

                spin_lock_irqsave(&dm_device.ha_lock, flags);
                has->ha_end_pfn += HA_CHUNK;

                if (total_pfn > HA_CHUNK) {
                        processed_pfn = HA_CHUNK;
                        total_pfn -= HA_CHUNK;
                } else {
                        processed_pfn = total_pfn;
                        total_pfn = 0;
                }

                has->covered_end_pfn += processed_pfn;
                spin_unlock_irqrestore(&dm_device.ha_lock, flags);

                init_completion(&dm_device.ol_waitevent);
                dm_device.ha_waiting = !memhp_auto_online;

                nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
                ret = add_memory(nid, PFN_PHYS((start_pfn)),
                                (HA_CHUNK << PAGE_SHIFT));

                if (ret) {
                        pr_err("hot_add memory failed error is %d\n", ret);
                        if (ret == -EEXIST) {
                                /*
                                 * This error indicates that the failure
                                 * is not transient. This is the
                                 * case where the guest's physical address map
                                 * precludes hot adding memory. Stop all further
                                 * memory hot-add.
                                 */
                                do_hot_add = false;
                        }
                        spin_lock_irqsave(&dm_device.ha_lock, flags);
                        has->ha_end_pfn -= HA_CHUNK;
                        has->covered_end_pfn -= processed_pfn;
                        spin_unlock_irqrestore(&dm_device.ha_lock, flags);
                        break;
                }

                /*
                 * Wait for the memory block to be onlined when memory onlining
                 * is done outside of kernel (memhp_auto_online). Since the hot
                 * add has succeeded, it is ok to proceed even if the pages in
                 * the hot added region have not been "onlined" within the
                 * allowed time.
                 */
                if (dm_device.ha_waiting)
                        wait_for_completion_timeout(&dm_device.ol_waitevent,
                                                    5*HZ);
                post_status(&dm_device);
        }
}

static void hv_online_page(struct page *pg)
{
        struct hv_hotadd_state *has;
        unsigned long flags;
        unsigned long pfn = page_to_pfn(pg);

        spin_lock_irqsave(&dm_device.ha_lock, flags);
        list_for_each_entry(has, &dm_device.ha_region_list, list) {
                /* The page belongs to a different HAS. */
                if ((pfn < has->start_pfn) || (pfn >= has->end_pfn))
                        continue;

                hv_page_online_one(has, pg);
                break;
        }
        spin_unlock_irqrestore(&dm_device.ha_lock, flags);
}

static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
{
        struct hv_hotadd_state *has;
        struct hv_hotadd_gap *gap;
        unsigned long residual, new_inc;
        int ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&dm_device.ha_lock, flags);
        list_for_each_entry(has, &dm_device.ha_region_list, list) {
                /*
                 * If the pfn range we are dealing with is not in the current
                 * "hot add block", move on.
                 */
                if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
                        continue;

                /*
                 * If the current start pfn is not where the covered_end
                 * is, create a gap and update covered_end_pfn.
                 */
                if (has->covered_end_pfn != start_pfn) {
                        gap = kzalloc(sizeof(struct hv_hotadd_gap), GFP_ATOMIC);
                        if (!gap) {
                                ret = -ENOMEM;
                                break;
                        }

                        INIT_LIST_HEAD(&gap->list);
                        gap->start_pfn = has->covered_end_pfn;
                        gap->end_pfn = start_pfn;
                        list_add_tail(&gap->list, &has->gap_list);

                        has->covered_end_pfn = start_pfn;
                }

                /*
                 * If the current hot-add request extends beyond
                 * our current limit, extend it.
                 */
                if ((start_pfn + pfn_cnt) > has->end_pfn) {
                        residual = (start_pfn + pfn_cnt - has->end_pfn);
                        /*
                         * Extend the region by multiples of HA_CHUNK.
                         */
                        new_inc = (residual / HA_CHUNK) * HA_CHUNK;
                        if (residual % HA_CHUNK)
                                new_inc += HA_CHUNK;

                        has->end_pfn += new_inc;
                }

                ret = 1;
                break;
        }
        spin_unlock_irqrestore(&dm_device.ha_lock, flags);

        return ret;
}

static unsigned long handle_pg_range(unsigned long pg_start,
                                        unsigned long pg_count)
{
        unsigned long start_pfn = pg_start;
        unsigned long pfn_cnt = pg_count;
        unsigned long size;
        struct hv_hotadd_state *has;
        unsigned long pgs_ol = 0;
        unsigned long old_covered_state;
        unsigned long res = 0, flags;

        pr_debug("Hot adding %lu pages starting at pfn 0x%lx.\n", pg_count,
                pg_start);

        spin_lock_irqsave(&dm_device.ha_lock, flags);
        list_for_each_entry(has, &dm_device.ha_region_list, list) {
                /*
                 * If the pfn range we are dealing with is not in the current
                 * "hot add block", move on.
                 */
                if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
                        continue;

                old_covered_state = has->covered_end_pfn;

                if (start_pfn < has->ha_end_pfn) {
                        /*
                         * This is the case where we are backing pages
                         * in an already hot added region. Bring
                         * these pages online first.
                         */
                        pgs_ol = has->ha_end_pfn - start_pfn;
                        if (pgs_ol > pfn_cnt)
                                pgs_ol = pfn_cnt;

                        has->covered_end_pfn += pgs_ol;
                        pfn_cnt -= pgs_ol;
                        /*
                         * Check if the corresponding memory block is already
                         * online. It is possible to observe struct pages still
                         * being uninitialized here so check the section instead.
                         * In case the section is online we need to bring the
                         * rest of the pfns (which were not backed previously)
                         * online too.
                         */
                        if (start_pfn > has->start_pfn &&
                            online_section_nr(pfn_to_section_nr(start_pfn)))
                                hv_bring_pgs_online(has, start_pfn, pgs_ol);

                }

                if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
                        /*
                         * We have some residual hot add range
                         * that needs to be hot added; hot add
                         * it now. Hot add a multiple of
                         * HA_CHUNK that fully covers the pages
                         * we have.
                         */
                        size = (has->end_pfn - has->ha_end_pfn);
                        if (pfn_cnt <= size) {
                                size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
                                if (pfn_cnt % HA_CHUNK)
                                        size += HA_CHUNK;
                        } else {
                                pfn_cnt = size;
                        }
                        spin_unlock_irqrestore(&dm_device.ha_lock, flags);
                        hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
                        spin_lock_irqsave(&dm_device.ha_lock, flags);
                }
                /*
                 * If we managed to online any pages that were given to us,
                 * we declare success.
                 */
                res = has->covered_end_pfn - old_covered_state;
                break;
        }
        spin_unlock_irqrestore(&dm_device.ha_lock, flags);

        return res;
}

static unsigned long process_hot_add(unsigned long pg_start,
                                        unsigned long pfn_cnt,
                                        unsigned long rg_start,
                                        unsigned long rg_size)
{
        struct hv_hotadd_state *ha_region = NULL;
        int covered;
        unsigned long flags;

        if (pfn_cnt == 0)
                return 0;

        if (!dm_device.host_specified_ha_region) {
                covered = pfn_covered(pg_start, pfn_cnt);
                if (covered < 0)
                        return 0;

                if (covered)
                        goto do_pg_range;
        }

        /*
         * If the host has specified a hot-add range, deal with it first.
         */

        if (rg_size != 0) {
                ha_region = kzalloc(sizeof(struct hv_hotadd_state), GFP_KERNEL);
                if (!ha_region)
                        return 0;

                INIT_LIST_HEAD(&ha_region->list);
                INIT_LIST_HEAD(&ha_region->gap_list);

                ha_region->start_pfn = rg_start;
                ha_region->ha_end_pfn = rg_start;
                ha_region->covered_start_pfn = pg_start;
                ha_region->covered_end_pfn = pg_start;
                ha_region->end_pfn = rg_start + rg_size;

                spin_lock_irqsave(&dm_device.ha_lock, flags);
                list_add_tail(&ha_region->list, &dm_device.ha_region_list);
                spin_unlock_irqrestore(&dm_device.ha_lock, flags);
        }

do_pg_range:
        /*
         * Process the specified page range, bringing the
         * pages online if possible.
         */
        return handle_pg_range(pg_start, pfn_cnt);
}

#endif

static void hot_add_req(struct work_struct *dummy)
{
        struct dm_hot_add_response resp;
#ifdef CONFIG_MEMORY_HOTPLUG
        unsigned long pg_start, pfn_cnt;
        unsigned long rg_start, rg_sz;
#endif
        struct hv_dynmem_device *dm = &dm_device;

        memset(&resp, 0, sizeof(struct dm_hot_add_response));
        resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE;
        resp.hdr.size = sizeof(struct dm_hot_add_response);

#ifdef CONFIG_MEMORY_HOTPLUG
        pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
        pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt;

        rg_start = dm->ha_wrk.ha_region_range.finfo.start_page;
        rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;

        if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
                unsigned long region_size;
                unsigned long region_start;

                /*
                 * The host has not specified the hot-add region.
                 * Based on the hot-add page range being specified,
                 * compute a hot-add region that can cover the pages
                 * that need to be hot-added while ensuring the alignment
                 * and size requirements of Linux as it relates to hot-add.
                 */
                region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
                if (pfn_cnt % HA_CHUNK)
                        region_size += HA_CHUNK;

                region_start = (pg_start / HA_CHUNK) * HA_CHUNK;

                rg_start = region_start;
                rg_sz = region_size;
        }

        if (do_hot_add)
                resp.page_count = process_hot_add(pg_start, pfn_cnt,
                                                rg_start, rg_sz);

        dm->num_pages_added += resp.page_count;
#endif
        /*
         * The result field of the response structure has the
         * following semantics:
         *
         * 1. If all or some pages hot-added: Guest should return success.
         *
         * 2. If no pages could be hot-added:
         *
         * If the guest returns success, then the host
         * will not attempt any further hot-add operations. This
         * signifies a permanent failure.
         *
         * If the guest returns failure, then this failure will be
         * treated as a transient failure and the host may retry the
         * hot-add operation after some delay.
         */
        if (resp.page_count > 0)
                resp.result = 1;
        else if (!do_hot_add)
                resp.result = 1;
        else
                resp.result = 0;

        if (!do_hot_add || (resp.page_count == 0))
                pr_err("Memory hot add failed\n");

        dm->state = DM_INITIALIZED;
        resp.hdr.trans_id = atomic_inc_return(&trans_id);
        vmbus_sendpacket(dm->dev->channel, &resp,
                        sizeof(struct dm_hot_add_response),
                        (unsigned long)NULL,
                        VM_PKT_DATA_INBAND, 0);
}

static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
{
        struct dm_info_header *info_hdr;

        info_hdr = (struct dm_info_header *)msg->info;

        switch (info_hdr->type) {
        case INFO_TYPE_MAX_PAGE_CNT:
                if (info_hdr->data_size == sizeof(__u64)) {
                        __u64 *max_page_count = (__u64 *)&info_hdr[1];

                        pr_info("Max. dynamic memory size: %llu MB\n",
                                (*max_page_count) >> (20 - PAGE_SHIFT));
                }

                break;
        default:
                pr_warn("Received Unknown type: %d\n", info_hdr->type);
        }
}

static unsigned long compute_balloon_floor(void)
{
        unsigned long min_pages;
        unsigned long nr_pages = totalram_pages();
#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
        /* Simple continuous piecewise linear function:
         *  max MiB -> min MiB  gradient
         *       0         0
         *      16        16
         *      32        24
         *     128        72    (1/2)
         *     512       168    (1/4)
         *    2048       360    (1/8)
         *    8192       744    (1/16)
         *   32768      1512    (1/32)
         */
        if (nr_pages < MB2PAGES(128))
                min_pages = MB2PAGES(8) + (nr_pages >> 1);
        else if (nr_pages < MB2PAGES(512))
                min_pages = MB2PAGES(40) + (nr_pages >> 2);
        else if (nr_pages < MB2PAGES(2048))
                min_pages = MB2PAGES(104) + (nr_pages >> 3);
        else if (nr_pages < MB2PAGES(8192))
                min_pages = MB2PAGES(232) + (nr_pages >> 4);
        else
                min_pages = MB2PAGES(488) + (nr_pages >> 5);
#undef MB2PAGES
        return min_pages;
}
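
/*
 * Worked example (arithmetic only): for a guest with 2048 MiB of RAM,
 * nr_pages == MB2PAGES(2048) falls in the fourth branch, so the floor is
 * MB2PAGES(232) + MB2PAGES(2048) / 16 == MB2PAGES(232 + 128) == 360 MiB,
 * matching the table above.
 */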

/*
 * Post our memory-pressure status to the host. The host expects
 * the guests to post this status periodically at 1 second intervals.
 *
 * The metrics specified in this protocol are very Windows
 * specific and so we cook up numbers here to convey our memory
 * pressure.
 */

static void post_status(struct hv_dynmem_device *dm)
{
        struct dm_status status;
        unsigned long now = jiffies;
        unsigned long last_post = last_post_time;

        if (pressure_report_delay > 0) {
                --pressure_report_delay;
                return;
        }

        if (!time_after(now, (last_post_time + HZ)))
                return;

        memset(&status, 0, sizeof(struct dm_status));
        status.hdr.type = DM_STATUS_REPORT;
        status.hdr.size = sizeof(struct dm_status);
        status.hdr.trans_id = atomic_inc_return(&trans_id);

        /*
         * The host expects the guest to report free and committed memory.
         * Furthermore, the host expects the pressure information to include
         * the ballooned out pages. For a given amount of memory that we are
         * managing we need to compute a floor below which we should not
         * balloon. Compute this and add it to the pressure report.
         * We also need to report all offline pages (num_pages_added -
         * num_pages_onlined) as committed to the host, otherwise it can try
         * asking us to balloon them out.
         */
        status.num_avail = si_mem_available();
        status.num_committed = vm_memory_committed() +
                dm->num_pages_ballooned +
                (dm->num_pages_added > dm->num_pages_onlined ?
                 dm->num_pages_added - dm->num_pages_onlined : 0) +
                compute_balloon_floor();

        trace_balloon_status(status.num_avail, status.num_committed,
                             vm_memory_committed(), dm->num_pages_ballooned,
                             dm->num_pages_added, dm->num_pages_onlined);
        /*
         * If our transaction ID is no longer current, just don't
         * send the status. This can happen if we were interrupted
         * after we picked our transaction ID.
         */
        if (status.hdr.trans_id != atomic_read(&trans_id))
                return;

        /*
         * If the last post time that we sampled has changed,
         * we have raced, don't post the status.
         */
        if (last_post != last_post_time)
                return;

        last_post_time = jiffies;
        vmbus_sendpacket(dm->dev->channel, &status,
                                sizeof(struct dm_status),
                                (unsigned long)NULL,
                                VM_PKT_DATA_INBAND, 0);

}

static void free_balloon_pages(struct hv_dynmem_device *dm,
                         union dm_mem_page_range *range_array)
{
        int num_pages = range_array->finfo.page_cnt;
        __u64 start_frame = range_array->finfo.start_page;
        struct page *pg;
        int i;

        for (i = 0; i < num_pages; i++) {
                pg = pfn_to_page(i + start_frame);
                __free_page(pg);
                dm->num_pages_ballooned--;
        }
}


static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
                                        unsigned int num_pages,
                                        struct dm_balloon_response *bl_resp,
                                        int alloc_unit)
{
        unsigned int i = 0;
        struct page *pg;

        if (num_pages < alloc_unit)
                return 0;

        for (i = 0; (i * alloc_unit) < num_pages; i++) {
                if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
                        PAGE_SIZE)
                        return i * alloc_unit;

                /*
                 * We execute this code in a thread context. Furthermore,
                 * we don't want the kernel to try too hard.
                 */
                pg = alloc_pages(GFP_HIGHUSER | __GFP_NORETRY |
                                __GFP_NOMEMALLOC | __GFP_NOWARN,
                                get_order(alloc_unit << PAGE_SHIFT));

                if (!pg)
                        return i * alloc_unit;

                dm->num_pages_ballooned += alloc_unit;

                /*
                 * If we allocated 2M pages, split them so we
                 * can free them in any order we get.
                 */

                if (alloc_unit != 1)
                        split_page(pg, get_order(alloc_unit << PAGE_SHIFT));

                bl_resp->range_count++;
                bl_resp->range_array[i].finfo.start_page =
                        page_to_pfn(pg);
                bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
                bl_resp->hdr.size += sizeof(union dm_mem_page_range);

        }

        return num_pages;
}

static void balloon_up(struct work_struct *dummy)
{
        unsigned int num_pages = dm_device.balloon_wrk.num_pages;
        unsigned int num_ballooned = 0;
        struct dm_balloon_response *bl_resp;
        int alloc_unit;
        int ret;
        bool done = false;
        int i;
        long avail_pages;
        unsigned long floor;

        /* The host balloons pages in 2M granularity. */
        WARN_ON_ONCE(num_pages % PAGES_IN_2M != 0);

        /*
         * We will attempt 2M allocations. However, if we fail to
         * allocate 2M chunks, we will go back to 4k allocations.
         */
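        /* 512 basic pages per allocation, i.e. PAGES_IN_2M (2 MiB). */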
        alloc_unit = 512;

        avail_pages = si_mem_available();
        floor = compute_balloon_floor();

        /* Refuse to balloon below the floor, keep the 2M granularity. */
        if (avail_pages < num_pages || avail_pages - num_pages < floor) {
                pr_warn("Balloon request will be partially fulfilled. %s\n",
                        avail_pages < num_pages ? "Not enough memory." :
                        "Balloon floor reached.");

                num_pages = avail_pages > floor ? (avail_pages - floor) : 0;
                num_pages -= num_pages % PAGES_IN_2M;
        }

        while (!done) {
                bl_resp = (struct dm_balloon_response *)send_buffer;
                memset(send_buffer, 0, PAGE_SIZE);
                bl_resp->hdr.type = DM_BALLOON_RESPONSE;
                bl_resp->hdr.size = sizeof(struct dm_balloon_response);
                bl_resp->more_pages = 1;

                num_pages -= num_ballooned;
                num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
                                                    bl_resp, alloc_unit);

                if (alloc_unit != 1 && num_ballooned == 0) {
                        alloc_unit = 1;
                        continue;
                }

                if (num_ballooned == 0 || num_ballooned == num_pages) {
                        pr_debug("Ballooned %u out of %u requested pages.\n",
                                num_pages, dm_device.balloon_wrk.num_pages);

                        bl_resp->more_pages = 0;
                        done = true;
                        dm_device.state = DM_INITIALIZED;
                }

                /*
                 * We are pushing a lot of data through the channel;
                 * deal with transient failures caused by the lack of
                 * space in the ring buffer.
                 */

                do {
                        bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
                        ret = vmbus_sendpacket(dm_device.dev->channel,
                                                bl_resp,
                                                bl_resp->hdr.size,
                                                (unsigned long)NULL,
                                                VM_PKT_DATA_INBAND, 0);

                        if (ret == -EAGAIN)
                                msleep(20);
                        post_status(&dm_device);
                } while (ret == -EAGAIN);

                if (ret) {
                        /*
                         * Free up the memory we allocated.
                         */
                        pr_err("Balloon response failed\n");

                        for (i = 0; i < bl_resp->range_count; i++)
                                free_balloon_pages(&dm_device,
                                                 &bl_resp->range_array[i]);

                        done = true;
                }
        }

}

static void balloon_down(struct hv_dynmem_device *dm,
                        struct dm_unballoon_request *req)
{
        union dm_mem_page_range *range_array = req->range_array;
        int range_count = req->range_count;
        struct dm_unballoon_response resp;
        int i;
        unsigned int prev_pages_ballooned = dm->num_pages_ballooned;

        for (i = 0; i < range_count; i++) {
                free_balloon_pages(dm, &range_array[i]);
                complete(&dm_device.config_event);
        }

        pr_debug("Freed %u ballooned pages.\n",
                prev_pages_ballooned - dm->num_pages_ballooned);

        if (req->more_pages == 1)
                return;

        memset(&resp, 0, sizeof(struct dm_unballoon_response));
        resp.hdr.type = DM_UNBALLOON_RESPONSE;
        resp.hdr.trans_id = atomic_inc_return(&trans_id);
        resp.hdr.size = sizeof(struct dm_unballoon_response);

        vmbus_sendpacket(dm_device.dev->channel, &resp,
                                sizeof(struct dm_unballoon_response),
                                (unsigned long)NULL,
                                VM_PKT_DATA_INBAND, 0);

        dm->state = DM_INITIALIZED;
}

static void balloon_onchannelcallback(void *context);

static int dm_thread_func(void *dm_dev)
{
        struct hv_dynmem_device *dm = dm_dev;

        while (!kthread_should_stop()) {
                wait_for_completion_interruptible_timeout(
                                                &dm_device.config_event, 1*HZ);
                /*
                 * The host expects us to post information on the memory
                 * pressure every second.
                 */
                reinit_completion(&dm_device.config_event);
                post_status(dm);
        }

        return 0;
}


static void version_resp(struct hv_dynmem_device *dm,
                        struct dm_version_response *vresp)
{
        struct dm_version_request version_req;
        int ret;

        if (vresp->is_accepted) {
                /*
                 * We are done; wake up the
                 * context waiting for version
                 * negotiation.
                 */
                complete(&dm->host_event);
                return;
        }
        /*
         * If there are more versions to try, continue
         * with negotiations; if not,
         * shut down the service since we are not able
         * to negotiate a suitable version number
         * with the host.
         */
        if (dm->next_version == 0)
                goto version_error;

        memset(&version_req, 0, sizeof(struct dm_version_request));
        version_req.hdr.type = DM_VERSION_REQUEST;
        version_req.hdr.size = sizeof(struct dm_version_request);
        version_req.hdr.trans_id = atomic_inc_return(&trans_id);
        version_req.version.version = dm->next_version;
        dm->version = version_req.version.version;

        /*
         * Set the next version to try in case the current version fails.
         * The Win7 protocol ought to be the last one to try.
         */
        switch (version_req.version.version) {
        case DYNMEM_PROTOCOL_VERSION_WIN8:
                dm->next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
                version_req.is_last_attempt = 0;
                break;
        default:
                dm->next_version = 0;
                version_req.is_last_attempt = 1;
        }

        ret = vmbus_sendpacket(dm->dev->channel, &version_req,
                                sizeof(struct dm_version_request),
                                (unsigned long)NULL,
                                VM_PKT_DATA_INBAND, 0);

        if (ret)
                goto version_error;

        return;

version_error:
        dm->state = DM_INIT_ERROR;
        complete(&dm->host_event);
}

static void cap_resp(struct hv_dynmem_device *dm,
                        struct dm_capabilities_resp_msg *cap_resp)
{
        if (!cap_resp->is_accepted) {
                pr_err("Capabilities not accepted by host\n");
                dm->state = DM_INIT_ERROR;
        }
        complete(&dm->host_event);
}

static void balloon_onchannelcallback(void *context)
{
        struct hv_device *dev = context;
        u32 recvlen;
        u64 requestid;
        struct dm_message *dm_msg;
        struct dm_header *dm_hdr;
        struct hv_dynmem_device *dm = hv_get_drvdata(dev);
        struct dm_balloon *bal_msg;
        struct dm_hot_add *ha_msg;
        union dm_mem_page_range *ha_pg_range;
        union dm_mem_page_range *ha_region;

        memset(recv_buffer, 0, sizeof(recv_buffer));
        vmbus_recvpacket(dev->channel, recv_buffer,
                         PAGE_SIZE, &recvlen, &requestid);

        if (recvlen > 0) {
                dm_msg = (struct dm_message *)recv_buffer;
                dm_hdr = &dm_msg->hdr;

                switch (dm_hdr->type) {
                case DM_VERSION_RESPONSE:
                        version_resp(dm,
                                 (struct dm_version_response *)dm_msg);
                        break;

                case DM_CAPABILITIES_RESPONSE:
                        cap_resp(dm,
                                 (struct dm_capabilities_resp_msg *)dm_msg);
                        break;

                case DM_BALLOON_REQUEST:
                        if (dm->state == DM_BALLOON_UP)
                                pr_warn("Currently ballooning\n");
                        bal_msg = (struct dm_balloon *)recv_buffer;
                        dm->state = DM_BALLOON_UP;
                        dm_device.balloon_wrk.num_pages = bal_msg->num_pages;
                        schedule_work(&dm_device.balloon_wrk.wrk);
                        break;

                case DM_UNBALLOON_REQUEST:
                        dm->state = DM_BALLOON_DOWN;
                        balloon_down(dm,
                                 (struct dm_unballoon_request *)recv_buffer);
                        break;

                case DM_MEM_HOT_ADD_REQUEST:
                        if (dm->state == DM_HOT_ADD)
                                pr_warn("Currently hot-adding\n");
                        dm->state = DM_HOT_ADD;
                        ha_msg = (struct dm_hot_add *)recv_buffer;
                        if (ha_msg->hdr.size == sizeof(struct dm_hot_add)) {
                                /*
                                 * This is a normal hot-add request specifying
                                 * hot-add memory.
                                 */
                                dm->host_specified_ha_region = false;
                                ha_pg_range = &ha_msg->range;
                                dm->ha_wrk.ha_page_range = *ha_pg_range;
                                dm->ha_wrk.ha_region_range.page_range = 0;
                        } else {
                                /*
                                 * Host is specifying that we first hot-add
                                 * a region and then partially populate this
                                 * region.
                                 */
                                dm->host_specified_ha_region = true;
                                ha_pg_range = &ha_msg->range;
                                ha_region = &ha_pg_range[1];
                                dm->ha_wrk.ha_page_range = *ha_pg_range;
                                dm->ha_wrk.ha_region_range = *ha_region;
                        }
                        schedule_work(&dm_device.ha_wrk.wrk);
                        break;

                case DM_INFO_MESSAGE:
                        process_info(dm, (struct dm_info_msg *)dm_msg);
                        break;

                default:
                        pr_warn("Unhandled message: type: %d\n", dm_hdr->type);
                }
        }
}
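
/*
 * A note on the dispatch model above: balloon_onchannelcallback() runs
 * in VMBus channel-callback context, so it only records state and copies
 * the request out of recv_buffer; the long-running balloon and hot-add
 * operations are deferred to balloon_wrk.wrk and ha_wrk.wrk, which are
 * initialized in balloon_probe() below.
 */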

static int balloon_probe(struct hv_device *dev,
                        const struct hv_vmbus_device_id *dev_id)
{
        int ret;
        unsigned long t;
        struct dm_version_request version_req;
        struct dm_capabilities cap_msg;

#ifdef CONFIG_MEMORY_HOTPLUG
        do_hot_add = hot_add;
#else
        do_hot_add = false;
#endif
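
        /*
         * do_hot_add (apparently driven by the "hot_add" module parameter
         * defined earlier in this file) gates the hot-add work path;
         * without CONFIG_MEMORY_HOTPLUG the guest cannot online new
         * memory, so it is forced off here.
         */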

        /*
         * First allocate a send buffer.
         */

        send_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!send_buffer)
                return -ENOMEM;

        ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
                        balloon_onchannelcallback, dev);

        if (ret)
                goto probe_error0;

        dm_device.dev = dev;
        dm_device.state = DM_INITIALIZING;
        dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8;
        init_completion(&dm_device.host_event);
        init_completion(&dm_device.config_event);
        INIT_LIST_HEAD(&dm_device.ha_region_list);
        spin_lock_init(&dm_device.ha_lock);
        INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
        INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
        dm_device.host_specified_ha_region = false;

        dm_device.thread =
                 kthread_run(dm_thread_func, &dm_device, "hv_balloon");
        if (IS_ERR(dm_device.thread)) {
                ret = PTR_ERR(dm_device.thread);
                goto probe_error1;
        }

#ifdef CONFIG_MEMORY_HOTPLUG
        set_online_page_callback(&hv_online_page);
        register_memory_notifier(&hv_memory_nb);
#endif
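
        /*
         * With CONFIG_MEMORY_HOTPLUG, pages added by our hot-add path are
         * onlined through hv_online_page(), and hv_memory_nb lets the
         * driver observe memory-block online/offline events. Both are
         * unwound in balloon_remove() and on the probe error path.
         */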

        hv_set_drvdata(dev, &dm_device);
        /*
         * Initiate the handshake with the host and negotiate
         * a version that the host can support. We start with the
         * highest version number and go down if the host cannot
         * support it.
         */
        memset(&version_req, 0, sizeof(struct dm_version_request));
        version_req.hdr.type = DM_VERSION_REQUEST;
        version_req.hdr.size = sizeof(struct dm_version_request);
        version_req.hdr.trans_id = atomic_inc_return(&trans_id);
        version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10;
        version_req.is_last_attempt = 0;
        dm_device.version = version_req.version.version;
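
        /*
         * The actual step-down happens in version_resp() (earlier in this
         * file): if the host declines WIN10, the next request proposes
         * dm_device.next_version (initialized above to the WIN8 protocol),
         * and is_last_attempt is set once there is no older version left
         * to offer.
         */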

        ret = vmbus_sendpacket(dev->channel, &version_req,
                                sizeof(struct dm_version_request),
                                (unsigned long)NULL,
                                VM_PKT_DATA_INBAND, 0);
        if (ret)
                goto probe_error2;

        t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
        if (t == 0) {
                ret = -ETIMEDOUT;
                goto probe_error2;
        }

        /*
         * If we could not negotiate a compatible version with the host,
         * fail the probe function.
         */
        if (dm_device.state == DM_INIT_ERROR) {
                ret = -ETIMEDOUT;
                goto probe_error2;
        }

        pr_info("Using Dynamic Memory protocol version %u.%u\n",
                DYNMEM_MAJOR_VERSION(dm_device.version),
                DYNMEM_MINOR_VERSION(dm_device.version));

        /*
         * Now submit our capabilities to the host.
         */
        memset(&cap_msg, 0, sizeof(struct dm_capabilities));
        cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
        cap_msg.hdr.size = sizeof(struct dm_capabilities);
        cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);

        cap_msg.caps.cap_bits.balloon = 1;
        cap_msg.caps.cap_bits.hot_add = 1;

        /*
         * Specify our alignment requirements as they relate to memory
         * hot-add. Specify 128MB alignment.
         */
        cap_msg.caps.cap_bits.hot_add_alignment = 7;
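        /* The encoding appears to be log2(MB): 2^7 MB == 128MB. */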

        /*
         * Currently the host does not use these values; we set them to
         * match the Windows driver.
         */
        cap_msg.min_page_cnt = 0;
        cap_msg.max_page_number = -1;

        ret = vmbus_sendpacket(dev->channel, &cap_msg,
                                sizeof(struct dm_capabilities),
                                (unsigned long)NULL,
                                VM_PKT_DATA_INBAND, 0);
        if (ret)
                goto probe_error2;

        t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
        if (t == 0) {
                ret = -ETIMEDOUT;
                goto probe_error2;
        }

        /*
         * If the host does not like our capabilities,
         * fail the probe function.
         */
        if (dm_device.state == DM_INIT_ERROR) {
                ret = -ETIMEDOUT;
                goto probe_error2;
        }

        dm_device.state = DM_INITIALIZED;
        last_post_time = jiffies;

        return 0;

probe_error2:
#ifdef CONFIG_MEMORY_HOTPLUG
        restore_online_page_callback(&hv_online_page);
        /* Undo the register_memory_notifier() done earlier in probe. */
        unregister_memory_notifier(&hv_memory_nb);
#endif
        kthread_stop(dm_device.thread);

probe_error1:
        vmbus_close(dev->channel);
probe_error0:
        kfree(send_buffer);
        return ret;
}
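
/*
 * The unwind order above mirrors setup in reverse: online-page callback
 * and memory notifier first, then the worker thread, then the VMBus
 * channel, and finally the send buffer.
 */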

static int balloon_remove(struct hv_device *dev)
{
        struct hv_dynmem_device *dm = hv_get_drvdata(dev);
        struct hv_hotadd_state *has, *tmp;
        struct hv_hotadd_gap *gap, *tmp_gap;
        unsigned long flags;

        if (dm->num_pages_ballooned != 0)
                pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);

        cancel_work_sync(&dm->balloon_wrk.wrk);
        cancel_work_sync(&dm->ha_wrk.wrk);

        vmbus_close(dev->channel);
        kthread_stop(dm->thread);
        kfree(send_buffer);
#ifdef CONFIG_MEMORY_HOTPLUG
        restore_online_page_callback(&hv_online_page);
        unregister_memory_notifier(&hv_memory_nb);
#endif
        spin_lock_irqsave(&dm_device.ha_lock, flags);
        list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) {
                list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
                        list_del(&gap->list);
                        kfree(gap);
                }
                list_del(&has->list);
                kfree(has);
        }
        spin_unlock_irqrestore(&dm_device.ha_lock, flags);

        return 0;
}
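
/*
 * Teardown in balloon_remove(): pending balloon/hot-add work is flushed
 * with cancel_work_sync(), the channel is closed so the host can no
 * longer deliver requests, and the hot-add region/gap bookkeeping is
 * freed under ha_lock.
 */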

static const struct hv_vmbus_device_id id_table[] = {
        /* Dynamic Memory Class ID */
        /* 525074DC-8985-46e2-8057-A307DC18A502 */
        { HV_DM_GUID, },
        { },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

static struct hv_driver balloon_drv = {
        .name = "hv_balloon",
        .id_table = id_table,
        .probe = balloon_probe,
        .remove = balloon_remove,
        .driver = {
                .probe_type = PROBE_PREFER_ASYNCHRONOUS,
        },
};
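
/*
 * PROBE_PREFER_ASYNCHRONOUS keeps balloon_probe() off the synchronous
 * probe path: the two wait_for_completion_timeout() calls above may each
 * block for up to 5*HZ while the host responds, and probing this device
 * asynchronously avoids stalling boot on that handshake.
 */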

static int __init init_balloon_drv(void)
{
        return vmbus_driver_register(&balloon_drv);
}

module_init(init_balloon_drv);

MODULE_DESCRIPTION("Hyper-V Balloon");
MODULE_LICENSE("GPL");