// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
#include <linux/of_address.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/sched/isolation.h>
#include <linux/sched/task_stack.h>

#include <linux/delay.h>
#include <linux/panic_notifier.h>
#include <linux/ptrace.h>
#include <linux/screen_info.h>
#include <linux/efi.h>
#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include <linux/dma-map-ops.h>
#include <linux/pci.h>
#include <clocksource/hyperv_timer.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"

struct vmbus_dynid {
        struct list_head node;
        struct hv_vmbus_device_id id;
};

static struct device  *hv_dev;

static int hyperv_cpuhp_online;

static long __percpu *vmbus_evt;

/* Values parsed from ACPI DSDT */
int vmbus_irq;
int vmbus_interrupt;

/*
 * The panic notifier below is responsible solely for unloading the
 * vmbus connection, which is necessary in a panic event.
 *
 * Note that this notifier has an intricate relation with the Hyper-V
 * framebuffer panic notifier: the latter needs the vmbus connection to
 * be alive in order to succeed, so the two must be ordered with respect
 * to each other [see hvfb_on_panic()] - this is done using the
 * notifiers' priorities.
 */
static int hv_panic_vmbus_unload(struct notifier_block *nb, unsigned long val,
                              void *args)
{
        vmbus_initiate_unload(true);
        return NOTIFY_DONE;
}
static struct notifier_block hyperv_panic_vmbus_unload_block = {
        .notifier_call  = hv_panic_vmbus_unload,
        .priority       = INT_MIN + 1, /* almost the latest one to execute */
};

static const char *fb_mmio_name = "fb_range";
static struct resource *fb_mmio;
static struct resource *hyperv_mmio;
static DEFINE_MUTEX(hyperv_mmio_lock);

static int vmbus_exists(void)
{
        if (hv_dev == NULL)
                return -ENODEV;

        return 0;
}

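/*
 * Each channel's monitor ID indexes the shared monitor pages: bit
 * (ID % 32) within trigger group (ID / 32) signals the channel, and the
 * same group/offset pair indexes the latency and parameter arrays used
 * by the helpers below.
 */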
static u8 channel_monitor_group(const struct vmbus_channel *channel)
{
        return (u8)channel->offermsg.monitorid / 32;
}

static u8 channel_monitor_offset(const struct vmbus_channel *channel)
{
        return (u8)channel->offermsg.monitorid % 32;
}

static u32 channel_pending(const struct vmbus_channel *channel,
                           const struct hv_monitor_page *monitor_page)
{
        u8 monitor_group = channel_monitor_group(channel);

        return monitor_page->trigger_group[monitor_group].pending;
}

static u32 channel_latency(const struct vmbus_channel *channel,
                           const struct hv_monitor_page *monitor_page)
{
        u8 monitor_group = channel_monitor_group(channel);
        u8 monitor_offset = channel_monitor_offset(channel);

        return monitor_page->latency[monitor_group][monitor_offset];
}

static u32 channel_conn_id(struct vmbus_channel *channel,
                           struct hv_monitor_page *monitor_page)
{
        u8 monitor_group = channel_monitor_group(channel);
        u8 monitor_offset = channel_monitor_offset(channel);

        return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
}

static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
                       char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sysfs_emit(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
}
static DEVICE_ATTR_RO(id);

static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
                          char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sysfs_emit(buf, "%d\n", hv_dev->channel->state);
}
static DEVICE_ATTR_RO(state);

static ssize_t monitor_id_show(struct device *dev,
                               struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sysfs_emit(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
static DEVICE_ATTR_RO(monitor_id);

static ssize_t class_id_show(struct device *dev,
                               struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sysfs_emit(buf, "{%pUl}\n",
                          &hv_dev->channel->offermsg.offer.if_type);
}
static DEVICE_ATTR_RO(class_id);

static ssize_t device_id_show(struct device *dev,
                              struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sysfs_emit(buf, "{%pUl}\n",
                          &hv_dev->channel->offermsg.offer.if_instance);
}
static DEVICE_ATTR_RO(device_id);

static ssize_t modalias_show(struct device *dev,
                             struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        return sysfs_emit(buf, "vmbus:%*phN\n", UUID_SIZE, &hv_dev->dev_type);
}
static DEVICE_ATTR_RO(modalias);

#ifdef CONFIG_NUMA
static ssize_t numa_node_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;

        return sysfs_emit(buf, "%d\n", cpu_to_node(hv_dev->channel->target_cpu));
}
static DEVICE_ATTR_RO(numa_node);
#endif

static ssize_t server_monitor_pending_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sysfs_emit(buf, "%d\n", channel_pending(hv_dev->channel,
                          vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_pending);

static ssize_t client_monitor_pending_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sysfs_emit(buf, "%d\n", channel_pending(hv_dev->channel,
                          vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_pending);

static ssize_t server_monitor_latency_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sysfs_emit(buf, "%d\n", channel_latency(hv_dev->channel,
                          vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_latency);

static ssize_t client_monitor_latency_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sysfs_emit(buf, "%d\n", channel_latency(hv_dev->channel,
                          vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_latency);

static ssize_t server_monitor_conn_id_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sysfs_emit(buf, "%d\n", channel_conn_id(hv_dev->channel,
                          vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_conn_id);

static ssize_t client_monitor_conn_id_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sysfs_emit(buf, "%d\n", channel_conn_id(hv_dev->channel,
                          vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_conn_id);

static ssize_t out_intr_mask_show(struct device *dev,
                                  struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
                                          &outbound);
        if (ret < 0)
                return ret;

        return sysfs_emit(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);

static ssize_t out_read_index_show(struct device *dev,
                                   struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
                                          &outbound);
        if (ret < 0)
                return ret;
        return sysfs_emit(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);

static ssize_t out_write_index_show(struct device *dev,
                                    struct device_attribute *dev_attr,
                                    char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
                                          &outbound);
        if (ret < 0)
                return ret;
        return sysfs_emit(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);

static ssize_t out_read_bytes_avail_show(struct device *dev,
                                         struct device_attribute *dev_attr,
                                         char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
                                          &outbound);
        if (ret < 0)
                return ret;
        return sysfs_emit(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);

static ssize_t out_write_bytes_avail_show(struct device *dev,
                                          struct device_attribute *dev_attr,
                                          char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
                                          &outbound);
        if (ret < 0)
                return ret;
        return sysfs_emit(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);

static ssize_t in_intr_mask_show(struct device *dev,
                                 struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        if (ret < 0)
                return ret;

        return sysfs_emit(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);

static ssize_t in_read_index_show(struct device *dev,
                                  struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        if (ret < 0)
                return ret;

        return sysfs_emit(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);

static ssize_t in_write_index_show(struct device *dev,
                                   struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        if (ret < 0)
                return ret;

        return sysfs_emit(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);

static ssize_t in_read_bytes_avail_show(struct device *dev,
                                        struct device_attribute *dev_attr,
                                        char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        if (ret < 0)
                return ret;

        return sysfs_emit(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);

static ssize_t in_write_bytes_avail_show(struct device *dev,
                                         struct device_attribute *dev_attr,
                                         char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        if (ret < 0)
                return ret;

        return sysfs_emit(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);

static ssize_t channel_vp_mapping_show(struct device *dev,
                                       struct device_attribute *dev_attr,
                                       char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
        int n_written;
        struct list_head *cur;

        if (!channel)
                return -ENODEV;

        mutex_lock(&vmbus_connection.channel_mutex);

        n_written = sysfs_emit(buf, "%u:%u\n",
                               channel->offermsg.child_relid,
                               channel->target_cpu);

        list_for_each(cur, &channel->sc_list) {

                cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
                n_written += sysfs_emit_at(buf, n_written, "%u:%u\n",
                                          cur_sc->offermsg.child_relid,
                                          cur_sc->target_cpu);
        }

        mutex_unlock(&vmbus_connection.channel_mutex);

        return n_written;
}
static DEVICE_ATTR_RO(channel_vp_mapping);
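/*
 * Illustrative output (relids and CPUs are made-up values) for a
 * multi-queue device with a primary channel and one subchannel:
 *
 *   15:0
 *   16:3
 *
 * i.e. one "child_relid:target_cpu" pair per line, primary channel
 * first, then each subchannel in sc_list order.
 */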

static ssize_t vendor_show(struct device *dev,
                           struct device_attribute *dev_attr,
                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        return sysfs_emit(buf, "0x%x\n", hv_dev->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t device_show(struct device *dev,
                           struct device_attribute *dev_attr,
                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        return sysfs_emit(buf, "0x%x\n", hv_dev->device_id);
}
static DEVICE_ATTR_RO(device);

static ssize_t driver_override_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        int ret;

        ret = driver_set_override(dev, &hv_dev->driver_override, buf, count);
        if (ret)
                return ret;

        return count;
}

static ssize_t driver_override_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        ssize_t len;

        device_lock(dev);
        len = sysfs_emit(buf, "%s\n", hv_dev->driver_override);
        device_unlock(dev);

        return len;
}
static DEVICE_ATTR_RW(driver_override);
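/*
 * A minimal usage sketch (driver and device names are illustrative):
 *
 *   echo uio_hv_generic > /sys/bus/vmbus/devices/<device GUID>/driver_override
 *
 * makes subsequent (re)binds of this device match only that driver;
 * writing an empty string clears the override again.
 */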

/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_dev_attrs[] = {
        &dev_attr_id.attr,
        &dev_attr_state.attr,
        &dev_attr_monitor_id.attr,
        &dev_attr_class_id.attr,
        &dev_attr_device_id.attr,
        &dev_attr_modalias.attr,
#ifdef CONFIG_NUMA
        &dev_attr_numa_node.attr,
#endif
        &dev_attr_server_monitor_pending.attr,
        &dev_attr_client_monitor_pending.attr,
        &dev_attr_server_monitor_latency.attr,
        &dev_attr_client_monitor_latency.attr,
        &dev_attr_server_monitor_conn_id.attr,
        &dev_attr_client_monitor_conn_id.attr,
        &dev_attr_out_intr_mask.attr,
        &dev_attr_out_read_index.attr,
        &dev_attr_out_write_index.attr,
        &dev_attr_out_read_bytes_avail.attr,
        &dev_attr_out_write_bytes_avail.attr,
        &dev_attr_in_intr_mask.attr,
        &dev_attr_in_read_index.attr,
        &dev_attr_in_write_index.attr,
        &dev_attr_in_read_bytes_avail.attr,
        &dev_attr_in_write_bytes_avail.attr,
        &dev_attr_channel_vp_mapping.attr,
        &dev_attr_vendor.attr,
        &dev_attr_device.attr,
        &dev_attr_driver_override.attr,
        NULL,
};

/*
 * Device-level attribute_group callback function. Returns the permission for
 * each attribute, and returns 0 if an attribute is not visible.
 */
static umode_t vmbus_dev_attr_is_visible(struct kobject *kobj,
                                         struct attribute *attr, int idx)
{
        struct device *dev = kobj_to_dev(kobj);
        const struct hv_device *hv_dev = device_to_hv_device(dev);

        /* Hide the monitor attributes if the monitor mechanism is not used. */
        if (!hv_dev->channel->offermsg.monitor_allocated &&
            (attr == &dev_attr_monitor_id.attr ||
             attr == &dev_attr_server_monitor_pending.attr ||
             attr == &dev_attr_client_monitor_pending.attr ||
             attr == &dev_attr_server_monitor_latency.attr ||
             attr == &dev_attr_client_monitor_latency.attr ||
             attr == &dev_attr_server_monitor_conn_id.attr ||
             attr == &dev_attr_client_monitor_conn_id.attr))
                return 0;

        return attr->mode;
}

static const struct attribute_group vmbus_dev_group = {
        .attrs = vmbus_dev_attrs,
        .is_visible = vmbus_dev_attr_is_visible
};
__ATTRIBUTE_GROUPS(vmbus_dev);

/* Set up the attribute for /sys/bus/vmbus/hibernation */
static ssize_t hibernation_show(const struct bus_type *bus, char *buf)
{
        return sprintf(buf, "%d\n", !!hv_is_hibernation_supported());
}

static BUS_ATTR_RO(hibernation);
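/*
 * Example (from the show routine above): "cat /sys/bus/vmbus/hibernation"
 * prints "1" when hv_is_hibernation_supported() is true, "0" otherwise.
 */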

static struct attribute *vmbus_bus_attrs[] = {
        &bus_attr_hibernation.attr,
        NULL,
};
static const struct attribute_group vmbus_bus_group = {
        .attrs = vmbus_bus_attrs,
};
__ATTRIBUTE_GROUPS(vmbus_bus);

/*
 * vmbus_uevent - add uevent for our device
 *
 * This routine is invoked when a device is added or removed on the vmbus to
 * generate a uevent to udev in userspace. Udev will then look at its rules
 * and the uevent generated here to load the appropriate driver.
 *
 * The alias string will be of the form vmbus:guid, where guid is the string
 * representation of the device guid (each byte of the guid is represented
 * by two hex characters).
 */
static int vmbus_uevent(const struct device *device, struct kobj_uevent_env *env)
{
        const struct hv_device *dev = device_to_hv_device(device);
        const char *format = "MODALIAS=vmbus:%*phN";

        return add_uevent_var(env, format, UUID_SIZE, &dev->dev_type);
}
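/*
 * Illustrative: %*phN dumps the 16 GUID bytes in memory order, so a
 * class GUID such as f8615163-df3e-46c5-913f-f2d2f965ed0e (synthetic
 * network) becomes "MODALIAS=vmbus:635161f83edfc546913ff2d2f965ed0e",
 * with the little-endian fields byte-swapped relative to the text form.
 */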

static const struct hv_vmbus_device_id *
hv_vmbus_dev_match(const struct hv_vmbus_device_id *id, const guid_t *guid)
{
        if (id == NULL)
                return NULL; /* empty device table */

        for (; !guid_is_null(&id->guid); id++)
                if (guid_equal(&id->guid, guid))
                        return id;

        return NULL;
}

static const struct hv_vmbus_device_id *
hv_vmbus_dynid_match(struct hv_driver *drv, const guid_t *guid)
{
        const struct hv_vmbus_device_id *id = NULL;
        struct vmbus_dynid *dynid;

        spin_lock(&drv->dynids.lock);
        list_for_each_entry(dynid, &drv->dynids.list, node) {
                if (guid_equal(&dynid->id.guid, guid)) {
                        id = &dynid->id;
                        break;
                }
        }
        spin_unlock(&drv->dynids.lock);

        return id;
}

static const struct hv_vmbus_device_id vmbus_device_null;

/*
 * Return a matching hv_vmbus_device_id pointer.
 * If there is no match, return NULL.
 */
static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
                                                        struct hv_device *dev)
{
        const guid_t *guid = &dev->dev_type;
        const struct hv_vmbus_device_id *id;

        /* When driver_override is set, only bind to the matching driver */
        if (dev->driver_override && strcmp(dev->driver_override, drv->name))
                return NULL;

        /* Look at the dynamic ids first, before the static ones */
        id = hv_vmbus_dynid_match(drv, guid);
        if (!id)
                id = hv_vmbus_dev_match(drv->id_table, guid);

        /* driver_override will always match, send a dummy id */
        if (!id && dev->driver_override)
                id = &vmbus_device_null;

        return id;
}

/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
static int vmbus_add_dynid(struct hv_driver *drv, guid_t *guid)
{
        struct vmbus_dynid *dynid;

        dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
        if (!dynid)
                return -ENOMEM;

        dynid->id.guid = *guid;

        spin_lock(&drv->dynids.lock);
        list_add_tail(&dynid->node, &drv->dynids.list);
        spin_unlock(&drv->dynids.lock);

        return driver_attach(&drv->driver);
}

static void vmbus_free_dynids(struct hv_driver *drv)
{
        struct vmbus_dynid *dynid, *n;

        spin_lock(&drv->dynids.lock);
        list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
                list_del(&dynid->node);
                kfree(dynid);
        }
        spin_unlock(&drv->dynids.lock);
}

/*
 * new_id_store - sysfs frontend to vmbus_add_dynid()
 *
 * Allow GUIDs to be added to an existing driver via sysfs.
 */
static ssize_t new_id_store(struct device_driver *driver, const char *buf,
                            size_t count)
{
        struct hv_driver *drv = drv_to_hv_drv(driver);
        guid_t guid;
        ssize_t retval;

        retval = guid_parse(buf, &guid);
        if (retval)
                return retval;

        if (hv_vmbus_dynid_match(drv, &guid))
                return -EEXIST;

        retval = vmbus_add_dynid(drv, &guid);
        if (retval)
                return retval;
        return count;
}
static DRIVER_ATTR_WO(new_id);
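/*
 * A minimal usage sketch (the driver name is illustrative):
 *
 *   echo "f8615163-df3e-46c5-913f-f2d2f965ed0e" \
 *       > /sys/bus/vmbus/drivers/uio_hv_generic/new_id
 *
 * parses the GUID, appends it to the driver's dynamic-ID list and calls
 * driver_attach() so already-present unbound devices can bind to it.
 */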

/*
 * remove_id_store - remove a vmbus device ID from this driver
 *
 * Removes a dynamic vmbus device ID from this driver.
 */
static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
                               size_t count)
{
        struct hv_driver *drv = drv_to_hv_drv(driver);
        struct vmbus_dynid *dynid, *n;
        guid_t guid;
        ssize_t retval;

        retval = guid_parse(buf, &guid);
        if (retval)
                return retval;

        retval = -ENODEV;
        spin_lock(&drv->dynids.lock);
        list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
                struct hv_vmbus_device_id *id = &dynid->id;

                if (guid_equal(&id->guid, &guid)) {
                        list_del(&dynid->node);
                        kfree(dynid);
                        retval = count;
                        break;
                }
        }
        spin_unlock(&drv->dynids.lock);

        return retval;
}
static DRIVER_ATTR_WO(remove_id);

static struct attribute *vmbus_drv_attrs[] = {
        &driver_attr_new_id.attr,
        &driver_attr_remove_id.attr,
        NULL,
};
ATTRIBUTE_GROUPS(vmbus_drv);

/*
 * vmbus_match - Attempt to match the specified device to the specified driver
 */
static int vmbus_match(struct device *device, struct device_driver *driver)
{
        struct hv_driver *drv = drv_to_hv_drv(driver);
        struct hv_device *hv_dev = device_to_hv_device(device);

        /* The hv_sock driver handles all hv_sock offers. */
        if (is_hvsock_channel(hv_dev->channel))
                return drv->hvsock;

        if (hv_vmbus_get_id(drv, hv_dev))
                return 1;

        return 0;
}

/*
 * vmbus_probe - Add the new vmbus's child device
 */
static int vmbus_probe(struct device *child_device)
{
        int ret = 0;
        struct hv_driver *drv =
                        drv_to_hv_drv(child_device->driver);
        struct hv_device *dev = device_to_hv_device(child_device);
        const struct hv_vmbus_device_id *dev_id;

        dev_id = hv_vmbus_get_id(drv, dev);
        if (drv->probe) {
                ret = drv->probe(dev, dev_id);
                if (ret != 0)
                        pr_err("probe failed for device %s (%d)\n",
                               dev_name(child_device), ret);

        } else {
                pr_err("probe not set for driver %s\n",
                       dev_name(child_device));
                ret = -ENODEV;
        }
        return ret;
}

/*
 * vmbus_dma_configure -- Configure DMA coherence for VMbus device
 */
static int vmbus_dma_configure(struct device *child_device)
{
        /*
         * On ARM64, propagate the DMA coherence setting from the top level
         * VMbus ACPI device to the child VMbus device being added here.
         * On x86/x64 coherence is assumed and these calls have no effect.
         */
        hv_setup_dma_ops(child_device,
                device_get_dma_attr(hv_dev) == DEV_DMA_COHERENT);
        return 0;
}

/*
 * vmbus_remove - Remove a vmbus device
 */
static void vmbus_remove(struct device *child_device)
{
        struct hv_driver *drv;
        struct hv_device *dev = device_to_hv_device(child_device);

        if (child_device->driver) {
                drv = drv_to_hv_drv(child_device->driver);
                if (drv->remove)
                        drv->remove(dev);
        }
}

/*
 * vmbus_shutdown - Shutdown a vmbus device
 */
static void vmbus_shutdown(struct device *child_device)
{
        struct hv_driver *drv;
        struct hv_device *dev = device_to_hv_device(child_device);

        /* The device may not be attached yet */
        if (!child_device->driver)
                return;

        drv = drv_to_hv_drv(child_device->driver);

        if (drv->shutdown)
                drv->shutdown(dev);
}

#ifdef CONFIG_PM_SLEEP
/*
 * vmbus_suspend - Suspend a vmbus device
 */
static int vmbus_suspend(struct device *child_device)
{
        struct hv_driver *drv;
        struct hv_device *dev = device_to_hv_device(child_device);

        /* The device may not be attached yet */
        if (!child_device->driver)
                return 0;

        drv = drv_to_hv_drv(child_device->driver);
        if (!drv->suspend)
                return -EOPNOTSUPP;

        return drv->suspend(dev);
}

/*
 * vmbus_resume - Resume a vmbus device
 */
static int vmbus_resume(struct device *child_device)
{
        struct hv_driver *drv;
        struct hv_device *dev = device_to_hv_device(child_device);

        /* The device may not be attached yet */
        if (!child_device->driver)
                return 0;

        drv = drv_to_hv_drv(child_device->driver);
        if (!drv->resume)
                return -EOPNOTSUPP;

        return drv->resume(dev);
}
#else
#define vmbus_suspend NULL
#define vmbus_resume NULL
#endif /* CONFIG_PM_SLEEP */

/*
 * vmbus_device_release - Final callback release of the vmbus child device
 */
static void vmbus_device_release(struct device *device)
{
        struct hv_device *hv_dev = device_to_hv_device(device);
        struct vmbus_channel *channel = hv_dev->channel;

        hv_debug_rm_dev_dir(hv_dev);

        mutex_lock(&vmbus_connection.channel_mutex);
        hv_process_channel_removal(channel);
        mutex_unlock(&vmbus_connection.channel_mutex);
        kfree(hv_dev);
}

/*
 * Note: we must use the "noirq" ops: see the comment before vmbus_bus_pm.
 *
 * suspend_noirq/resume_noirq are set to NULL to support Suspend-to-Idle: we
 * shouldn't suspend the vmbus devices upon Suspend-to-Idle, otherwise there
 * is no way to wake up a Generation-2 VM.
 *
 * The other 4 ops are for hibernation.
 */

static const struct dev_pm_ops vmbus_pm = {
        .suspend_noirq  = NULL,
        .resume_noirq   = NULL,
        .freeze_noirq   = vmbus_suspend,
        .thaw_noirq     = vmbus_resume,
        .poweroff_noirq = vmbus_suspend,
        .restore_noirq  = vmbus_resume,
};

/* The one and only one */
static const struct bus_type  hv_bus = {
        .name =         "vmbus",
        .match =                vmbus_match,
        .shutdown =             vmbus_shutdown,
        .remove =               vmbus_remove,
        .probe =                vmbus_probe,
        .uevent =               vmbus_uevent,
        .dma_configure =        vmbus_dma_configure,
        .dev_groups =           vmbus_dev_groups,
        .drv_groups =           vmbus_drv_groups,
        .bus_groups =           vmbus_bus_groups,
        .pm =                   &vmbus_pm,
};

struct onmessage_work_context {
        struct work_struct work;
        struct {
                struct hv_message_header header;
                u8 payload[];
        } msg;
};

static void vmbus_onmessage_work(struct work_struct *work)
{
        struct onmessage_work_context *ctx;

        /* Do not process messages if we're in DISCONNECTED state */
        if (vmbus_connection.conn_state == DISCONNECTED)
                return;

        ctx = container_of(work, struct onmessage_work_context,
                           work);
        vmbus_onmessage((struct vmbus_channel_message_header *)
                        &ctx->msg.payload);
        kfree(ctx);
}

void vmbus_on_msg_dpc(unsigned long data)
{
        struct hv_per_cpu_context *hv_cpu = (void *)data;
        void *page_addr = hv_cpu->synic_message_page;
        struct hv_message msg_copy, *msg = (struct hv_message *)page_addr +
                                  VMBUS_MESSAGE_SINT;
        struct vmbus_channel_message_header *hdr;
        enum vmbus_channel_message_type msgtype;
        const struct vmbus_channel_message_table_entry *entry;
        struct onmessage_work_context *ctx;
        __u8 payload_size;
        u32 message_type;

        /*
         * 'enum vmbus_channel_message_type' is supposed to always be 'u32' as
         * it is being used in 'struct vmbus_channel_message_header' definition
         * which is supposed to match hypervisor ABI.
         */
        BUILD_BUG_ON(sizeof(enum vmbus_channel_message_type) != sizeof(u32));

        /*
         * Since the message is in memory shared with the host, an erroneous or
         * malicious Hyper-V could modify the message while vmbus_on_msg_dpc()
         * or individual message handlers are executing; to prevent this, copy
         * the message into private memory.
         */
        memcpy(&msg_copy, msg, sizeof(struct hv_message));

        message_type = msg_copy.header.message_type;
        if (message_type == HVMSG_NONE)
                /* no msg */
                return;

        hdr = (struct vmbus_channel_message_header *)msg_copy.u.payload;
        msgtype = hdr->msgtype;

        trace_vmbus_on_msg_dpc(hdr);

        if (msgtype >= CHANNELMSG_COUNT) {
                WARN_ONCE(1, "unknown msgtype=%d\n", msgtype);
                goto msg_handled;
        }

        payload_size = msg_copy.header.payload_size;
        if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT) {
                WARN_ONCE(1, "payload size is too large (%d)\n", payload_size);
                goto msg_handled;
        }

        entry = &channel_message_table[msgtype];

        if (!entry->message_handler)
                goto msg_handled;

        if (payload_size < entry->min_payload_len) {
                WARN_ONCE(1, "message too short: msgtype=%d len=%d\n", msgtype, payload_size);
                goto msg_handled;
        }

        if (entry->handler_type == VMHT_BLOCKING) {
                ctx = kmalloc(struct_size(ctx, msg.payload, payload_size), GFP_ATOMIC);
                if (ctx == NULL)
                        return;

                INIT_WORK(&ctx->work, vmbus_onmessage_work);
                ctx->msg.header = msg_copy.header;
                memcpy(&ctx->msg.payload, msg_copy.u.payload, payload_size);

                /*
                 * The host can generate a rescind message while we
                 * may still be handling the original offer. We deal with
                 * this condition by relying on the synchronization provided
                 * by offer_in_progress and by channel_mutex.  See also the
                 * inline comments in vmbus_onoffer_rescind().
                 */
                switch (msgtype) {
                case CHANNELMSG_RESCIND_CHANNELOFFER:
                        /*
                         * If we are handling the rescind message,
                         * schedule the work on the global work queue.
                         *
                         * The OFFER message and the RESCIND message should
                         * not be handled by the same serialized work queue,
                         * because the OFFER handler may call vmbus_open(),
                         * which tries to open the channel by sending an
                         * OPEN_CHANNEL message to the host and waits for
                         * the host's response; however, if the host has
                         * rescinded the channel before it receives the
                         * OPEN_CHANNEL message, the host just silently
                         * ignores the OPEN_CHANNEL message; as a result,
                         * the guest's OFFER handler hangs forever, if we
                         * handle the RESCIND message in the same serialized
                         * work queue: the RESCIND handler cannot start to
                         * run before the OFFER handler finishes.
                         */
                        if (vmbus_connection.ignore_any_offer_msg)
                                break;
                        queue_work(vmbus_connection.rescind_work_queue, &ctx->work);
                        break;

                case CHANNELMSG_OFFERCHANNEL:
                        /*
                         * The host sends the offer message of a given channel
                         * before sending the rescind message of the same
                         * channel.  These messages are sent to the guest's
                         * connect CPU; the guest then starts processing them
                         * in the tasklet handler on this CPU:
                         *
                         * VMBUS_CONNECT_CPU
                         *
                         * [vmbus_on_msg_dpc()]
                         * atomic_inc()  // CHANNELMSG_OFFERCHANNEL
                         * queue_work()
                         * ...
                         * [vmbus_on_msg_dpc()]
                         * schedule_work()  // CHANNELMSG_RESCIND_CHANNELOFFER
                         *
                         * We rely on the memory-ordering properties of the
                         * queue_work() and schedule_work() primitives, which
                         * guarantee that the atomic increment will be visible
                         * to the CPUs which will execute the offer & rescind
                         * works by the time these works will start execution.
                         */
                        if (vmbus_connection.ignore_any_offer_msg)
                                break;
                        atomic_inc(&vmbus_connection.offer_in_progress);
                        fallthrough;

                default:
                        queue_work(vmbus_connection.work_queue, &ctx->work);
                }
        } else
                entry->message_handler(hdr);

msg_handled:
        vmbus_signal_eom(msg, message_type);
}

#ifdef CONFIG_PM_SLEEP
/*
 * Fake RESCIND_CHANNEL messages to clean up hv_sock channels by force for
 * hibernation, because hv_sock connections can not persist across hibernation.
 */
static void vmbus_force_channel_rescinded(struct vmbus_channel *channel)
{
        struct onmessage_work_context *ctx;
        struct vmbus_channel_rescind_offer *rescind;

        WARN_ON(!is_hvsock_channel(channel));

        /*
         * Allocation size is small and the allocation should really not fail,
         * otherwise the state of the hv_sock connections ends up in limbo.
         */
        ctx = kzalloc(sizeof(*ctx) + sizeof(*rescind),
                      GFP_KERNEL | __GFP_NOFAIL);

        /*
         * So far, these are not really used by Linux. Just set them to the
         * reasonable values conforming to the definitions of the fields.
         */
        ctx->msg.header.message_type = 1;
        ctx->msg.header.payload_size = sizeof(*rescind);

        /* These values are actually used by Linux. */
        rescind = (struct vmbus_channel_rescind_offer *)ctx->msg.payload;
        rescind->header.msgtype = CHANNELMSG_RESCIND_CHANNELOFFER;
        rescind->child_relid = channel->offermsg.child_relid;

        INIT_WORK(&ctx->work, vmbus_onmessage_work);

        queue_work(vmbus_connection.work_queue, &ctx->work);
}
#endif /* CONFIG_PM_SLEEP */

/*
 * Schedule all channels with events pending
 */
static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
{
        unsigned long *recv_int_page;
        u32 maxbits, relid;

        /*
         * The event page can be directly checked to get the id of
         * the channel that has the interrupt pending.
         */
        void *page_addr = hv_cpu->synic_event_page;
        union hv_synic_event_flags *event
                = (union hv_synic_event_flags *)page_addr +
                                         VMBUS_MESSAGE_SINT;

        maxbits = HV_EVENT_FLAGS_COUNT;
        recv_int_page = event->flags;

        if (unlikely(!recv_int_page))
                return;

        for_each_set_bit(relid, recv_int_page, maxbits) {
                void (*callback_fn)(void *context);
                struct vmbus_channel *channel;

                if (!sync_test_and_clear_bit(relid, recv_int_page))
                        continue;

                /* Special case - vmbus channel protocol msg */
                if (relid == 0)
                        continue;

                /*
                 * Pairs with the kfree_rcu() in vmbus_chan_release().
                 * Guarantees that the channel data structure doesn't
                 * get freed while the channel pointer below is being
                 * dereferenced.
                 */
                rcu_read_lock();

                /* Find channel based on relid */
                channel = relid2channel(relid);
                if (channel == NULL)
                        goto sched_unlock_rcu;

                if (channel->rescind)
                        goto sched_unlock_rcu;

                /*
                 * Make sure that the ring buffer data structure doesn't get
                 * freed while we dereference the ring buffer pointer.  Test
                 * for the channel's onchannel_callback being NULL within a
                 * sched_lock critical section.  See also the inline comments
                 * in vmbus_reset_channel_cb().
                 */
                spin_lock(&channel->sched_lock);

                callback_fn = channel->onchannel_callback;
                if (unlikely(callback_fn == NULL))
                        goto sched_unlock;

                trace_vmbus_chan_sched(channel);

                ++channel->interrupts;

                switch (channel->callback_mode) {
                case HV_CALL_ISR:
                        (*callback_fn)(channel->channel_callback_context);
                        break;

                case HV_CALL_BATCHED:
                        hv_begin_read(&channel->inbound);
                        fallthrough;
                case HV_CALL_DIRECT:
                        tasklet_schedule(&channel->callback_event);
                }

sched_unlock:
                spin_unlock(&channel->sched_lock);
sched_unlock_rcu:
                rcu_read_unlock();
        }
}

static void vmbus_isr(void)
{
        struct hv_per_cpu_context *hv_cpu
                = this_cpu_ptr(hv_context.cpu_context);
        void *page_addr;
        struct hv_message *msg;

        vmbus_chan_sched(hv_cpu);

        page_addr = hv_cpu->synic_message_page;
        msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;

        /* Check if there are actual msgs to be processed */
        if (msg->header.message_type != HVMSG_NONE) {
                if (msg->header.message_type == HVMSG_TIMER_EXPIRED) {
                        hv_stimer0_isr();
                        vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
                } else
                        tasklet_schedule(&hv_cpu->msg_dpc);
        }

        add_interrupt_randomness(vmbus_interrupt);
}

static irqreturn_t vmbus_percpu_isr(int irq, void *dev_id)
{
        vmbus_isr();
        return IRQ_HANDLED;
}

/*
 * vmbus_bus_init - Main vmbus driver initialization routine.
 *
 * Here, we
 *      - initialize the vmbus driver context
 *      - invoke the vmbus hv main init routine
 *      - retrieve the channel offers
 */
static int vmbus_bus_init(void)
{
        int ret;

        ret = hv_init();
        if (ret != 0) {
                pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
                return ret;
        }

        ret = bus_register(&hv_bus);
        if (ret)
                return ret;

        /*
         * VMbus interrupts are best modeled as per-cpu interrupts. If
         * on an architecture with support for per-cpu IRQs (e.g. ARM64),
         * allocate a per-cpu IRQ using standard Linux kernel functionality.
         * If not on such an architecture (e.g., x86/x64), then rely on
         * code in the arch-specific portion of the code tree to connect
         * the VMbus interrupt handler.
         */

        if (vmbus_irq == -1) {
                hv_setup_vmbus_handler(vmbus_isr);
        } else {
                vmbus_evt = alloc_percpu(long);
                ret = request_percpu_irq(vmbus_irq, vmbus_percpu_isr,
                                "Hyper-V VMbus", vmbus_evt);
                if (ret) {
                        pr_err("Can't request Hyper-V VMbus IRQ %d, Err %d",
                                        vmbus_irq, ret);
                        free_percpu(vmbus_evt);
                        goto err_setup;
                }
        }

        ret = hv_synic_alloc();
        if (ret)
                goto err_alloc;

        /*
         * Initialize the per-cpu interrupt state and stimer state.
         * Then connect to the host.
         */
        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
                                hv_synic_init, hv_synic_cleanup);
        if (ret < 0)
                goto err_alloc;
        hyperv_cpuhp_online = ret;

        ret = vmbus_connect();
        if (ret)
                goto err_connect;

        /*
         * Always register the vmbus unload panic notifier because we
         * need to shut the VMbus channel connection on panic.
         */
        atomic_notifier_chain_register(&panic_notifier_list,
                               &hyperv_panic_vmbus_unload_block);

        vmbus_request_offers();

        return 0;

err_connect:
        cpuhp_remove_state(hyperv_cpuhp_online);
err_alloc:
        hv_synic_free();
        if (vmbus_irq == -1) {
                hv_remove_vmbus_handler();
        } else {
                free_percpu_irq(vmbus_irq, vmbus_evt);
                free_percpu(vmbus_evt);
        }
err_setup:
        bus_unregister(&hv_bus);
        return ret;
}

/**
 * __vmbus_driver_register() - Register a vmbus driver
 * @hv_driver: Pointer to driver structure you want to register
 * @owner: owner module of the driver
 * @mod_name: module name string
 *
 * Registers the given driver with Linux through the 'driver_register()' call
 * and sets up the hyper-v vmbus handling for this driver.
 * It will return the state of the 'driver_register()' call.
 *
 */
int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
{
        int ret;

        pr_info("registering driver %s\n", hv_driver->name);

        ret = vmbus_exists();
        if (ret < 0)
                return ret;

        hv_driver->driver.name = hv_driver->name;
        hv_driver->driver.owner = owner;
        hv_driver->driver.mod_name = mod_name;
        hv_driver->driver.bus = &hv_bus;

        spin_lock_init(&hv_driver->dynids.lock);
        INIT_LIST_HEAD(&hv_driver->dynids.list);

        ret = driver_register(&hv_driver->driver);

        return ret;
}
EXPORT_SYMBOL_GPL(__vmbus_driver_register);

/**
 * vmbus_driver_unregister() - Unregister a vmbus driver
 * @hv_driver: Pointer to driver structure you want to
 *             un-register
 *
 * Un-register the given driver that was previously registered with a call to
 * vmbus_driver_register()
 */
void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
        pr_info("unregistering driver %s\n", hv_driver->name);

        if (!vmbus_exists()) {
                driver_unregister(&hv_driver->driver);
                vmbus_free_dynids(hv_driver);
        }
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);

/*
 * Called when last reference to channel is gone.
 */
static void vmbus_chan_release(struct kobject *kobj)
{
        struct vmbus_channel *channel
                = container_of(kobj, struct vmbus_channel, kobj);

        kfree_rcu(channel, rcu);
}

struct vmbus_chan_attribute {
        struct attribute attr;
        ssize_t (*show)(struct vmbus_channel *chan, char *buf);
        ssize_t (*store)(struct vmbus_channel *chan,
                         const char *buf, size_t count);
};
#define VMBUS_CHAN_ATTR(_name, _mode, _show, _store) \
        struct vmbus_chan_attribute chan_attr_##_name \
                = __ATTR(_name, _mode, _show, _store)
#define VMBUS_CHAN_ATTR_RW(_name) \
        struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RW(_name)
#define VMBUS_CHAN_ATTR_RO(_name) \
        struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RO(_name)
#define VMBUS_CHAN_ATTR_WO(_name) \
        struct vmbus_chan_attribute chan_attr_##_name = __ATTR_WO(_name)
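/*
 * e.g. VMBUS_CHAN_ATTR_RO(read_avail) expands to a struct
 * vmbus_chan_attribute named chan_attr_read_avail whose ->show is
 * read_avail_show(), dispatched through vmbus_chan_sysfs_ops below.
 */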
1479
1480 static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
1481                                     struct attribute *attr, char *buf)
1482 {
1483         const struct vmbus_chan_attribute *attribute
1484                 = container_of(attr, struct vmbus_chan_attribute, attr);
1485         struct vmbus_channel *chan
1486                 = container_of(kobj, struct vmbus_channel, kobj);
1487
1488         if (!attribute->show)
1489                 return -EIO;
1490
1491         return attribute->show(chan, buf);
1492 }
1493
1494 static ssize_t vmbus_chan_attr_store(struct kobject *kobj,
1495                                      struct attribute *attr, const char *buf,
1496                                      size_t count)
1497 {
1498         const struct vmbus_chan_attribute *attribute
1499                 = container_of(attr, struct vmbus_chan_attribute, attr);
1500         struct vmbus_channel *chan
1501                 = container_of(kobj, struct vmbus_channel, kobj);
1502
1503         if (!attribute->store)
1504                 return -EIO;
1505
1506         return attribute->store(chan, buf, count);
1507 }
1508
1509 static const struct sysfs_ops vmbus_chan_sysfs_ops = {
1510         .show = vmbus_chan_attr_show,
1511         .store = vmbus_chan_attr_store,
1512 };
1513
1514 static ssize_t out_mask_show(struct vmbus_channel *channel, char *buf)
1515 {
1516         struct hv_ring_buffer_info *rbi = &channel->outbound;
1517         ssize_t ret;
1518
1519         mutex_lock(&rbi->ring_buffer_mutex);
1520         if (!rbi->ring_buffer) {
1521                 mutex_unlock(&rbi->ring_buffer_mutex);
1522                 return -EINVAL;
1523         }
1524
1525         ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
1526         mutex_unlock(&rbi->ring_buffer_mutex);
1527         return ret;
1528 }
1529 static VMBUS_CHAN_ATTR_RO(out_mask);
1530
1531 static ssize_t in_mask_show(struct vmbus_channel *channel, char *buf)
1532 {
1533         struct hv_ring_buffer_info *rbi = &channel->inbound;
1534         ssize_t ret;
1535
1536         mutex_lock(&rbi->ring_buffer_mutex);
1537         if (!rbi->ring_buffer) {
1538                 mutex_unlock(&rbi->ring_buffer_mutex);
1539                 return -EINVAL;
1540         }
1541
1542         ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
1543         mutex_unlock(&rbi->ring_buffer_mutex);
1544         return ret;
1545 }
1546 static VMBUS_CHAN_ATTR_RO(in_mask);
1547
1548 static ssize_t read_avail_show(struct vmbus_channel *channel, char *buf)
1549 {
1550         struct hv_ring_buffer_info *rbi = &channel->inbound;
1551         ssize_t ret;
1552
1553         mutex_lock(&rbi->ring_buffer_mutex);
1554         if (!rbi->ring_buffer) {
1555                 mutex_unlock(&rbi->ring_buffer_mutex);
1556                 return -EINVAL;
1557         }
1558
1559         ret = sprintf(buf, "%u\n", hv_get_bytes_to_read(rbi));
1560         mutex_unlock(&rbi->ring_buffer_mutex);
1561         return ret;
1562 }
1563 static VMBUS_CHAN_ATTR_RO(read_avail);
1564
1565 static ssize_t write_avail_show(struct vmbus_channel *channel, char *buf)
1566 {
1567         struct hv_ring_buffer_info *rbi = &channel->outbound;
1568         ssize_t ret;
1569
1570         mutex_lock(&rbi->ring_buffer_mutex);
1571         if (!rbi->ring_buffer) {
1572                 mutex_unlock(&rbi->ring_buffer_mutex);
1573                 return -EINVAL;
1574         }
1575
1576         ret = sprintf(buf, "%u\n", hv_get_bytes_to_write(rbi));
1577         mutex_unlock(&rbi->ring_buffer_mutex);
1578         return ret;
1579 }
1580 static VMBUS_CHAN_ATTR_RO(write_avail);
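/*
 * A minimal sketch of the arithmetic behind the two *_avail files,
 * assuming a ring of data size dsize whose read/write indices wrap at
 * dsize (see hv_get_bytes_to_read()/hv_get_bytes_to_write()):
 *
 *     to_read  = (write >= read) ? write - read
 *                                : dsize - read + write;
 *     to_write = dsize - to_read;
 *
 * Note that read_avail is computed on the inbound ring and write_avail
 * on the outbound ring, so the two files describe different buffers.
 */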
1581
1582 static ssize_t target_cpu_show(struct vmbus_channel *channel, char *buf)
1583 {
1584         return sprintf(buf, "%u\n", channel->target_cpu);
1585 }
1586 static ssize_t target_cpu_store(struct vmbus_channel *channel,
1587                                 const char *buf, size_t count)
1588 {
1589         u32 target_cpu, origin_cpu;
1590         ssize_t ret = count;
1591
1592         if (vmbus_proto_version < VERSION_WIN10_V4_1)
1593                 return -EIO;
1594
1595         if (sscanf(buf, "%u", &target_cpu) != 1)
1596                 return -EIO;
1597
1598         /* Validate target_cpu for the cpumask_test_cpu() operation below. */
1599         if (target_cpu >= nr_cpumask_bits)
1600                 return -EINVAL;
1601
1602         if (!cpumask_test_cpu(target_cpu, housekeeping_cpumask(HK_TYPE_MANAGED_IRQ)))
1603                 return -EINVAL;
1604
1605         /* No CPUs should come up or down during this. */
1606         cpus_read_lock();
1607
1608         if (!cpu_online(target_cpu)) {
1609                 cpus_read_unlock();
1610                 return -EINVAL;
1611         }
1612
1613         /*
1614          * Synchronizes target_cpu_store() and channel closure:
1615          *
1616          * { Initially: state = CHANNEL_OPENED }
1617          *
1618          * CPU1                         CPU2
1619          *
1620          * [target_cpu_store()]         [vmbus_disconnect_ring()]
1621          *
1622          * LOCK channel_mutex           LOCK channel_mutex
1623          * LOAD r1 = state              LOAD r2 = state
1624          * IF (r1 == CHANNEL_OPENED)    IF (r2 == CHANNEL_OPENED)
1625          *   SEND MODIFYCHANNEL           STORE state = CHANNEL_OPEN
1626          *   [...]                        SEND CLOSECHANNEL
1627          * UNLOCK channel_mutex         UNLOCK channel_mutex
1628          *
1629          * Forbids: r1 == r2 == CHANNEL_OPENED (i.e., CPU1's LOCK precedes
1630          *              CPU2's LOCK) && CPU2's SEND precedes CPU1's SEND
1631          *
1632          * Note.  The host processes the channel messages "sequentially", in
1633          * the order in which they are received on a per-partition basis.
1634          */
1635         mutex_lock(&vmbus_connection.channel_mutex);
1636
1637         /*
1638          * Hyper-V will ignore MODIFYCHANNEL messages for "non-open" channels;
1639          * avoid sending the message and fail here for such channels.
1640          */
1641         if (channel->state != CHANNEL_OPENED_STATE) {
1642                 ret = -EIO;
1643                 goto cpu_store_unlock;
1644         }
1645
1646         origin_cpu = channel->target_cpu;
1647         if (target_cpu == origin_cpu)
1648                 goto cpu_store_unlock;
1649
1650         if (vmbus_send_modifychannel(channel,
1651                                      hv_cpu_number_to_vp_number(target_cpu))) {
1652                 ret = -EIO;
1653                 goto cpu_store_unlock;
1654         }
1655
1656         /*
1657          * For versions before VERSION_WIN10_V5_3, the following warning holds:
1658          *
1659          * Warning.  At this point, there is *no* guarantee that the host will
1660          * have successfully processed the vmbus_send_modifychannel() request.
1661          * See the header comment of vmbus_send_modifychannel() for more info.
1662          *
1663          * Lags in the processing of the above vmbus_send_modifychannel() can
1664          * result in missed interrupts if the "old" target CPU is taken offline
1665          * before Hyper-V starts sending interrupts to the "new" target CPU.
1666          * But apart from this offlining scenario, the code tolerates such
1667          * lags.  It will function correctly even if a channel interrupt comes
1668          * in on a CPU that is different from the channel target_cpu value.
1669          */
1670
1671         channel->target_cpu = target_cpu;
1672
1673         /* See init_vp_index(). */
1674         if (hv_is_perf_channel(channel))
1675                 hv_update_allocated_cpus(origin_cpu, target_cpu);
1676
1677         /* Currently set only for storvsc channels. */
1678         if (channel->change_target_cpu_callback) {
1679                 (*channel->change_target_cpu_callback)(channel,
1680                                 origin_cpu, target_cpu);
1681         }
1682
1683 cpu_store_unlock:
1684         mutex_unlock(&vmbus_connection.channel_mutex);
1685         cpus_read_unlock();
1686         return ret;
1687 }
1688 static VMBUS_CHAN_ATTR(cpu, 0644, target_cpu_show, target_cpu_store);
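/*
 * Example (device GUID and relid hypothetical): retargeting a
 * channel's interrupts to CPU 2 from user space, subject to the checks
 * in target_cpu_store() above:
 *
 *     echo 2 > /sys/bus/vmbus/devices/<device-guid>/channels/13/cpu
 *
 * The write fails with -EIO on hosts before VERSION_WIN10_V4_1 and
 * with -EINVAL if CPU 2 is offline or excluded from the managed-IRQ
 * housekeeping mask.
 */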
1689
1690 static ssize_t channel_pending_show(struct vmbus_channel *channel,
1691                                     char *buf)
1692 {
1693         return sprintf(buf, "%d\n",
1694                        channel_pending(channel,
1695                                        vmbus_connection.monitor_pages[1]));
1696 }
1697 static VMBUS_CHAN_ATTR(pending, 0444, channel_pending_show, NULL);
1698
1699 static ssize_t channel_latency_show(struct vmbus_channel *channel,
1700                                     char *buf)
1701 {
1702         return sprintf(buf, "%d\n",
1703                        channel_latency(channel,
1704                                        vmbus_connection.monitor_pages[1]));
1705 }
1706 static VMBUS_CHAN_ATTR(latency, 0444, channel_latency_show, NULL);
1707
1708 static ssize_t channel_interrupts_show(struct vmbus_channel *channel, char *buf)
1709 {
1710         return sprintf(buf, "%llu\n", channel->interrupts);
1711 }
1712 static VMBUS_CHAN_ATTR(interrupts, 0444, channel_interrupts_show, NULL);
1713
1714 static ssize_t channel_events_show(struct vmbus_channel *channel, char *buf)
1715 {
1716         return sprintf(buf, "%llu\n", channel->sig_events);
1717 }
1718 static VMBUS_CHAN_ATTR(events, 0444, channel_events_show, NULL);
1719
1720 static ssize_t channel_intr_in_full_show(struct vmbus_channel *channel,
1721                                          char *buf)
1722 {
1723         return sprintf(buf, "%llu\n",
1724                        (unsigned long long)channel->intr_in_full);
1725 }
1726 static VMBUS_CHAN_ATTR(intr_in_full, 0444, channel_intr_in_full_show, NULL);
1727
1728 static ssize_t channel_intr_out_empty_show(struct vmbus_channel *channel,
1729                                            char *buf)
1730 {
1731         return sprintf(buf, "%llu\n",
1732                        (unsigned long long)channel->intr_out_empty);
1733 }
1734 static VMBUS_CHAN_ATTR(intr_out_empty, 0444, channel_intr_out_empty_show, NULL);
1735
1736 static ssize_t channel_out_full_first_show(struct vmbus_channel *channel,
1737                                            char *buf)
1738 {
1739         return sprintf(buf, "%llu\n",
1740                        (unsigned long long)channel->out_full_first);
1741 }
1742 static VMBUS_CHAN_ATTR(out_full_first, 0444, channel_out_full_first_show, NULL);
1743
1744 static ssize_t channel_out_full_total_show(struct vmbus_channel *channel,
1745                                            char *buf)
1746 {
1747         return sprintf(buf, "%llu\n",
1748                        (unsigned long long)channel->out_full_total);
1749 }
1750 static VMBUS_CHAN_ATTR(out_full_total, 0444, channel_out_full_total_show, NULL);
1751
1752 static ssize_t subchannel_monitor_id_show(struct vmbus_channel *channel,
1753                                           char *buf)
1754 {
1755         return sprintf(buf, "%u\n", channel->offermsg.monitorid);
1756 }
1757 static VMBUS_CHAN_ATTR(monitor_id, 0444, subchannel_monitor_id_show, NULL);
1758
1759 static ssize_t subchannel_id_show(struct vmbus_channel *channel,
1760                                   char *buf)
1761 {
1762         return sprintf(buf, "%u\n",
1763                        channel->offermsg.offer.sub_channel_index);
1764 }
1765 static VMBUS_CHAN_ATTR_RO(subchannel_id);
1766
1767 static struct attribute *vmbus_chan_attrs[] = {
1768         &chan_attr_out_mask.attr,
1769         &chan_attr_in_mask.attr,
1770         &chan_attr_read_avail.attr,
1771         &chan_attr_write_avail.attr,
1772         &chan_attr_cpu.attr,
1773         &chan_attr_pending.attr,
1774         &chan_attr_latency.attr,
1775         &chan_attr_interrupts.attr,
1776         &chan_attr_events.attr,
1777         &chan_attr_intr_in_full.attr,
1778         &chan_attr_intr_out_empty.attr,
1779         &chan_attr_out_full_first.attr,
1780         &chan_attr_out_full_total.attr,
1781         &chan_attr_monitor_id.attr,
1782         &chan_attr_subchannel_id.attr,
1783         NULL
1784 };
1785
1786 /*
1787  * Channel-level attribute_group callback function. Returns the permission for
1788  * each attribute, and returns 0 if an attribute is not visible.
1789  */
1790 static umode_t vmbus_chan_attr_is_visible(struct kobject *kobj,
1791                                           struct attribute *attr, int idx)
1792 {
1793         const struct vmbus_channel *channel =
1794                 container_of(kobj, struct vmbus_channel, kobj);
1795
1796         /* Hide the monitor attributes if the monitor mechanism is not used. */
1797         if (!channel->offermsg.monitor_allocated &&
1798             (attr == &chan_attr_pending.attr ||
1799              attr == &chan_attr_latency.attr ||
1800              attr == &chan_attr_monitor_id.attr))
1801                 return 0;
1802
1803         return attr->mode;
1804 }
1805
1806 static struct attribute_group vmbus_chan_group = {
1807         .attrs = vmbus_chan_attrs,
1808         .is_visible = vmbus_chan_attr_is_visible
1809 };
1810
1811 static struct kobj_type vmbus_chan_ktype = {
1812         .sysfs_ops = &vmbus_chan_sysfs_ops,
1813         .release = vmbus_chan_release,
1814 };
1815
1816 /*
1817  * vmbus_add_channel_kobj - setup a sub-directory under device/channels
1818  */
1819 int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
1820 {
1821         const struct device *device = &dev->device;
1822         struct kobject *kobj = &channel->kobj;
1823         u32 relid = channel->offermsg.child_relid;
1824         int ret;
1825
1826         kobj->kset = dev->channels_kset;
1827         ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
1828                                    "%u", relid);
1829         if (ret) {
1830                 kobject_put(kobj);
1831                 return ret;
1832         }
1833
1834         ret = sysfs_create_group(kobj, &vmbus_chan_group);
1835
1836         if (ret) {
1837                 /*
1838                  * The calling functions' error handling paths will cleanup the
1839                  * empty channel directory.
1840                  */
1841                 kobject_put(kobj);
1842                 dev_err(device, "Unable to set up channel sysfs files\n");
1843                 return ret;
1844         }
1845
1846         kobject_uevent(kobj, KOBJ_ADD);
1847
1848         return 0;
1849 }
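/*
 * The resulting sysfs layout is, for example (GUID and relid
 * abbreviated):
 *
 *     /sys/bus/vmbus/devices/<device-guid>/channels/13/
 *         cpu  events  in_mask  interrupts  latency  monitor_id
 *         out_mask  pending  read_avail  subchannel_id  write_avail
 */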
1850
1851 /*
1852  * vmbus_remove_channel_attr_group - remove the channel's attribute group
1853  */
1854 void vmbus_remove_channel_attr_group(struct vmbus_channel *channel)
1855 {
1856         sysfs_remove_group(&channel->kobj, &vmbus_chan_group);
1857 }
1858
1859 /*
1860  * vmbus_device_create - Creates and registers a new child device
1861  * on the vmbus.
1862  */
1863 struct hv_device *vmbus_device_create(const guid_t *type,
1864                                       const guid_t *instance,
1865                                       struct vmbus_channel *channel)
1866 {
1867         struct hv_device *child_device_obj;
1868
1869         child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
1870         if (!child_device_obj) {
1871                 pr_err("Unable to allocate device object for child device\n");
1872                 return NULL;
1873         }
1874
1875         child_device_obj->channel = channel;
1876         guid_copy(&child_device_obj->dev_type, type);
1877         guid_copy(&child_device_obj->dev_instance, instance);
1878         child_device_obj->vendor_id = PCI_VENDOR_ID_MICROSOFT;
1879
1880         return child_device_obj;
1881 }
1882
1883 /*
1884  * vmbus_device_register - Register the child device
1885  */
1886 int vmbus_device_register(struct hv_device *child_device_obj)
1887 {
1888         struct kobject *kobj = &child_device_obj->device.kobj;
1889         int ret;
1890
1891         dev_set_name(&child_device_obj->device, "%pUl",
1892                      &child_device_obj->channel->offermsg.offer.if_instance);
1893
1894         child_device_obj->device.bus = &hv_bus;
1895         child_device_obj->device.parent = hv_dev;
1896         child_device_obj->device.release = vmbus_device_release;
1897
1898         child_device_obj->device.dma_parms = &child_device_obj->dma_parms;
1899         child_device_obj->device.dma_mask = &child_device_obj->dma_mask;
1900         dma_set_mask(&child_device_obj->device, DMA_BIT_MASK(64));
1901
1902         /*
1903          * Register with the LDM. This will kick off the driver/device
1904          * binding...which will eventually call vmbus_match() and vmbus_probe()
1905          */
1906         ret = device_register(&child_device_obj->device);
1907         if (ret) {
1908                 pr_err("Unable to register child device\n");
1909                 put_device(&child_device_obj->device);
1910                 return ret;
1911         }
1912
1913         child_device_obj->channels_kset = kset_create_and_add("channels",
1914                                                               NULL, kobj);
1915         if (!child_device_obj->channels_kset) {
1916                 ret = -ENOMEM;
1917                 goto err_dev_unregister;
1918         }
1919
1920         ret = vmbus_add_channel_kobj(child_device_obj,
1921                                      child_device_obj->channel);
1922         if (ret) {
1923                 pr_err("Unable to register primary channeln");
1924                 goto err_kset_unregister;
1925         }
1926         hv_debug_add_dev_dir(child_device_obj);
1927
1928         return 0;
1929
1930 err_kset_unregister:
1931         kset_unregister(child_device_obj->channels_kset);
1932
1933 err_dev_unregister:
1934         device_unregister(&child_device_obj->device);
1935         return ret;
1936 }
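/*
 * For context: in the offer-handling path (vmbus_add_channel_work() in
 * channel_mgmt.c), the two functions above run back to back: a new
 * channel offer leads to vmbus_device_create() followed by
 * vmbus_device_register(), which in turn kicks off driver binding via
 * vmbus_match() and vmbus_probe().
 */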
1937
1938 /*
1939  * vmbus_device_unregister - Remove the specified child device
1940  * from the vmbus.
1941  */
1942 void vmbus_device_unregister(struct hv_device *device_obj)
1943 {
1944         pr_debug("child device %s unregistered\n",
1945                 dev_name(&device_obj->device));
1946
1947         kset_unregister(device_obj->channels_kset);
1948
1949         /*
1950          * Kick off the process of unregistering the device.
1951          * This will call vmbus_remove() and eventually vmbus_device_release()
1952          */
1953         device_unregister(&device_obj->device);
1954 }
1955
1956 #ifdef CONFIG_ACPI
1957 /*
1958  * VMBus is an ACPI-enumerated device. Get the information we
1959  * need from the DSDT.
1960  */
1961 static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
1962 {
1963         resource_size_t start = 0;
1964         resource_size_t end = 0;
1965         struct resource *new_res;
1966         struct resource **old_res = &hyperv_mmio;
1967         struct resource **prev_res = NULL;
1968         struct resource r;
1969
1970         switch (res->type) {
1971
1972         /*
1973          * "Address" descriptors are for bus windows. Ignore
1974          * "memory" descriptors, which are for registers on
1975          * devices.
1976          */
1977         case ACPI_RESOURCE_TYPE_ADDRESS32:
1978                 start = res->data.address32.address.minimum;
1979                 end = res->data.address32.address.maximum;
1980                 break;
1981
1982         case ACPI_RESOURCE_TYPE_ADDRESS64:
1983                 start = res->data.address64.address.minimum;
1984                 end = res->data.address64.address.maximum;
1985                 break;
1986
1987         /*
1988          * The IRQ information is needed only on ARM64, which Hyper-V
1989          * sets up in the extended format. IRQ information is present
1990          * on x86/x64 in the non-extended format but it is not used by
1991          * Linux. So don't bother checking for the non-extended format.
1992          */
1993         case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
1994                 if (!acpi_dev_resource_interrupt(res, 0, &r)) {
1995                         pr_err("Unable to parse Hyper-V ACPI interrupt\n");
1996                         return AE_ERROR;
1997                 }
1998                 /* ARM64 INTID for VMbus */
1999                 vmbus_interrupt = res->data.extended_irq.interrupts[0];
2000                 /* Linux IRQ number */
2001                 vmbus_irq = r.start;
2002                 return AE_OK;
2003
2004         default:
2005                 /* Unused resource type */
2006                 return AE_OK;
2007
2008         }
2009         /*
2010          * Ignore ranges that are below 1MB, as they're not
2011          * necessary or useful here.
2012          */
2013         if (end < 0x100000)
2014                 return AE_OK;
2015
2016         new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC);
2017         if (!new_res)
2018                 return AE_NO_MEMORY;
2019
2020         /* If this range overlaps the virtual TPM, truncate it. */
2021         if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
2022                 end = VTPM_BASE_ADDRESS;
2023
2024         new_res->name = "hyperv mmio";
2025         new_res->flags = IORESOURCE_MEM;
2026         new_res->start = start;
2027         new_res->end = end;
2028
2029         /*
2030          * If two ranges are adjacent, merge them.
2031          */
2032         do {
2033                 if (!*old_res) {
2034                         *old_res = new_res;
2035                         break;
2036                 }
2037
2038                 if (((*old_res)->end + 1) == new_res->start) {
2039                         (*old_res)->end = new_res->end;
2040                         kfree(new_res);
2041                         break;
2042                 }
2043
2044                 if ((*old_res)->start == new_res->end + 1) {
2045                         (*old_res)->start = new_res->start;
2046                         kfree(new_res);
2047                         break;
2048                 }
2049
2050                 if ((*old_res)->start > new_res->end) {
2051                         new_res->sibling = *old_res;
2052                         if (prev_res)
2053                                 (*prev_res)->sibling = new_res;
2054                         *old_res = new_res;
2055                         break;
2056                 }
2057
2058                 prev_res = old_res;
2059                 old_res = &(*old_res)->sibling;
2060
2061         } while (1);
2062
2063         return AE_OK;
2064 }
2065 #endif
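/*
 * Worked example for the merge logic above (addresses hypothetical):
 * with an existing node [0xf8000000, 0xfeffffff], a new descriptor
 * [0xff000000, 0xffffffff] satisfies ((*old_res)->end + 1) ==
 * new_res->start, so the node grows to [0xf8000000, 0xffffffff].  A
 * non-adjacent range starting below the node would instead be linked
 * in front of it via the sibling pointer, keeping the list sorted by
 * start address.
 */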
2066
2067 static void vmbus_mmio_remove(void)
2068 {
2069         struct resource *cur_res;
2070         struct resource *next_res;
2071
2072         if (hyperv_mmio) {
2073                 if (fb_mmio) {
2074                         __release_region(hyperv_mmio, fb_mmio->start,
2075                                          resource_size(fb_mmio));
2076                         fb_mmio = NULL;
2077                 }
2078
2079                 for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
2080                         next_res = cur_res->sibling;
2081                         kfree(cur_res);
2082                 }
2083         }
2084 }
2085
2086 static void __maybe_unused vmbus_reserve_fb(void)
2087 {
2088         resource_size_t start = 0, size;
2089         struct pci_dev *pdev;
2090
2091         if (efi_enabled(EFI_BOOT)) {
2092                 /* Gen2 VM: get FB base from EFI framebuffer */
2093                 if (IS_ENABLED(CONFIG_SYSFB)) {
2094                         start = screen_info.lfb_base;
2095                         size = max_t(__u32, screen_info.lfb_size, 0x800000);
2096                 }
2097         } else {
2098                 /* Gen1 VM: get FB base from PCI */
2099                 pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
2100                                       PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
2101                 if (!pdev)
2102                         return;
2103
2104                 if (pdev->resource[0].flags & IORESOURCE_MEM) {
2105                         start = pci_resource_start(pdev, 0);
2106                         size = pci_resource_len(pdev, 0);
2107                 }
2108
2109                 /*
2110                  * Release the PCI device so hyperv_drm or hyperv_fb driver can
2111                  * grab it later.
2112                  */
2113                 pci_dev_put(pdev);
2114         }
2115
2116         if (!start)
2117                 return;
2118
2119         /*
2120          * Make a claim for the frame buffer in the resource tree under the
2121          * first node, which will be the one below 4GB.  The length seems to
2122          * be underreported, particularly in a Generation 1 VM.  So start out
2123          * reserving a larger area and make it smaller until it succeeds.
2124          */
2125         for (; !fb_mmio && (size >= 0x100000); size >>= 1)
2126                 fb_mmio = __request_region(hyperv_mmio, start, size, fb_mmio_name, 0);
2127 }
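/*
 * E.g., starting from size = 0x800000, the loop above tries 8 MB,
 * 4 MB, 2 MB and finally 1 MB before giving up (leaving fb_mmio NULL).
 */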
2128
2129 /**
2130  * vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
2131  * @new:                If successful, receives a pointer to the
2132  *                      allocated MMIO space.
2133  * @device_obj:         Identifies the caller
2134  * @min:                Minimum guest physical address of the
2135  *                      allocation
2136  * @max:                Maximum guest physical address
2137  * @size:               Size of the range to be allocated
2138  * @align:              Alignment of the range to be allocated
2139  * @fb_overlap_ok:      Whether this allocation can be allowed
2140  *                      to overlap the video frame buffer.
2141  *
2142  * This function walks the resources granted to VMBus by the
2143  * _CRS object in the ACPI namespace underneath the parent
2144  * "bridge" whether that's a root PCI bus in the Generation 1
2145  * case or a Module Device in the Generation 2 case.  It then
2146  * attempts to allocate from the global MMIO pool in a way that
2147  * matches the constraints supplied in these parameters and by
2148  * that _CRS.
2149  *
2150  * Return: 0 on success, -errno on failure
2151  */
2152 int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
2153                         resource_size_t min, resource_size_t max,
2154                         resource_size_t size, resource_size_t align,
2155                         bool fb_overlap_ok)
2156 {
2157         struct resource *iter, *shadow;
2158         resource_size_t range_min, range_max, start, end;
2159         const char *dev_n = dev_name(&device_obj->device);
2160         int retval;
2161
2162         retval = -ENXIO;
2163         mutex_lock(&hyperv_mmio_lock);
2164
2165         /*
2166          * If overlaps with frame buffers are allowed, then first attempt to
2167          * make the allocation from within the reserved region.  Because it
2168          * is already reserved, no shadow allocation is necessary.
2169          */
2170         if (fb_overlap_ok && fb_mmio && !(min > fb_mmio->end) &&
2171             !(max < fb_mmio->start)) {
2172
2173                 range_min = fb_mmio->start;
2174                 range_max = fb_mmio->end;
2175                 start = (range_min + align - 1) & ~(align - 1);
2176                 for (; start + size - 1 <= range_max; start += align) {
2177                         *new = request_mem_region_exclusive(start, size, dev_n);
2178                         if (*new) {
2179                                 retval = 0;
2180                                 goto exit;
2181                         }
2182                 }
2183         }
2184
2185         for (iter = hyperv_mmio; iter; iter = iter->sibling) {
2186                 if ((iter->start >= max) || (iter->end <= min))
2187                         continue;
2188
2189                 range_min = iter->start;
2190                 range_max = iter->end;
2191                 start = (range_min + align - 1) & ~(align - 1);
2192                 for (; start + size - 1 <= range_max; start += align) {
2193                         end = start + size - 1;
2194
2195                         /* Skip the whole fb_mmio region if not fb_overlap_ok */
2196                         if (!fb_overlap_ok && fb_mmio &&
2197                             (((start >= fb_mmio->start) && (start <= fb_mmio->end)) ||
2198                              ((end >= fb_mmio->start) && (end <= fb_mmio->end))))
2199                                 continue;
2200
2201                         shadow = __request_region(iter, start, size, NULL,
2202                                                   IORESOURCE_BUSY);
2203                         if (!shadow)
2204                                 continue;
2205
2206                         *new = request_mem_region_exclusive(start, size, dev_n);
2207                         if (*new) {
2208                                 shadow->name = (char *)*new;
2209                                 retval = 0;
2210                                 goto exit;
2211                         }
2212
2213                         __release_region(iter, start, size);
2214                 }
2215         }
2216
2217 exit:
2218         mutex_unlock(&hyperv_mmio_lock);
2219         return retval;
2220 }
2221 EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
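/*
 * A typical caller, modeled loosely on the Hyper-V framebuffer driver
 * (argument values illustrative): claim size bytes of MMIO anywhere in
 * guest physical address space, 1 MB aligned, with framebuffer overlap
 * allowed:
 *
 *     struct resource *mem;
 *
 *     ret = vmbus_allocate_mmio(&mem, hdev, 0, -1, size, 0x100000, true);
 *     if (ret)
 *             return ret;
 *     ...
 *     vmbus_free_mmio(mem->start, size);
 */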
2222
2223 /**
2224  * vmbus_free_mmio() - Free a memory-mapped I/O range.
2225  * @start:              Base address of region to release.
2226  * @size:               Size of the range to be released
2227  *
2228  * This function releases anything requested by
2229  * vmbus_allocate_mmio().
2230  */
2231 void vmbus_free_mmio(resource_size_t start, resource_size_t size)
2232 {
2233         struct resource *iter;
2234
2235         mutex_lock(&hyperv_mmio_lock);
2236         for (iter = hyperv_mmio; iter; iter = iter->sibling) {
2237                 if ((iter->start >= start + size) || (iter->end <= start))
2238                         continue;
2239
2240                 __release_region(iter, start, size);
2241         }
2242         release_mem_region(start, size);
2243         mutex_unlock(&hyperv_mmio_lock);
2244
2245 }
2246 EXPORT_SYMBOL_GPL(vmbus_free_mmio);
2247
2248 #ifdef CONFIG_ACPI
2249 static int vmbus_acpi_add(struct platform_device *pdev)
2250 {
2251         acpi_status result;
2252         int ret_val = -ENODEV;
2253         struct acpi_device *ancestor;
2254         struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
2255
2256         hv_dev = &device->dev;
2257
2258         /*
2259          * Older versions of Hyper-V for ARM64 fail to include the _CCA
2260          * method on the top level VMbus device in the DSDT. But devices
2261          * are hardware coherent in all current Hyper-V use cases, so fix
2262          * up the ACPI device to behave as if _CCA is present and indicates
2263          * hardware coherence.
2264          */
2265         ACPI_COMPANION_SET(&device->dev, device);
2266         if (IS_ENABLED(CONFIG_ACPI_CCA_REQUIRED) &&
2267             device_get_dma_attr(&device->dev) == DEV_DMA_NOT_SUPPORTED) {
2268                 pr_info("No ACPI _CCA found; assuming coherent device I/O\n");
2269                 device->flags.cca_seen = true;
2270                 device->flags.coherent_dma = true;
2271         }
2272
2273         result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
2274                                         vmbus_walk_resources, NULL);
2275
2276         if (ACPI_FAILURE(result))
2277                 goto acpi_walk_err;
2278         /*
2279          * Some ancestor of the vmbus acpi device (Gen1 or Gen2
2280          * firmware) is the VMOD that has the mmio ranges. Get that.
2281          */
2282         for (ancestor = acpi_dev_parent(device);
2283              ancestor && ancestor->handle != ACPI_ROOT_OBJECT;
2284              ancestor = acpi_dev_parent(ancestor)) {
2285                 result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
2286                                              vmbus_walk_resources, NULL);
2287
2288                 if (ACPI_FAILURE(result))
2289                         continue;
2290                 if (hyperv_mmio) {
2291                         vmbus_reserve_fb();
2292                         break;
2293                 }
2294         }
2295         ret_val = 0;
2296
2297 acpi_walk_err:
2298         if (ret_val)
2299                 vmbus_mmio_remove();
2300         return ret_val;
2301 }
2302 #else
2303 static int vmbus_acpi_add(struct platform_device *pdev)
2304 {
2305         return 0;
2306 }
2307 #endif
2308
2309 static int vmbus_device_add(struct platform_device *pdev)
2310 {
2311         struct resource **cur_res = &hyperv_mmio;
2312         struct of_range range;
2313         struct of_range_parser parser;
2314         struct device_node *np = pdev->dev.of_node;
2315         int ret;
2316
2317         hv_dev = &pdev->dev;
2318
2319         ret = of_range_parser_init(&parser, np);
2320         if (ret)
2321                 return ret;
2322
2323         for_each_of_range(&parser, &range) {
2324                 struct resource *res;
2325
2326                 res = kzalloc(sizeof(*res), GFP_KERNEL);
2327                 if (!res) {
2328                         vmbus_mmio_remove();
2329                         return -ENOMEM;
2330                 }
2331
2332                 res->name = "hyperv mmio";
2333                 res->flags = range.flags;
2334                 res->start = range.cpu_addr;
2335                 res->end = range.cpu_addr + range.size - 1; /* resource end is inclusive */
2336
2337                 *cur_res = res;
2338                 cur_res = &res->sibling;
2339         }
2340
2341         return ret;
2342 }
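/*
 * A hypothetical device tree node that this probe path would consume
 * (values illustrative; see the microsoft,vmbus binding for the
 * authoritative example):
 *
 *     vmbus {
 *             compatible = "microsoft,vmbus";
 *             #address-cells = <2>;
 *             #size-cells = <1>;
 *             ranges = <0x0f 0xf0000000 0x0f 0xf0000000 0x10000000>;
 *     };
 *
 * Each entry in "ranges" becomes one "hyperv mmio" resource above.
 */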
2343
2344 static int vmbus_platform_driver_probe(struct platform_device *pdev)
2345 {
2346         if (acpi_disabled)
2347                 return vmbus_device_add(pdev);
2348         else
2349                 return vmbus_acpi_add(pdev);
2350 }
2351
2352 static void vmbus_platform_driver_remove(struct platform_device *pdev)
2353 {
2354         vmbus_mmio_remove();
2355 }
2356
2357 #ifdef CONFIG_PM_SLEEP
2358 static int vmbus_bus_suspend(struct device *dev)
2359 {
2360         struct hv_per_cpu_context *hv_cpu = per_cpu_ptr(
2361                         hv_context.cpu_context, VMBUS_CONNECT_CPU);
2362         struct vmbus_channel *channel, *sc;
2363
2364         tasklet_disable(&hv_cpu->msg_dpc);
2365         vmbus_connection.ignore_any_offer_msg = true;
2366         /* The tasklet_enable() takes care of providing a memory barrier */
2367         tasklet_enable(&hv_cpu->msg_dpc);
2368
2369         /* Drain all the workqueues as we are in suspend */
2370         drain_workqueue(vmbus_connection.rescind_work_queue);
2371         drain_workqueue(vmbus_connection.work_queue);
2372         drain_workqueue(vmbus_connection.handle_primary_chan_wq);
2373         drain_workqueue(vmbus_connection.handle_sub_chan_wq);
2374
2375         mutex_lock(&vmbus_connection.channel_mutex);
2376         list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
2377                 if (!is_hvsock_channel(channel))
2378                         continue;
2379
2380                 vmbus_force_channel_rescinded(channel);
2381         }
2382         mutex_unlock(&vmbus_connection.channel_mutex);
2383
2384         /*
2385          * Wait until all the sub-channels and hv_sock channels have been
2386          * cleaned up. Sub-channels should be destroyed upon suspend, otherwise
2387          * they would conflict with the new sub-channels that will be created
2388          * in the resume path. hv_sock channels should also be destroyed, but
2389  * an hv_sock channel of an established hv_sock connection cannot really
2390  * be destroyed since it may still be referenced by the userspace
2391          * application, so we just force the hv_sock channel to be rescinded
2392          * by vmbus_force_channel_rescinded(), and the userspace application
2393          * will thoroughly destroy the channel after hibernation.
2394          *
2395          * Note: the counter nr_chan_close_on_suspend may never go above 0 if
2396          * the VM has no sub-channel and hv_sock channel, e.g. a 1-vCPU VM.
2397          */
2398         if (atomic_read(&vmbus_connection.nr_chan_close_on_suspend) > 0)
2399                 wait_for_completion(&vmbus_connection.ready_for_suspend_event);
2400
2401         if (atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0) {
2402                 pr_err("Can not suspend due to a previous failed resuming\n");
2403                 return -EBUSY;
2404         }
2405
2406         mutex_lock(&vmbus_connection.channel_mutex);
2407
2408         list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
2409                 /*
2410                  * Remove the channel from the array of channels and invalidate
2411                  * the channel's relid.  Upon resume, vmbus_onoffer() will fix
2412                  * up the relid (and other fields, if necessary) and add the
2413                  * channel back to the array.
2414                  */
2415                 vmbus_channel_unmap_relid(channel);
2416                 channel->offermsg.child_relid = INVALID_RELID;
2417
2418                 if (is_hvsock_channel(channel)) {
2419                         if (!channel->rescind) {
2420                                 pr_err("hv_sock channel not rescinded!\n");
2421                                 WARN_ON_ONCE(1);
2422                         }
2423                         continue;
2424                 }
2425
2426                 list_for_each_entry(sc, &channel->sc_list, sc_list) {
2427                         pr_err("Sub-channel not deleted!\n");
2428                         WARN_ON_ONCE(1);
2429                 }
2430
2431                 atomic_inc(&vmbus_connection.nr_chan_fixup_on_resume);
2432         }
2433
2434         mutex_unlock(&vmbus_connection.channel_mutex);
2435
2436         vmbus_initiate_unload(false);
2437
2438         /* Reset the event for the next resume. */
2439         reinit_completion(&vmbus_connection.ready_for_resume_event);
2440
2441         return 0;
2442 }
2443
2444 static int vmbus_bus_resume(struct device *dev)
2445 {
2446         struct vmbus_channel_msginfo *msginfo;
2447         size_t msgsize;
2448         int ret;
2449
2450         vmbus_connection.ignore_any_offer_msg = false;
2451
2452         /*
2453          * We only use the 'vmbus_proto_version', which was in use before
2454          * hibernation, to re-negotiate with the host.
2455          */
2456         if (!vmbus_proto_version) {
2457                 pr_err("Invalid proto version = 0x%x\n", vmbus_proto_version);
2458                 return -EINVAL;
2459         }
2460
2461         msgsize = sizeof(*msginfo) +
2462                   sizeof(struct vmbus_channel_initiate_contact);
2463
2464         msginfo = kzalloc(msgsize, GFP_KERNEL);
2465
2466         if (msginfo == NULL)
2467                 return -ENOMEM;
2468
2469         ret = vmbus_negotiate_version(msginfo, vmbus_proto_version);
2470
2471         kfree(msginfo);
2472
2473         if (ret != 0)
2474                 return ret;
2475
2476         WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) == 0);
2477
2478         vmbus_request_offers();
2479
2480         if (wait_for_completion_timeout(
2481                 &vmbus_connection.ready_for_resume_event, 10 * HZ) == 0)
2482                 pr_err("Some vmbus device is missing after suspending?\n");
2483
2484         /* Reset the event for the next suspend. */
2485         reinit_completion(&vmbus_connection.ready_for_suspend_event);
2486
2487         return 0;
2488 }
2489 #else
2490 #define vmbus_bus_suspend NULL
2491 #define vmbus_bus_resume NULL
2492 #endif /* CONFIG_PM_SLEEP */
2493
2494 static const __maybe_unused struct of_device_id vmbus_of_match[] = {
2495         {
2496                 .compatible = "microsoft,vmbus",
2497         },
2498         {
2499                 /* sentinel */
2500         },
2501 };
2502 MODULE_DEVICE_TABLE(of, vmbus_of_match);
2503
2504 static const __maybe_unused struct acpi_device_id vmbus_acpi_device_ids[] = {
2505         {"VMBUS", 0},
2506         {"VMBus", 0},
2507         {"", 0},
2508 };
2509 MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);
2510
2511 /*
2512  * Note: we must use the "noirq" ops: otherwise hibernation cannot work with
2513  * PCI device assignment, because "pci_dev_pm_ops" uses the "noirq" ops: in
2514  * the resume path, the pci "noirq" restore op runs before "non-noirq" op (see
2515  * resume_target_kernel() -> dpm_resume_start(), and hibernation_restore() ->
2516  * dpm_resume_end()). This means vmbus_bus_resume() and the pci-hyperv's
2517  * resume callback must also run via the "noirq" ops.
2518  *
2519  * Set suspend_noirq/resume_noirq to NULL for Suspend-to-Idle: see the comment
2520  * earlier in this file before vmbus_pm.
2521  */
2522
2523 static const struct dev_pm_ops vmbus_bus_pm = {
2524         .suspend_noirq  = NULL,
2525         .resume_noirq   = NULL,
2526         .freeze_noirq   = vmbus_bus_suspend,
2527         .thaw_noirq     = vmbus_bus_resume,
2528         .poweroff_noirq = vmbus_bus_suspend,
2529         .restore_noirq  = vmbus_bus_resume
2530 };
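/*
 * Mapping of the table above onto a hibernation cycle: freeze_noirq and
 * thaw_noirq run while the snapshot image is created in the original
 * kernel; poweroff_noirq runs before the machine powers down; and
 * restore_noirq runs in the freshly booted kernel once the image has
 * been read back.  Suspend-to-Idle deliberately gets NULL callbacks.
 */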
2531
2532 static struct platform_driver vmbus_platform_driver = {
2533         .probe = vmbus_platform_driver_probe,
2534         .remove_new = vmbus_platform_driver_remove,
2535         .driver = {
2536                 .name = "vmbus",
2537                 .acpi_match_table = ACPI_PTR(vmbus_acpi_device_ids),
2538                 .of_match_table = of_match_ptr(vmbus_of_match),
2539                 .pm = &vmbus_bus_pm,
2540                 .probe_type = PROBE_FORCE_SYNCHRONOUS,
2541         }
2542 };
2543
2544 static void hv_kexec_handler(void)
2545 {
2546         hv_stimer_global_cleanup();
2547         vmbus_initiate_unload(false);
2548         /* Make sure conn_state is set as hv_synic_cleanup checks for it */
2549         mb();
2550         cpuhp_remove_state(hyperv_cpuhp_online);
2551 }
2552
2553 static void hv_crash_handler(struct pt_regs *regs)
2554 {
2555         int cpu;
2556
2557         vmbus_initiate_unload(true);
2558         /*
2559          * In crash handler we can't schedule synic cleanup for all CPUs,
2560          * doing the cleanup for current CPU only. This should be sufficient
2561          * for kdump.
2562          */
2563         cpu = smp_processor_id();
2564         hv_stimer_cleanup(cpu);
2565         hv_synic_disable_regs(cpu);
2566 }
2567
2568 static int hv_synic_suspend(void)
2569 {
2570         /*
2571          * When we reach here, all the non-boot CPUs have been offlined.
2572          * If we're in a legacy configuration where stimer Direct Mode is
2573          * not enabled, the stimers on the non-boot CPUs have been unbound
2574          * in hv_synic_cleanup() -> hv_stimer_legacy_cleanup() ->
2575          * hv_stimer_cleanup() -> clockevents_unbind_device().
2576          *
2577          * hv_synic_suspend() only runs on CPU0 with interrupts disabled.
2578          * Here we do not call hv_stimer_legacy_cleanup() on CPU0 because:
2579          * 1) it's unnecessary as interrupts remain disabled between
2580          * syscore_suspend() and syscore_resume(): see create_image() and
2581          * resume_target_kernel()
2582          * 2) the stimer on CPU0 is automatically disabled later by
2583          * syscore_suspend() -> timekeeping_suspend() -> tick_suspend() -> ...
2584          * -> clockevents_shutdown() -> ... -> hv_ce_shutdown()
2585          * 3) a warning would be triggered if we call
2586          * clockevents_unbind_device(), which may sleep, in an
2587          * interrupts-disabled context.
2588          */
2589
2590         hv_synic_disable_regs(0);
2591
2592         return 0;
2593 }
2594
2595 static void hv_synic_resume(void)
2596 {
2597         hv_synic_enable_regs(0);
2598
2599         /*
2600          * Note: we don't need to call hv_stimer_init(0), because the timer
2601          * on CPU0 is not unbound in hv_synic_suspend(), and the timer is
2602          * automatically re-enabled in timekeeping_resume().
2603          */
2604 }
2605
2606 /* The callbacks run only on CPU0, with irqs_disabled. */
2607 static struct syscore_ops hv_synic_syscore_ops = {
2608         .suspend = hv_synic_suspend,
2609         .resume = hv_synic_resume,
2610 };
2611
2612 static int __init hv_acpi_init(void)
2613 {
2614         int ret;
2615
2616         if (!hv_is_hyperv_initialized())
2617                 return -ENODEV;
2618
2619         if (hv_root_partition && !hv_nested)
2620                 return 0;
2621
2622         /*
2623          * Get ACPI resources first.
2624          */
2625         ret = platform_driver_register(&vmbus_platform_driver);
2626         if (ret)
2627                 return ret;
2628
2629         if (!hv_dev) {
2630                 ret = -ENODEV;
2631                 goto cleanup;
2632         }
2633
2634         /*
2635          * If we're on an architecture with a hardcoded hypervisor
2636          * vector (i.e. x86/x64), override the VMbus interrupt found
2637          * in the ACPI tables. Ensure vmbus_irq is not set since the
2638          * normal Linux IRQ mechanism is not used in this case.
2639          */
2640 #ifdef HYPERVISOR_CALLBACK_VECTOR
2641         vmbus_interrupt = HYPERVISOR_CALLBACK_VECTOR;
2642         vmbus_irq = -1;
2643 #endif
2644
2645         hv_debug_init();
2646
2647         ret = vmbus_bus_init();
2648         if (ret)
2649                 goto cleanup;
2650
2651         hv_setup_kexec_handler(hv_kexec_handler);
2652         hv_setup_crash_handler(hv_crash_handler);
2653
2654         register_syscore_ops(&hv_synic_syscore_ops);
2655
2656         return 0;
2657
2658 cleanup:
2659         platform_driver_unregister(&vmbus_platform_driver);
2660         hv_dev = NULL;
2661         return ret;
2662 }
2663
2664 static void __exit vmbus_exit(void)
2665 {
2666         int cpu;
2667
2668         unregister_syscore_ops(&hv_synic_syscore_ops);
2669
2670         hv_remove_kexec_handler();
2671         hv_remove_crash_handler();
2672         vmbus_connection.conn_state = DISCONNECTED;
2673         hv_stimer_global_cleanup();
2674         vmbus_disconnect();
2675         if (vmbus_irq == -1) {
2676                 hv_remove_vmbus_handler();
2677         } else {
2678                 free_percpu_irq(vmbus_irq, vmbus_evt);
2679                 free_percpu(vmbus_evt);
2680         }
2681         for_each_online_cpu(cpu) {
2682                 struct hv_per_cpu_context *hv_cpu
2683                         = per_cpu_ptr(hv_context.cpu_context, cpu);
2684
2685                 tasklet_kill(&hv_cpu->msg_dpc);
2686         }
2687         hv_debug_rm_all_dir();
2688
2689         vmbus_free_channels();
2690         kfree(vmbus_connection.channels);
2691
2692         /*
2693          * The vmbus panic notifier is always registered, hence we should
2694          * also unconditionally unregister it here as well.
2695          */
2696         atomic_notifier_chain_unregister(&panic_notifier_list,
2697                                         &hyperv_panic_vmbus_unload_block);
2698
2699         bus_unregister(&hv_bus);
2700
2701         cpuhp_remove_state(hyperv_cpuhp_online);
2702         hv_synic_free();
2703         platform_driver_unregister(&vmbus_platform_driver);
2704 }
2705
2706
2707 MODULE_LICENSE("GPL");
2708 MODULE_DESCRIPTION("Microsoft Hyper-V VMBus Driver");
2709
2710 subsys_initcall(hv_acpi_init);
2711 module_exit(vmbus_exit);