/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/hyperv.h>
#include <asm/mshyperv.h>

#include "hyperv_vmbus.h"

static void init_vp_index(struct vmbus_channel *channel, u16 dev_type);
static const struct vmbus_device vmbus_devs[] = {
	{ .dev_type = HV_IDE,      HV_IDE_GUID,        .perf_device = true  },
	{ .dev_type = HV_SCSI,     HV_SCSI_GUID,       .perf_device = true  },
	{ .dev_type = HV_FC,       HV_SYNTHFC_GUID,    .perf_device = true  },
	{ .dev_type = HV_NIC,      HV_NIC_GUID,        .perf_device = true  },
	{ .dev_type = HV_ND,       HV_ND_GUID,         .perf_device = true  },
	{ .dev_type = HV_PCIE,     HV_PCIE_GUID,       .perf_device = true  },
	/* Synthetic Frame Buffer */
	{ .dev_type = HV_FB,       HV_SYNTHVID_GUID,   .perf_device = false },
	/* Synthetic Keyboard */
	{ .dev_type = HV_KBD,      HV_KBD_GUID,        .perf_device = false },
	{ .dev_type = HV_MOUSE,    HV_MOUSE_GUID,      .perf_device = false },
	{ .dev_type = HV_KVP,      HV_KVP_GUID,        .perf_device = false },
	{ .dev_type = HV_TS,       HV_TS_GUID,         .perf_device = false },
	{ .dev_type = HV_HB,       HV_HEART_BEAT_GUID, .perf_device = false },
	{ .dev_type = HV_SHUTDOWN, HV_SHUTDOWN_GUID,   .perf_device = false },
	{ .dev_type = HV_FCOPY,    HV_FCOPY_GUID,      .perf_device = false },
	{ .dev_type = HV_BACKUP,   HV_VSS_GUID,        .perf_device = false },
	{ .dev_type = HV_DM,       HV_DM_GUID,         .perf_device = false },
	/* Unknown GUID */
	{ .dev_type = HV_UNKNOWN,  .perf_device = false },
};
static const struct {
	uuid_le guid;
} vmbus_unsupported_devs[] = {
	{ HV_AVMA1_GUID },
	{ HV_AVMA2_GUID },
	{ HV_RDV_GUID	},
};
/*
 * The rescinded channel may be blocked waiting for a response from the host;
 * take care of that.
 */
static void vmbus_rescind_cleanup(struct vmbus_channel *channel)
{
	struct vmbus_channel_msginfo *msginfo;
	unsigned long flags;

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	channel->rescind = true;
	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {
		if (msginfo->waiting_channel == channel) {
			complete(&msginfo->waitevent);
			break;
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
static bool is_unsupported_vmbus_devs(const uuid_le *guid)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vmbus_unsupported_devs); i++)
		if (!uuid_le_cmp(*guid, vmbus_unsupported_devs[i].guid))
			return true;
	return false;
}
static u16 hv_get_dev_type(const struct vmbus_channel *channel)
{
	const uuid_le *guid = &channel->offermsg.offer.if_type;
	u16 i;

	if (is_hvsock_channel(channel) || is_unsupported_vmbus_devs(guid))
		return HV_UNKNOWN;

	for (i = HV_IDE; i < HV_UNKNOWN; i++) {
		if (!uuid_le_cmp(*guid, vmbus_devs[i].guid))
			return i;
	}
	pr_info("Unknown GUID: %pUl\n", guid);
	return i;
}
/**
 * vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message
 * @icmsghdrp: Pointer to msg header structure
 * @buf: Raw buffer channel data
 * @fw_version: The framework versions we can support
 * @fw_vercnt: Number of entries in @fw_version
 * @srv_version: The service versions we can support
 * @srv_vercnt: Number of entries in @srv_version
 * @nego_fw_version: On return, the selected framework version
 * @nego_srv_version: On return, the selected service version
 *
 * @icmsghdrp is of type &struct icmsg_hdr.
 * Set up and fill in a default negotiate response message.
 *
 * Versions are given in decreasing order.
 *
 * Mainly used by Hyper-V drivers.
 */
bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
				u8 *buf, const int *fw_version, int fw_vercnt,
				const int *srv_version, int srv_vercnt,
				int *nego_fw_version, int *nego_srv_version)
{
	int icframe_major, icframe_minor;
	int icmsg_major, icmsg_minor;
	int fw_major, fw_minor;
	int srv_major, srv_minor;
	int i, j;
	bool found_match = false;
	struct icmsg_negotiate *negop;

	icmsghdrp->icmsgsize = 0x10;
	negop = (struct icmsg_negotiate *)&buf[
		sizeof(struct vmbuspipe_hdr) +
		sizeof(struct icmsg_hdr)];

	icframe_major = negop->icframe_vercnt;
	icframe_minor = 0;

	icmsg_major = negop->icmsg_vercnt;
	icmsg_minor = 0;

	/*
	 * Select the framework version number we will
	 * support.
	 */
	for (i = 0; i < fw_vercnt; i++) {
		fw_major = (fw_version[i] >> 16);
		fw_minor = (fw_version[i] & 0xFFFF);

		for (j = 0; j < negop->icframe_vercnt; j++) {
			if ((negop->icversion_data[j].major == fw_major) &&
			    (negop->icversion_data[j].minor == fw_minor)) {
				icframe_major = negop->icversion_data[j].major;
				icframe_minor = negop->icversion_data[j].minor;
				found_match = true;
				break;
			}
		}
		if (found_match)
			break;
	}

	if (!found_match)
		goto fw_error;

	found_match = false;

	for (i = 0; i < srv_vercnt; i++) {
		srv_major = (srv_version[i] >> 16);
		srv_minor = (srv_version[i] & 0xFFFF);

		for (j = negop->icframe_vercnt;
		     (j < negop->icframe_vercnt + negop->icmsg_vercnt);
		     j++) {
			if ((negop->icversion_data[j].major == srv_major) &&
			    (negop->icversion_data[j].minor == srv_minor)) {
				icmsg_major = negop->icversion_data[j].major;
				icmsg_minor = negop->icversion_data[j].minor;
				found_match = true;
				break;
			}
		}
		if (found_match)
			break;
	}

	/*
	 * Respond with the framework and service
	 * version numbers we can support.
	 */
fw_error:
	if (!found_match) {
		negop->icframe_vercnt = 0;
		negop->icmsg_vercnt = 0;
	} else {
		negop->icframe_vercnt = 1;
		negop->icmsg_vercnt = 1;
	}

	if (nego_fw_version)
		*nego_fw_version = (icframe_major << 16) | icframe_minor;

	if (nego_srv_version)
		*nego_srv_version = (icmsg_major << 16) | icmsg_minor;

	negop->icversion_data[0].major = icframe_major;
	negop->icversion_data[0].minor = icframe_minor;
	negop->icversion_data[1].major = icmsg_major;
	negop->icversion_data[1].minor = icmsg_minor;

	return found_match;
}
EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);
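
/*
 * Usage sketch (an assumption modeled on the hv_utils drivers, not part
 * of this file): an IC driver's channel callback hands an incoming
 * negotiate packet to vmbus_prep_negotiate_resp() and then sends the
 * buffer back to the host.  fw_versions, srv_versions, recv_buffer,
 * recvlen and req_id are hypothetical names.
 *
 *	static const int fw_versions[] = { UTIL_FW_VERSION };
 *	static const int srv_versions[] = { SRV_VERSION };
 *	int srv_version;
 *
 *	if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
 *		if (vmbus_prep_negotiate_resp(icmsghdrp, recv_buffer,
 *				fw_versions, ARRAY_SIZE(fw_versions),
 *				srv_versions, ARRAY_SIZE(srv_versions),
 *				NULL, &srv_version))
 *			pr_info("negotiated version %d.%d\n",
 *				srv_version >> 16, srv_version & 0xFFFF);
 *	}
 *	icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION |
 *			     ICMSGHDRFLAG_RESPONSE;
 *	vmbus_sendpacket(channel, recv_buffer, recvlen, req_id,
 *			 VM_PKT_DATA_INBAND, 0);
 */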
/*
 * alloc_channel - Allocate and initialize a vmbus channel object
 */
static struct vmbus_channel *alloc_channel(void)
{
	struct vmbus_channel *channel;

	channel = kzalloc(sizeof(*channel), GFP_ATOMIC);
	if (!channel)
		return NULL;

	spin_lock_init(&channel->lock);

	INIT_LIST_HEAD(&channel->sc_list);
	INIT_LIST_HEAD(&channel->percpu_list);

	tasklet_init(&channel->callback_event,
		     vmbus_on_event, (unsigned long)channel);

	return channel;
}

/*
 * free_channel - Release the resources used by the vmbus channel object
 */
static void free_channel(struct vmbus_channel *channel)
{
	tasklet_kill(&channel->callback_event);

	kfree_rcu(channel, rcu);
}
static void percpu_channel_enq(void *arg)
{
	struct vmbus_channel *channel = arg;
	struct hv_per_cpu_context *hv_cpu
		= this_cpu_ptr(hv_context.cpu_context);

	list_add_tail_rcu(&channel->percpu_list, &hv_cpu->chan_list);
}

static void percpu_channel_deq(void *arg)
{
	struct vmbus_channel *channel = arg;

	list_del_rcu(&channel->percpu_list);
}
static void vmbus_release_relid(u32 relid)
{
	struct vmbus_channel_relid_released msg;

	memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
	msg.child_relid = relid;
	msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
	vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released),
		       true);
}
void hv_process_channel_removal(u32 relid)
{
	unsigned long flags;
	struct vmbus_channel *primary_channel, *channel;

	BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));

	/*
	 * Make sure channel is valid as we may have raced.
	 */
	channel = relid2channel(relid);
	if (!channel)
		return;

	BUG_ON(!channel->rescind);
	if (channel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(channel->target_cpu,
					 percpu_channel_deq, channel, true);
	} else {
		percpu_channel_deq(channel);
		put_cpu();
	}

	if (channel->primary_channel == NULL) {
		list_del(&channel->listentry);

		primary_channel = channel;
	} else {
		primary_channel = channel->primary_channel;
		spin_lock_irqsave(&primary_channel->lock, flags);
		list_del(&channel->sc_list);
		primary_channel->num_sc--;
		spin_unlock_irqrestore(&primary_channel->lock, flags);
	}

	/*
	 * We need to free the bit for init_vp_index() to work in the case
	 * of sub-channel, when we reload drivers like hv_netvsc.
	 */
	if (channel->affinity_policy == HV_LOCALIZED)
		cpumask_clear_cpu(channel->target_cpu,
				  &primary_channel->alloced_cpus_in_node);

	vmbus_release_relid(relid);

	free_channel(channel);
}
void vmbus_free_channels(void)
{
	struct vmbus_channel *channel, *tmp;

	list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
				 listentry) {
		/* hv_process_channel_removal() needs this */
		channel->rescind = true;

		vmbus_device_unregister(channel->device_obj);
	}
}
/*
 * vmbus_process_offer - Process the offer by creating a channel/device
 * associated with this offer
 */
static void vmbus_process_offer(struct vmbus_channel *newchannel)
{
	struct vmbus_channel *channel;
	bool fnew = true;
	unsigned long flags;
	u16 dev_type;
	int ret;

	/* Make sure this is a new offer */
	mutex_lock(&vmbus_connection.channel_mutex);

	/*
	 * Now that we have acquired the channel_mutex,
	 * we can release the potentially racing rescind thread.
	 */
	atomic_dec(&vmbus_connection.offer_in_progress);

	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (!uuid_le_cmp(channel->offermsg.offer.if_type,
				 newchannel->offermsg.offer.if_type) &&
		    !uuid_le_cmp(channel->offermsg.offer.if_instance,
				 newchannel->offermsg.offer.if_instance)) {
			fnew = false;
			break;
		}
	}

	if (fnew)
		list_add_tail(&newchannel->listentry,
			      &vmbus_connection.chn_list);

	mutex_unlock(&vmbus_connection.channel_mutex);

	if (!fnew) {
		/*
		 * Check to see if this is a sub-channel.
		 */
		if (newchannel->offermsg.offer.sub_channel_index != 0) {
			/*
			 * Process the sub-channel.
			 */
			newchannel->primary_channel = channel;
			spin_lock_irqsave(&channel->lock, flags);
			list_add_tail(&newchannel->sc_list, &channel->sc_list);
			channel->num_sc++;
			spin_unlock_irqrestore(&channel->lock, flags);
		} else {
			goto err_free_chan;
		}
	}

	dev_type = hv_get_dev_type(newchannel);

	init_vp_index(newchannel, dev_type);

	if (newchannel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(newchannel->target_cpu,
					 percpu_channel_enq,
					 newchannel, true);
	} else {
		percpu_channel_enq(newchannel);
		put_cpu();
	}

	/*
	 * This state is used to indicate a successful open
	 * so that when we do close the channel normally, we
	 * can cleanup properly.
	 */
	newchannel->state = CHANNEL_OPEN_STATE;

	if (!fnew) {
		if (channel->sc_creation_callback != NULL)
			channel->sc_creation_callback(newchannel);
		newchannel->probe_done = true;
		return;
	}

	/*
	 * Start the process of binding this offer to the driver.
	 * We need to set the DeviceObject field before calling
	 * vmbus_child_dev_add().
	 */
	newchannel->device_obj = vmbus_device_create(
		&newchannel->offermsg.offer.if_type,
		&newchannel->offermsg.offer.if_instance,
		newchannel);
	if (!newchannel->device_obj)
		goto err_deq_chan;

	newchannel->device_obj->device_id = dev_type;
	/*
	 * Add the new device to the bus. This will kick off device-driver
	 * binding which eventually invokes the device driver's AddDevice()
	 * method.
	 */
	ret = vmbus_device_register(newchannel->device_obj);
	if (ret != 0) {
		pr_err("unable to add child device object (relid %d)\n",
		       newchannel->offermsg.child_relid);
		kfree(newchannel->device_obj);
		goto err_deq_chan;
	}

	newchannel->probe_done = true;
	return;

err_deq_chan:
	mutex_lock(&vmbus_connection.channel_mutex);
	list_del(&newchannel->listentry);
	mutex_unlock(&vmbus_connection.channel_mutex);

	if (newchannel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(newchannel->target_cpu,
					 percpu_channel_deq, newchannel, true);
	} else {
		percpu_channel_deq(newchannel);
		put_cpu();
	}

	vmbus_release_relid(newchannel->offermsg.child_relid);

err_free_chan:
	free_channel(newchannel);
}
/*
 * We use this state to statically distribute the channel interrupt load.
 */
static int next_numa_node_id;

/*
 * Starting with Win8, we can statically distribute the incoming
 * channel interrupt load by binding a channel to VCPU.
 * We do this in a hierarchical fashion:
 * First distribute the primary channels across available NUMA nodes
 * and then distribute the subchannels amongst the CPUs in the NUMA
 * node assigned to the primary channel.
 *
 * For pre-win8 hosts or non-performance critical channels we assign the
 * first CPU in the first NUMA node.
 */
static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
{
	u32 cur_cpu;
	bool perf_chn = vmbus_devs[dev_type].perf_device;
	struct vmbus_channel *primary = channel->primary_channel;
	int next_node;
	struct cpumask available_mask;
	struct cpumask *alloced_mask;

	if ((vmbus_proto_version == VERSION_WS2008) ||
	    (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) {
		/*
		 * Prior to win8, all channel interrupts are
		 * delivered on cpu 0.
		 * Also if the channel is not a performance critical
		 * channel, bind it to cpu 0.
		 */
		channel->numa_node = 0;
		channel->target_cpu = 0;
		channel->target_vp = hv_cpu_number_to_vp_number(0);
		return;
	}

	/*
	 * Based on the channel affinity policy, we will assign the NUMA
	 * nodes.
	 */
	if ((channel->affinity_policy == HV_BALANCED) || (!primary)) {
		while (true) {
			next_node = next_numa_node_id++;
			if (next_node == nr_node_ids) {
				next_node = next_numa_node_id = 0;
				continue;
			}
			if (cpumask_empty(cpumask_of_node(next_node)))
				continue;
			break;
		}
		channel->numa_node = next_node;
		primary = channel;
	}
	alloced_mask = &hv_context.hv_numa_map[primary->numa_node];

	if (cpumask_weight(alloced_mask) ==
	    cpumask_weight(cpumask_of_node(primary->numa_node))) {
		/*
		 * We have cycled through all the CPUs in the node;
		 * reset the alloced map.
		 */
		cpumask_clear(alloced_mask);
	}

	cpumask_xor(&available_mask, alloced_mask,
		    cpumask_of_node(primary->numa_node));

	cur_cpu = -1;

	if (primary->affinity_policy == HV_LOCALIZED) {
		/*
		 * Normally Hyper-V host doesn't create more subchannels
		 * than there are VCPUs on the node but it is possible when not
		 * all present VCPUs on the node are initialized by guest.
		 * Clear the alloced_cpus_in_node to start over.
		 */
		if (cpumask_equal(&primary->alloced_cpus_in_node,
				  cpumask_of_node(primary->numa_node)))
			cpumask_clear(&primary->alloced_cpus_in_node);
	}

	while (true) {
		cur_cpu = cpumask_next(cur_cpu, &available_mask);
		if (cur_cpu >= nr_cpu_ids) {
			cur_cpu = -1;
			cpumask_copy(&available_mask,
				     cpumask_of_node(primary->numa_node));
			continue;
		}

		if (primary->affinity_policy == HV_LOCALIZED) {
			/*
			 * NOTE: in the case of sub-channel, we clear the
			 * sub-channel related bit(s) in
			 * primary->alloced_cpus_in_node in
			 * hv_process_channel_removal(), so when we
			 * reload drivers like hv_netvsc in SMP guest, here
			 * we're able to re-allocate
			 * bit from primary->alloced_cpus_in_node.
			 */
			if (!cpumask_test_cpu(cur_cpu,
					      &primary->alloced_cpus_in_node)) {
				cpumask_set_cpu(cur_cpu,
						&primary->alloced_cpus_in_node);
				cpumask_set_cpu(cur_cpu, alloced_mask);
				break;
			}
		} else {
			cpumask_set_cpu(cur_cpu, alloced_mask);
			break;
		}
	}

	channel->target_cpu = cur_cpu;
	channel->target_vp = hv_cpu_number_to_vp_number(cur_cpu);
}
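
/*
 * Worked example (hypothetical topology, not from this file): on a guest
 * with two NUMA nodes of four VCPUs each, successive perf-critical primary
 * channels take numa_node 0, 1, 0, 1, ... from the next_numa_node_id
 * round-robin above, skipping CPU-less nodes.  An HV_LOCALIZED sub-channel
 * then lands on the first CPU of its primary's node that is still clear
 * in alloced_cpus_in_node, i.e. effectively:
 *
 *	cpumask_xor(&available_mask, alloced_mask, cpumask_of_node(0));
 *	cur_cpu = cpumask_next(-1, &available_mask);	// first unused CPU
 */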
static void vmbus_wait_for_unload(void)
{
	int cpu;
	void *page_addr;
	struct hv_message *msg;
	struct vmbus_channel_message_header *hdr;
	u32 message_type;

	/*
	 * CHANNELMSG_UNLOAD_RESPONSE is always delivered to the CPU which was
	 * used for initial contact or to CPU0 depending on host version. When
	 * we're crashing on a different CPU let's hope that IRQ handler on
	 * the cpu which receives CHANNELMSG_UNLOAD_RESPONSE is still
	 * functional and vmbus_unload_response() will complete
	 * vmbus_connection.unload_event. If not, the last thing we can do is
	 * read message pages for all CPUs directly.
	 */
	while (1) {
		if (completion_done(&vmbus_connection.unload_event))
			break;

		for_each_online_cpu(cpu) {
			struct hv_per_cpu_context *hv_cpu
				= per_cpu_ptr(hv_context.cpu_context, cpu);

			page_addr = hv_cpu->synic_message_page;
			msg = (struct hv_message *)page_addr
				+ VMBUS_MESSAGE_SINT;

			message_type = READ_ONCE(msg->header.message_type);
			if (message_type == HVMSG_NONE)
				continue;

			hdr = (struct vmbus_channel_message_header *)
				msg->u.payload;

			if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
				complete(&vmbus_connection.unload_event);

			vmbus_signal_eom(msg, message_type);
		}

		mdelay(10);
	}

	/*
	 * We're crashing and already got the UNLOAD_RESPONSE, cleanup all
	 * maybe-pending messages on all CPUs to be able to receive new
	 * messages after we reconnect.
	 */
	for_each_online_cpu(cpu) {
		struct hv_per_cpu_context *hv_cpu
			= per_cpu_ptr(hv_context.cpu_context, cpu);

		page_addr = hv_cpu->synic_message_page;
		msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
		msg->header.message_type = HVMSG_NONE;
	}
}
/*
 * vmbus_unload_response - Handler for the unload response.
 */
static void vmbus_unload_response(struct vmbus_channel_message_header *hdr)
{
	/*
	 * This is a global event; just wakeup the waiting thread.
	 * Once we successfully unload, we can cleanup the monitor state.
	 */
	complete(&vmbus_connection.unload_event);
}
void vmbus_initiate_unload(bool crash)
{
	struct vmbus_channel_message_header hdr;

	/* Pre-Win2012R2 hosts don't support reconnect */
	if (vmbus_proto_version < VERSION_WIN8_1)
		return;

	init_completion(&vmbus_connection.unload_event);
	memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
	hdr.msgtype = CHANNELMSG_UNLOAD;
	vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header),
		       !crash);

	/*
	 * vmbus_initiate_unload() is also called on crash and the crash can be
	 * happening in an interrupt context, where scheduling is impossible.
	 */
	if (!crash)
		wait_for_completion(&vmbus_connection.unload_event);
	else
		vmbus_wait_for_unload();
}
/*
 * vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
 *
 */
static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_offer_channel *offer;
	struct vmbus_channel *newchannel;

	offer = (struct vmbus_channel_offer_channel *)hdr;

	/* Allocate the channel object and save this offer. */
	newchannel = alloc_channel();
	if (!newchannel) {
		vmbus_release_relid(offer->child_relid);
		atomic_dec(&vmbus_connection.offer_in_progress);
		pr_err("Unable to allocate channel object\n");
		return;
	}

	/*
	 * Setup state for signalling the host.
	 */
	newchannel->sig_event = VMBUS_EVENT_CONNECTION_ID;

	if (vmbus_proto_version != VERSION_WS2008) {
		newchannel->is_dedicated_interrupt =
				(offer->is_dedicated_interrupt != 0);
		newchannel->sig_event = offer->connection_id;
	}

	memcpy(&newchannel->offermsg, offer,
	       sizeof(struct vmbus_channel_offer_channel));
	newchannel->monitor_grp = (u8)offer->monitorid / 32;
	newchannel->monitor_bit = (u8)offer->monitorid % 32;

	vmbus_process_offer(newchannel);
}
/*
 * vmbus_onoffer_rescind - Rescind offer handler.
 *
 * We queue a work item to process this offer synchronously
 */
static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_rescind_offer *rescind;
	struct vmbus_channel *channel;
	struct device *dev;

	rescind = (struct vmbus_channel_rescind_offer *)hdr;

	/*
	 * The offer msg and the corresponding rescind msg
	 * from the host are guaranteed to be ordered -
	 * offer comes in first and then the rescind.
	 * Since we process these events in work elements,
	 * and with preemption, we may end up processing
	 * the events out of order. Given that we handle these
	 * work elements on the same CPU, this is possible only
	 * in the case of preemption. In any case wait here
	 * until the offer processing has moved beyond the
	 * point where the channel is discoverable.
	 */
	while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
		/*
		 * We wait here until the processing of any in-flight
		 * channel offer has completed.
		 */
		msleep(1);
	}

	mutex_lock(&vmbus_connection.channel_mutex);
	channel = relid2channel(rescind->child_relid);
	mutex_unlock(&vmbus_connection.channel_mutex);

	if (channel == NULL) {
		/*
		 * We failed in processing the offer message;
		 * we would have cleaned up the relid in that
		 * failure path.
		 */
		return;
	}

	/*
	 * Now wait for offer handling to complete.
	 */
	while (READ_ONCE(channel->probe_done) == false) {
		/*
		 * We wait here until the processing of the current
		 * channel offer has completed.
		 */
		msleep(1);
	}

	/*
	 * At this point, the rescind handling can proceed safely.
	 */
	if (channel->device_obj) {
		if (channel->chn_rescind_callback) {
			channel->chn_rescind_callback(channel);
			vmbus_rescind_cleanup(channel);
			return;
		}
		/*
		 * We will have to unregister this device from the
		 * driver core.
		 */
		dev = get_device(&channel->device_obj->device);
		if (dev) {
			vmbus_rescind_cleanup(channel);
			vmbus_device_unregister(channel->device_obj);
			put_device(dev);
		}
	}
	if (channel->primary_channel != NULL) {
		/*
		 * Sub-channel is being rescinded. Following is the channel
		 * close sequence when initiated from the driver (refer to
		 * vmbus_close() for details):
		 * 1. Close all sub-channels first
		 * 2. Then close the primary channel.
		 */
		mutex_lock(&vmbus_connection.channel_mutex);
		vmbus_rescind_cleanup(channel);
		if (channel->state == CHANNEL_OPEN_STATE) {
			/*
			 * The channel is currently not open;
			 * it is safe for us to cleanup the channel.
			 */
			hv_process_channel_removal(rescind->child_relid);
		}
		mutex_unlock(&vmbus_connection.channel_mutex);
	}
}
void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
{
	BUG_ON(!is_hvsock_channel(channel));

	/* We always get a rescind msg when a connection is closed. */
	while (!READ_ONCE(channel->probe_done) || !READ_ONCE(channel->rescind))
		msleep(1);

	vmbus_device_unregister(channel->device_obj);
}
EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);
/*
 * vmbus_onoffers_delivered -
 * This is invoked when all offers have been delivered.
 *
 * Nothing to do here.
 */
static void vmbus_onoffers_delivered(
			struct vmbus_channel_message_header *hdr)
{
}
/*
 * vmbus_onopen_result - Open result handler.
 *
 * This is invoked when we receive a response to our channel open request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_onopen_result(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_open_result *result;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_open_channel *openmsg;
	unsigned long flags;

	result = (struct vmbus_channel_open_result *)hdr;

	/*
	 * Find the open msg, copy the result and signal/unblock the wait event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_OPENCHANNEL) {
			openmsg =
			(struct vmbus_channel_open_channel *)msginfo->msg;
			if (openmsg->child_relid == result->child_relid &&
			    openmsg->openid == result->openid) {
				memcpy(&msginfo->response.open_result,
				       result,
				       sizeof(
					struct vmbus_channel_open_result));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/*
 * vmbus_ongpadl_created - GPADL created handler.
 *
 * This is invoked when we receive a response to our gpadl create request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_gpadl_created *gpadlcreated;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_gpadl_header *gpadlheader;
	unsigned long flags;

	gpadlcreated = (struct vmbus_channel_gpadl_created *)hdr;

	/*
	 * Find the establish msg, copy the result and signal/unblock the wait
	 * event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_GPADL_HEADER) {
			gpadlheader =
			(struct vmbus_channel_gpadl_header *)requestheader;

			if ((gpadlcreated->child_relid ==
			     gpadlheader->child_relid) &&
			    (gpadlcreated->gpadl == gpadlheader->gpadl)) {
				memcpy(&msginfo->response.gpadl_created,
				       gpadlcreated,
				       sizeof(
					struct vmbus_channel_gpadl_created));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/*
 * vmbus_ongpadl_torndown - GPADL torndown handler.
 *
 * This is invoked when we receive a response to our gpadl teardown request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_ongpadl_torndown(
			struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_gpadl_torndown *gpadl_torndown;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_gpadl_teardown *gpadl_teardown;
	unsigned long flags;

	gpadl_torndown = (struct vmbus_channel_gpadl_torndown *)hdr;

	/*
	 * Find the open msg, copy the result and signal/unblock the wait event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_GPADL_TEARDOWN) {
			gpadl_teardown =
			(struct vmbus_channel_gpadl_teardown *)requestheader;

			if (gpadl_torndown->gpadl == gpadl_teardown->gpadl) {
				memcpy(&msginfo->response.gpadl_torndown,
				       gpadl_torndown,
				       sizeof(
					struct vmbus_channel_gpadl_torndown));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/*
 * vmbus_onversion_response - Version response handler
 *
 * This is invoked when we receive a response to our initiate contact request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_onversion_response(
		struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_version_response *version_response;
	unsigned long flags;

	version_response = (struct vmbus_channel_version_response *)hdr;
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype ==
		    CHANNELMSG_INITIATE_CONTACT) {
			memcpy(&msginfo->response.version_response,
			       version_response,
			       sizeof(struct vmbus_channel_version_response));
			complete(&msginfo->waitevent);
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/* Channel message dispatch table */
const struct vmbus_channel_message_table_entry
channel_message_table[CHANNELMSG_COUNT] = {
	{ CHANNELMSG_INVALID,			0, NULL },
	{ CHANNELMSG_OFFERCHANNEL,		0, vmbus_onoffer },
	{ CHANNELMSG_RESCIND_CHANNELOFFER,	0, vmbus_onoffer_rescind },
	{ CHANNELMSG_REQUESTOFFERS,		0, NULL },
	{ CHANNELMSG_ALLOFFERS_DELIVERED,	1, vmbus_onoffers_delivered },
	{ CHANNELMSG_OPENCHANNEL,		0, NULL },
	{ CHANNELMSG_OPENCHANNEL_RESULT,	1, vmbus_onopen_result },
	{ CHANNELMSG_CLOSECHANNEL,		0, NULL },
	{ CHANNELMSG_GPADL_HEADER,		0, NULL },
	{ CHANNELMSG_GPADL_BODY,		0, NULL },
	{ CHANNELMSG_GPADL_CREATED,		1, vmbus_ongpadl_created },
	{ CHANNELMSG_GPADL_TEARDOWN,		0, NULL },
	{ CHANNELMSG_GPADL_TORNDOWN,		1, vmbus_ongpadl_torndown },
	{ CHANNELMSG_RELID_RELEASED,		0, NULL },
	{ CHANNELMSG_INITIATE_CONTACT,		0, NULL },
	{ CHANNELMSG_VERSION_RESPONSE,		1, vmbus_onversion_response },
	{ CHANNELMSG_UNLOAD,			0, NULL },
	{ CHANNELMSG_UNLOAD_RESPONSE,		1, vmbus_unload_response },
	{ CHANNELMSG_18,			0, NULL },
	{ CHANNELMSG_19,			0, NULL },
	{ CHANNELMSG_20,			0, NULL },
	{ CHANNELMSG_TL_CONNECT_REQUEST,	0, NULL },
};
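
/*
 * Note: entries with a NULL handler are requests that only flow from the
 * guest to the host (e.g. CHANNELMSG_OPENCHANNEL, CHANNELMSG_GPADL_HEADER)
 * or messages that need no action on receipt; vmbus_onmessage() below
 * dispatches every incoming channel message through this table.
 */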
/*
 * vmbus_onmessage - Handler for channel protocol messages.
 *
 * This is invoked in the vmbus worker thread context.
 */
void vmbus_onmessage(void *context)
{
	struct hv_message *msg = context;
	struct vmbus_channel_message_header *hdr;
	int size;

	hdr = (struct vmbus_channel_message_header *)msg->u.payload;
	size = msg->header.payload_size;

	if (hdr->msgtype >= CHANNELMSG_COUNT) {
		pr_err("Received invalid channel message type %d size %d\n",
		       hdr->msgtype, size);
		print_hex_dump_bytes("", DUMP_PREFIX_NONE,
				     (unsigned char *)msg->u.payload, size);
		return;
	}

	if (channel_message_table[hdr->msgtype].message_handler)
		channel_message_table[hdr->msgtype].message_handler(hdr);
	else
		pr_err("Unhandled channel message type %d\n", hdr->msgtype);
}
/*
 * vmbus_request_offers - Send a request to get all our pending offers.
 */
int vmbus_request_offers(void)
{
	struct vmbus_channel_message_header *msg;
	struct vmbus_channel_msginfo *msginfo;
	int ret;

	msginfo = kmalloc(sizeof(*msginfo) +
			  sizeof(struct vmbus_channel_message_header),
			  GFP_KERNEL);
	if (!msginfo)
		return -ENOMEM;

	msg = (struct vmbus_channel_message_header *)msginfo->msg;
	msg->msgtype = CHANNELMSG_REQUESTOFFERS;

	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_message_header),
			     true);
	if (ret != 0) {
		pr_err("Unable to request offers - %d\n", ret);
		goto cleanup;
	}

cleanup:
	kfree(msginfo);
	return ret;
}
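
/*
 * Note: vmbus_request_offers() is invoked once from the vmbus connect
 * path during initialization; the resulting CHANNELMSG_OFFERCHANNEL
 * messages arrive asynchronously and reach vmbus_onoffer() via the
 * dispatch table above.
 */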
/*
 * Retrieve the (sub) channel on which to send an outgoing request.
 * When a primary channel has multiple sub-channels, we try to
 * distribute the load equally amongst all available channels.
 */
struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary)
{
	struct list_head *cur, *tmp;
	int cur_cpu;
	struct vmbus_channel *cur_channel;
	struct vmbus_channel *outgoing_channel = primary;
	int next_channel;
	int i = 1;

	if (list_empty(&primary->sc_list))
		return outgoing_channel;

	next_channel = primary->next_oc++;

	if (next_channel > (primary->num_sc)) {
		primary->next_oc = 0;
		return outgoing_channel;
	}

	cur_cpu = hv_cpu_number_to_vp_number(smp_processor_id());
	list_for_each_safe(cur, tmp, &primary->sc_list) {
		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
		if (cur_channel->state != CHANNEL_OPENED_STATE)
			continue;

		if (cur_channel->target_vp == cur_cpu)
			return cur_channel;

		if (i == next_channel)
			return cur_channel;

		i++;
	}

	return outgoing_channel;
}
EXPORT_SYMBOL_GPL(vmbus_get_outgoing_channel);
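
/*
 * Usage sketch (an assumption modeled on the storvsc/netvsc pattern, not
 * part of this file): a driver distributing I/O across sub-channels picks
 * the send channel per request; "device" and "request" are hypothetical.
 *
 *	struct vmbus_channel *outgoing;
 *
 *	outgoing = vmbus_get_outgoing_channel(device->channel);
 *	ret = vmbus_sendpacket(outgoing, &request, sizeof(request),
 *			       (unsigned long)&request, VM_PKT_DATA_INBAND,
 *			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 */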
static void invoke_sc_cb(struct vmbus_channel *primary_channel)
{
	struct list_head *cur, *tmp;
	struct vmbus_channel *cur_channel;

	if (primary_channel->sc_creation_callback == NULL)
		return;

	list_for_each_safe(cur, tmp, &primary_channel->sc_list) {
		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);

		primary_channel->sc_creation_callback(cur_channel);
	}
}

void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
				void (*sc_cr_cb)(struct vmbus_channel *new_sc))
{
	primary_channel->sc_creation_callback = sc_cr_cb;
}
EXPORT_SYMBOL_GPL(vmbus_set_sc_create_callback);
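
/*
 * Usage sketch (an assumption modeled on storvsc/netvsc, not part of this
 * file): a driver registers its handler on the primary channel before
 * asking the host for sub-channels; handle_sc_creation and ring_size are
 * hypothetical names.
 *
 *	static void handle_sc_creation(struct vmbus_channel *new_sc)
 *	{
 *		// Open the new sub-channel with the same ring sizes and
 *		// callback as the primary channel.
 *		vmbus_open(new_sc, ring_size, ring_size, NULL, 0,
 *			   on_channel_callback, new_sc);
 *	}
 *
 *	vmbus_set_sc_create_callback(primary, handle_sc_creation);
 */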
bool vmbus_are_subchannels_present(struct vmbus_channel *primary)
{
	bool ret;

	ret = !list_empty(&primary->sc_list);

	if (ret) {
		/*
		 * Invoke the callback on sub-channel creation.
		 * This will present a uniform interface to the
		 * clients.
		 */
		invoke_sc_cb(primary);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present);
void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
		void (*chn_rescind_cb)(struct vmbus_channel *))
{
	channel->chn_rescind_callback = chn_rescind_cb;
}
EXPORT_SYMBOL_GPL(vmbus_set_chn_rescind_callback);
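
/*
 * Usage sketch (an assumption modeled on the hv_sock transport): drivers
 * that must react to a rescind themselves, e.g. to tear down a hvsock
 * connection, install the callback when the channel is set up;
 * hvs_close_connection is a hypothetical handler name here.
 *
 *	vmbus_set_chn_rescind_callback(channel, hvs_close_connection);
 */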