2 * Copyright (C) 2010 - 2015 UNISYS CORPORATION
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
12 * NON INFRINGEMENT. See the GNU General Public License for more
16 #include <linux/acpi.h>
17 #include <linux/crash_dump.h>
20 #include "visorbus_private.h"
/*
 * Module-wide GUIDs and tunables.
 * NOTE(review): this extract elides some original lines (line numbering
 * inside the content jumps); do not assume the list below is complete.
 */
22 /* {72120008-4AAB-11DC-8530-444553544200} */
23 #define VISOR_SIOVM_GUID GUID_INIT(0x72120008, 0x4AAB, 0x11DC, 0x85, 0x30, \
24 0x44, 0x45, 0x53, 0x54, 0x42, 0x00)
/* Channel-type GUIDs used to recognize vHBA, SIOVM and controlvm channels. */
26 static const guid_t visor_vhba_channel_guid = VISOR_VHBA_CHANNEL_GUID;
27 static const guid_t visor_siovm_guid = VISOR_SIOVM_GUID;
28 static const guid_t visor_controlvm_channel_guid = VISOR_CONTROLVM_CHANNEL_GUID;
/* Polling periods (in jiffies) for the controlvm channel work item. */
30 #define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
31 #define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
/* Upper bound on a controlvm message payload we will buffer (128 KiB). */
33 #define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)
/* CPUID leaf used to detect that we are running under the s-Par visor. */
35 #define UNISYS_VISOR_LEAF_ID 0x40000000
37 /* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
38 #define UNISYS_VISOR_ID_EBX 0x73696e55
39 #define UNISYS_VISOR_ID_ECX 0x70537379
40 #define UNISYS_VISOR_ID_EDX 0x34367261
43 * When the controlvm channel is idle for at least MIN_IDLE_SECONDS, we switch
44 * to slow polling mode. As soon as we get a controlvm message, we switch back
45 * to fast polling mode.
47 #define MIN_IDLE_SECONDS 10
/*
 * State for parsing a controlvm message's parameter payload.
 * NOTE(review): fields between the ones shown are elided in this extract;
 * the struct's closing brace is also not visible here.
 */
49 struct parser_context {
50 unsigned long allocbytes;
51 unsigned long param_bytes;
/* bytes_remaining is consumed as parser_string_get() scans the payload. */
53 unsigned long bytes_remaining;
55 struct visor_controlvm_parameters_header data;
58 /* VMCALL_CONTROLVM_ADDR: Used by all guests, not just IO. */
59 #define VMCALL_CONTROLVM_ADDR 0x0501
/*
 * Result codes returned by the hypervisor VMCALL interface; translated to
 * Linux errnos in unisys_vmcall().
 * NOTE(review): the enum's opening "enum ... {" line is elided in this
 * extract.
 */
62 VMCALL_RESULT_SUCCESS = 0,
63 VMCALL_RESULT_INVALID_PARAM = 1,
64 VMCALL_RESULT_DATA_UNAVAILABLE = 2,
65 VMCALL_RESULT_FAILURE_UNAVAILABLE = 3,
66 VMCALL_RESULT_DEVICE_ERROR = 4,
67 VMCALL_RESULT_DEVICE_NOT_READY = 5
71 * struct vmcall_io_controlvm_addr_params - Structure for IO VMCALLS. Has
72 * parameters to VMCALL_CONTROLVM_ADDR
74 * @address: The Guest-relative physical address of the ControlVm channel.
75 * This VMCall fills this in with the appropriate address.
76 * Contents provided by this VMCALL (OUT).
77 * @channel_bytes: The size of the ControlVm channel in bytes This VMCall fills
78 * this in with the appropriate address. Contents provided by
80 * @unused: Unused Bytes in the 64-Bit Aligned Struct.
/* NOTE(review): the member declarations of this struct are elided here. */
82 struct vmcall_io_controlvm_addr_params {
/*
 * Per-driver singleton state for the visorchipset platform device; a single
 * instance is pointed to by chipset_dev below.
 */
88 struct visorchipset_device {
89 struct acpi_device *acpi_device;
/* Current polling period; toggled between the FAST/SLOW jiffies values. */
90 unsigned long poll_jiffies;
91 /* when we got our last controlvm message */
92 unsigned long most_recent_message_jiffies;
93 struct delayed_work periodic_controlvm_work;
94 struct visorchannel *controlvm_channel;
95 unsigned long controlvm_payload_bytes_buffered;
97 * The following variables are used to handle the scenario where we are
98 * unable to offload the payload from a controlvm message due to memory
99 * requirements. In this scenario, we simply stash the controlvm
100 * message, then attempt to process it again the next time
101 * controlvm_periodic_work() runs.
103 struct controlvm_message controlvm_pending_msg;
104 bool controlvm_pending_msg_valid;
/* Parameter block handed to VMCALL_CONTROLVM_ADDR (see unisys_vmcall). */
105 struct vmcall_io_controlvm_addr_params controlvm_params;
/* Singleton instance; assumed allocated during probe — not visible here. */
108 static struct visorchipset_device *chipset_dev;
/*
 * One queued parahotplug (SR-IOV enable/disable) request; linked on
 * parahotplug_request_list until the udev script responds via sysfs.
 * NOTE(review): an id field (used by parahotplug_next_id) is elided here.
 */
110 struct parahotplug_request {
111 struct list_head list;
/* Jiffies timestamp after which this request is considered expired. */
113 unsigned long expiration;
114 struct controlvm_message msg;
117 /* prototypes for attributes */
/*
 * toolaction sysfs attribute: reads/writes the tool_action byte in the
 * controlvm channel. NOTE(review): local declarations, the offsetof member
 * name, and error-return lines are elided in this extract.
 */
118 static ssize_t toolaction_show(struct device *dev,
119 struct device_attribute *attr,
125 err = visorchannel_read(chipset_dev->controlvm_channel,
126 offsetof(struct visor_controlvm_channel,
128 &tool_action, sizeof(u8));
132 return sprintf(buf, "%u\n", tool_action);
135 static ssize_t toolaction_store(struct device *dev,
136 struct device_attribute *attr,
137 const char *buf, size_t count)
/* Parse a base-10 u8 from the user buffer; rejects anything else. */
142 if (kstrtou8(buf, 10, &tool_action))
145 err = visorchannel_write(chipset_dev->controlvm_channel,
146 offsetof(struct visor_controlvm_channel,
148 &tool_action, sizeof(u8));
153 static DEVICE_ATTR_RW(toolaction);
/*
 * boottotool sysfs attribute: exposes the boot_to_tool flag inside the
 * channel's efi_visor_indication structure.
 * NOTE(review): error checks and some declarations are elided here.
 */
155 static ssize_t boottotool_show(struct device *dev,
156 struct device_attribute *attr,
159 struct efi_visor_indication efi_visor_indication;
162 err = visorchannel_read(chipset_dev->controlvm_channel,
163 offsetof(struct visor_controlvm_channel,
165 &efi_visor_indication,
166 sizeof(struct efi_visor_indication))
169 return sprintf(buf, "%u\n", efi_visor_indication.boot_to_tool);
172 static ssize_t boottotool_store(struct device *dev,
173 struct device_attribute *attr,
174 const char *buf, size_t count)
177 struct efi_visor_indication efi_visor_indication;
/* Accept a base-10 integer; only boot_to_tool is set from it below. */
179 if (kstrtoint(buf, 10, &val))
182 efi_visor_indication.boot_to_tool = val;
183 err = visorchannel_write(chipset_dev->controlvm_channel,
184 offsetof(struct visor_controlvm_channel,
186 &(efi_visor_indication),
187 sizeof(struct efi_visor_indication));
192 static DEVICE_ATTR_RW(boottotool);
/*
 * error sysfs attribute: reads/writes a u32 error field in the controlvm
 * channel. NOTE(review): the offsetof member name and error-return lines
 * are elided in this extract.
 */
194 static ssize_t error_show(struct device *dev, struct device_attribute *attr,
200 err = visorchannel_read(chipset_dev->controlvm_channel,
201 offsetof(struct visor_controlvm_channel,
203 &error, sizeof(u32));
206 return sprintf(buf, "%u\n", error);
209 static ssize_t error_store(struct device *dev, struct device_attribute *attr,
210 const char *buf, size_t count)
/* Parse a base-10 u32 from the user buffer. */
215 if (kstrtou32(buf, 10, &error))
218 err = visorchannel_write(chipset_dev->controlvm_channel,
219 offsetof(struct visor_controlvm_channel,
221 &error, sizeof(u32));
226 static DEVICE_ATTR_RW(error);
/*
 * textid sysfs attribute: reads/writes the installation_text_id u32 in the
 * controlvm channel. NOTE(review): some lines are elided in this extract.
 */
228 static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
234 err = visorchannel_read(chipset_dev->controlvm_channel,
235 offsetof(struct visor_controlvm_channel,
236 installation_text_id),
237 &text_id, sizeof(u32));
241 return sprintf(buf, "%u\n", text_id);
244 static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
245 const char *buf, size_t count)
/* Parse a base-10 u32 from the user buffer. */
250 if (kstrtou32(buf, 10, &text_id))
253 err = visorchannel_write(chipset_dev->controlvm_channel,
254 offsetof(struct visor_controlvm_channel,
255 installation_text_id),
256 &text_id, sizeof(u32));
261 static DEVICE_ATTR_RW(textid);
/*
 * remaining_steps sysfs attribute: reads/writes the u16
 * installation_remaining_steps field in the controlvm channel.
 * NOTE(review): error-return lines are elided in this extract.
 */
263 static ssize_t remaining_steps_show(struct device *dev,
264 struct device_attribute *attr, char *buf)
266 u16 remaining_steps = 0;
269 err = visorchannel_read(chipset_dev->controlvm_channel,
270 offsetof(struct visor_controlvm_channel,
271 installation_remaining_steps),
272 &remaining_steps, sizeof(u16));
276 return sprintf(buf, "%hu\n", remaining_steps);
279 static ssize_t remaining_steps_store(struct device *dev,
280 struct device_attribute *attr,
281 const char *buf, size_t count)
/* Parse a base-10 u16 from the user buffer. */
286 if (kstrtou16(buf, 10, &remaining_steps))
289 err = visorchannel_write(chipset_dev->controlvm_channel,
290 offsetof(struct visor_controlvm_channel,
291 installation_remaining_steps),
292 &remaining_steps, sizeof(u16));
297 static DEVICE_ATTR_RW(remaining_steps);
/*
 * Bus/device-number pair used as the match key for bus_find_device().
 * NOTE(review): member declarations are elided in this extract.
 */
299 struct visor_busdev {
/* Match callback: true when dev's chipset bus/dev numbers equal *data. */
304 static int match_visorbus_dev_by_id(struct device *dev, void *data)
306 struct visor_device *vdev = to_visor_device(dev);
307 struct visor_busdev *id = data;
309 if ((vdev->chipset_bus_no == id->bus_no) &&
310 (vdev->chipset_dev_no == id->dev_no))
/*
 * Look up a visor_device by (bus_no, dev_no), optionally continuing a
 * previous search from @from. Returns NULL when no match is found.
 * NOTE(review): bus_find_device() takes a reference on the device it
 * returns; the put for @from (if any) is elided in this extract.
 */
316 struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
317 struct visor_device *from)
320 struct device *dev_start = NULL;
321 struct visor_device *vdev = NULL;
322 struct visor_busdev id = {
328 dev_start = &from->device;
329 dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
330 match_visorbus_dev_by_id);
332 vdev = to_visor_device(dev);
/*
 * Initialize an outgoing controlvm response message: copy the request
 * header, zero the payload fields, and encode @response (negative values
 * mark the message failed, with |response| as the completion status).
 */
336 static void controlvm_init_response(struct controlvm_message *msg,
337 struct controlvm_message_header *msg_hdr,
340 memset(msg, 0, sizeof(struct controlvm_message));
341 memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
342 msg->hdr.payload_bytes = 0;
343 msg->hdr.payload_vm_offset = 0;
344 msg->hdr.payload_max_bytes = 0;
/* Negative response => failure; store its magnitude as the status. */
346 msg->hdr.flags.failed = 1;
347 msg->hdr.completion_status = (u32)(-response);
/*
 * Respond to a CHIPSET_INIT request, echoing back the feature bits we
 * support, via the controlvm request queue.
 */
351 static int controlvm_respond_chipset_init(
352 struct controlvm_message_header *msg_hdr,
354 enum visor_chipset_feature features)
356 struct controlvm_message outmsg;
358 controlvm_init_response(&outmsg, msg_hdr, response);
359 outmsg.cmd.init_chipset.features = features;
360 return visorchannel_signalinsert(chipset_dev->controlvm_channel,
361 CONTROLVM_QUEUE_REQUEST, &outmsg);
/*
 * Handle a CONTROLVM_CHIPSET_INIT message. Guards against double init via
 * the static chipset_inited flag, negotiates feature bits (parahotplug),
 * and replies if the sender expects a response.
 */
364 static int chipset_init(struct controlvm_message *inmsg)
366 static int chipset_inited;
367 enum visor_chipset_feature features = 0;
368 int rc = CONTROLVM_RESP_SUCCESS;
/* Second and later CHIPSET_INIT messages are rejected as already done. */
371 if (chipset_inited) {
372 rc = -CONTROLVM_RESP_ALREADY_DONE;
379 * Set features to indicate we support parahotplug (if Command also
/* Keep only the parahotplug bit from the features Command offered. */
382 features = inmsg->cmd.init_chipset.features &
383 VISOR_CHIPSET_FEATURE_PARA_HOTPLUG;
386 * Set the "reply" bit so Command knows this is a features-aware
389 features |= VISOR_CHIPSET_FEATURE_REPLY;
392 if (inmsg->hdr.flags.response_expected)
393 res = controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
/*
 * Send a generic controlvm response. When @state is non-NULL the response
 * carries a device_change_state payload marked as a physical device.
 * NOTE(review): the early-return taken for test_message == 1 is elided in
 * this extract — confirm against the full source.
 */
398 static int controlvm_respond(struct controlvm_message_header *msg_hdr,
400 struct visor_segment_state *state)
402 struct controlvm_message outmsg;
404 controlvm_init_response(&outmsg, msg_hdr, response);
405 if (outmsg.hdr.flags.test_message == 1)
409 outmsg.cmd.device_change_state.state = *state;
410 outmsg.cmd.device_change_state.flags.phys_device = 1;
413 return visorchannel_signalinsert(chipset_dev->controlvm_channel,
414 CONTROLVM_QUEUE_REQUEST, &outmsg);
/*
 * Crash-message kind selector; enumerators (CRASH_DEV / CRASH_BUS, used by
 * the switch below) are elided in this extract.
 */
417 enum crash_obj_type {
/*
 * Persist a bus- or device-create message into the channel's saved crash
 * message area so a kdump kernel can replay it (see
 * setup_crash_devices_work_queue). Validates the saved-message count, then
 * writes @msg at the slot selected by @cr_type.
 */
422 static int save_crash_message(struct controlvm_message *msg,
423 enum crash_obj_type cr_type)
425 u32 local_crash_msg_offset;
426 u16 local_crash_msg_count;
/* Read how many crash-message slots the channel advertises. */
429 err = visorchannel_read(chipset_dev->controlvm_channel,
430 offsetof(struct visor_controlvm_channel,
431 saved_crash_message_count),
432 &local_crash_msg_count, sizeof(u16));
434 dev_err(&chipset_dev->acpi_device->dev,
435 "failed to read message count\n");
/* The channel must advertise exactly CONTROLVM_CRASHMSG_MAX slots. */
439 if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
440 dev_err(&chipset_dev->acpi_device->dev,
441 "invalid number of messages\n");
/* Fetch the channel-relative offset of the crash-message area. */
445 err = visorchannel_read(chipset_dev->controlvm_channel,
446 offsetof(struct visor_controlvm_channel,
447 saved_crash_message_offset),
448 &local_crash_msg_offset, sizeof(u32));
450 dev_err(&chipset_dev->acpi_device->dev,
451 "failed to read offset\n");
/* Device messages live one controlvm_message past the bus message. */
457 local_crash_msg_offset += sizeof(struct controlvm_message);
458 err = visorchannel_write(chipset_dev->controlvm_channel,
459 local_crash_msg_offset,
461 sizeof(struct controlvm_message));
463 dev_err(&chipset_dev->acpi_device->dev,
464 "failed to write dev msg\n");
469 err = visorchannel_write(chipset_dev->controlvm_channel,
470 local_crash_msg_offset,
472 sizeof(struct controlvm_message));
474 dev_err(&chipset_dev->acpi_device->dev,
475 "failed to write bus msg\n");
/* Unknown cr_type: report and fail (default switch arm, partly elided). */
480 dev_err(&chipset_dev->acpi_device->dev,
481 "Invalid crash_obj_type\n");
/*
 * Respond to a pending controlvm request iff @pending_msg_hdr exists and
 * its command id matches @cmd_id; otherwise do nothing / error out.
 */
487 static int controlvm_responder(enum controlvm_id cmd_id,
488 struct controlvm_message_header *pending_msg_hdr,
491 if (!pending_msg_hdr)
494 if (pending_msg_hdr->id != (u32)cmd_id)
497 return controlvm_respond(pending_msg_hdr, response, NULL);
/*
 * Specialized responder for DEVICE_CHANGESTATE: echoes the device's
 * bus/dev numbers and the requested @response_state back to Command.
 */
500 static int device_changestate_responder(
501 enum controlvm_id cmd_id,
502 struct visor_device *p, int response,
503 struct visor_segment_state response_state)
505 struct controlvm_message outmsg;
507 if (!p->pending_msg_hdr)
509 if (p->pending_msg_hdr->id != cmd_id)
512 controlvm_init_response(&outmsg, p->pending_msg_hdr, response);
514 outmsg.cmd.device_change_state.bus_no = p->chipset_bus_no;
515 outmsg.cmd.device_change_state.dev_no = p->chipset_dev_no;
516 outmsg.cmd.device_change_state.state = response_state;
518 return visorchannel_signalinsert(chipset_dev->controlvm_channel,
519 CONTROLVM_QUEUE_REQUEST, &outmsg);
/*
 * Handle a CONTROLVM create_bus message: allocate a visor_device for the
 * bus, stash the crash message for SIOVM buses, create the bus's
 * visorchannel, and register the bus instance. Error paths unwind via the
 * goto labels below; NOTE(review): several error-check lines are elided in
 * this extract.
 */
522 static int visorbus_create(struct controlvm_message *inmsg)
524 struct controlvm_message_packet *cmd = &inmsg->cmd;
525 struct controlvm_message_header *pmsg_hdr = NULL;
526 u32 bus_no = cmd->create_bus.bus_no;
527 struct visor_device *bus_info;
528 struct visorchannel *visorchannel;
/* Refuse to create a bus that already exists in the created state. */
531 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
532 if (bus_info && (bus_info->state.created == 1)) {
533 dev_err(&chipset_dev->acpi_device->dev,
534 "failed %s: already exists\n", __func__);
539 bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
545 INIT_LIST_HEAD(&bus_info->list_all);
546 bus_info->chipset_bus_no = bus_no;
547 bus_info->chipset_dev_no = BUS_ROOT_DEVICE;
/* SIOVM buses get their create message preserved for kdump replay. */
549 if (guid_equal(&cmd->create_bus.bus_inst_guid, &visor_siovm_guid)) {
550 err = save_crash_message(inmsg, CRASH_BUS);
552 goto err_free_bus_info;
/* Keep a copy of the request header so we can respond asynchronously. */
555 if (inmsg->hdr.flags.response_expected == 1) {
556 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr),
560 goto err_free_bus_info;
563 memcpy(pmsg_hdr, &inmsg->hdr,
564 sizeof(struct controlvm_message_header));
565 bus_info->pending_msg_hdr = pmsg_hdr;
568 visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
570 &cmd->create_bus.bus_data_type_guid);
573 goto err_free_pending_msg;
576 bus_info->visorchannel = visorchannel;
578 /* Response will be handled by visorbus_create_instance on success */
579 err = visorbus_create_instance(bus_info);
581 goto err_destroy_channel;
/* Error unwind: destroy channel, free pending header, then bus_info. */
586 visorchannel_destroy(visorchannel);
588 err_free_pending_msg:
589 kfree(bus_info->pending_msg_hdr);
595 if (inmsg->hdr.flags.response_expected == 1)
596 controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
/*
 * Handle a CONTROLVM destroy_bus message: validate the bus exists, was
 * created, and has no outstanding response, then remove the bus instance
 * (which sends the response). NOTE(review): several error-return lines
 * are elided in this extract.
 */
600 static int visorbus_destroy(struct controlvm_message *inmsg)
602 struct controlvm_message_packet *cmd = &inmsg->cmd;
603 struct controlvm_message_header *pmsg_hdr = NULL;
604 u32 bus_no = cmd->destroy_bus.bus_no;
605 struct visor_device *bus_info;
608 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
/* Can't destroy a bus that was never created. */
613 if (bus_info->state.created == 0) {
617 if (bus_info->pending_msg_hdr) {
618 /* only non-NULL if dev is still waiting on a response */
/* Stash the request header so the async response can be sent later. */
622 if (inmsg->hdr.flags.response_expected == 1) {
623 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
629 memcpy(pmsg_hdr, &inmsg->hdr,
630 sizeof(struct controlvm_message_header));
631 bus_info->pending_msg_hdr = pmsg_hdr;
634 /* Response will be handled by visorbus_remove_instance */
635 visorbus_remove_instance(bus_info);
639 if (inmsg->hdr.flags.response_expected == 1)
640 controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
/* Return the GUID embedded in the parsed parameter header. */
644 static const guid_t *parser_id_get(struct parser_context *ctx)
646 return &ctx->data.id;
/*
 * Copy the current parameter region out as a freshly kmalloc'd,
 * NUL-terminated string. Scans up to bytes_remaining for a '\0'; if none
 * is found the whole region is taken (value_length stays -1 until set).
 * Caller owns (and must kfree) the returned buffer.
 * NOTE(review): several lines (pscan setup, NULL check on kmalloc) are
 * elided in this extract.
 */
649 static void *parser_string_get(struct parser_context *ctx)
660 nscan = ctx->bytes_remaining;
664 for (i = 0, value_length = -1; i < nscan; i++)
665 if (pscan[i] == '\0') {
669 /* '\0' was not included in the length */
670 if (value_length < 0)
671 value_length = nscan;
/* +1 leaves room for the terminating NUL appended below. */
673 value = kmalloc(value_length + 1, GFP_KERNEL);
676 if (value_length > 0)
677 memcpy(value, pscan, value_length);
678 ((u8 *)(value))[value_length] = '\0';
/*
 * Extract the name field from the parameter header and return it as an
 * allocated string via parser_string_get(). Rejects a name that would
 * overrun param_bytes.
 * NOTE(review): line 691 takes the address of the local pointer phdr
 * ((char *)&phdr) rather than the header it points to — looks suspicious;
 * verify against the full source before relying on it.
 */
682 static void *parser_name_get(struct parser_context *ctx)
684 struct visor_controlvm_parameters_header *phdr = NULL;
688 if (phdr->name_offset + phdr->name_length > ctx->param_bytes)
691 ctx->curr = (char *)&phdr + phdr->name_offset;
692 ctx->bytes_remaining = phdr->name_length;
693 return parser_string_get(ctx);
/*
 * Handle a CONTROLVM configure_bus message: point the bus's channel at the
 * guest partition and, when a parser context is supplied, record the
 * partition GUID and bus name. Responds on both success and error paths.
 */
696 static int visorbus_configure(struct controlvm_message *inmsg,
697 struct parser_context *parser_ctx)
699 struct controlvm_message_packet *cmd = &inmsg->cmd;
701 struct visor_device *bus_info;
704 bus_no = cmd->configure_bus.bus_no;
705 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
/* Bus must already be created and not awaiting another response. */
710 if (bus_info->state.created == 0) {
714 if (bus_info->pending_msg_hdr) {
719 err = visorchannel_set_clientpartition
720 (bus_info->visorchannel,
721 cmd->configure_bus.guest_handle);
/* Optional parameter payload carries the partition GUID and name. */
726 const guid_t *partition_guid = parser_id_get(parser_ctx);
728 guid_copy(&bus_info->partition_guid, partition_guid);
729 bus_info->name = parser_name_get(parser_ctx);
732 if (inmsg->hdr.flags.response_expected == 1)
733 controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
/* Error path: log and still respond if one was expected. */
737 dev_err(&chipset_dev->acpi_device->dev,
738 "%s exited with err: %d\n", __func__, err);
739 if (inmsg->hdr.flags.response_expected == 1)
740 controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
/*
 * Handle a CONTROLVM create_device message: validate the owning bus,
 * allocate the visor_device, create its (locked) visorchannel, save the
 * crash message for vHBA devices, and hand off to create_visor_device()
 * which sends the response. Error paths unwind via goto labels.
 * NOTE(review): several error-check and assignment lines are elided in
 * this extract.
 */
744 static int visorbus_device_create(struct controlvm_message *inmsg)
746 struct controlvm_message_packet *cmd = &inmsg->cmd;
747 struct controlvm_message_header *pmsg_hdr = NULL;
748 u32 bus_no = cmd->create_device.bus_no;
749 u32 dev_no = cmd->create_device.dev_no;
750 struct visor_device *dev_info = NULL;
751 struct visor_device *bus_info;
752 struct visorchannel *visorchannel;
/* The parent bus must exist and be in the created state. */
755 bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
757 dev_err(&chipset_dev->acpi_device->dev,
758 "failed to get bus by id: %d\n", bus_no);
762 if (bus_info->state.created == 0) {
763 dev_err(&chipset_dev->acpi_device->dev,
764 "bus not created, id: %d\n", bus_no);
/* Refuse to create a device that already exists in the created state. */
769 dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
770 if (dev_info && (dev_info->state.created == 1)) {
771 dev_err(&chipset_dev->acpi_device->dev,
772 "failed to get bus by id: %d/%d\n", bus_no, dev_no);
777 dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
783 dev_info->chipset_bus_no = bus_no;
784 dev_info->chipset_dev_no = dev_no;
785 guid_copy(&dev_info->inst, &cmd->create_device.dev_inst_guid);
786 dev_info->device.parent = &bus_info->device;
789 visorchannel_create_with_lock(cmd->create_device.channel_addr,
791 &cmd->create_device.data_type_guid);
793 dev_err(&chipset_dev->acpi_device->dev,
794 "failed to create visorchannel: %d/%d\n",
797 goto err_free_dev_info;
799 dev_info->visorchannel = visorchannel;
800 guid_copy(&dev_info->channel_type_guid, &cmd->create_device.data_type_guid);
/* vHBA devices get their create message preserved for kdump replay. */
801 if (guid_equal(&cmd->create_device.data_type_guid, &visor_vhba_channel_guid)) {
802 err = save_crash_message(inmsg, CRASH_DEV);
804 goto err_destroy_visorchannel;
/* Keep a copy of the request header for the asynchronous response. */
807 if (inmsg->hdr.flags.response_expected == 1) {
808 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
811 goto err_destroy_visorchannel;
814 memcpy(pmsg_hdr, &inmsg->hdr,
815 sizeof(struct controlvm_message_header));
816 dev_info->pending_msg_hdr = pmsg_hdr;
818 /* create_visor_device will send response */
819 err = create_visor_device(dev_info);
821 goto err_destroy_visorchannel;
825 err_destroy_visorchannel:
826 visorchannel_destroy(visorchannel);
832 if (inmsg->hdr.flags.response_expected == 1)
833 controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
/*
 * Handle a CONTROLVM device_change_state message: map the requested
 * segment state to a resume (running) or pause (standby) of the device.
 * The driver's resume/pause paths send the response asynchronously.
 * NOTE(review): several error-return lines are elided in this extract.
 */
837 static int visorbus_device_changestate(struct controlvm_message *inmsg)
839 struct controlvm_message_packet *cmd = &inmsg->cmd;
840 struct controlvm_message_header *pmsg_hdr = NULL;
841 u32 bus_no = cmd->device_change_state.bus_no;
842 u32 dev_no = cmd->device_change_state.dev_no;
843 struct visor_segment_state state = cmd->device_change_state.state;
844 struct visor_device *dev_info;
847 dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
/* Device must exist, be created, and not be awaiting a prior response. */
852 if (dev_info->state.created == 0) {
856 if (dev_info->pending_msg_hdr) {
857 /* only non-NULL if dev is still waiting on a response */
/* Stash the request header for the asynchronous response. */
861 if (inmsg->hdr.flags.response_expected == 1) {
862 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
868 memcpy(pmsg_hdr, &inmsg->hdr,
869 sizeof(struct controlvm_message_header));
870 dev_info->pending_msg_hdr = pmsg_hdr;
/* Running state => resume the device; standby state => pause it. */
873 if (state.alive == segment_state_running.alive &&
874 state.operating == segment_state_running.operating)
875 /* Response will be sent from visorchipset_device_resume */
876 err = visorchipset_device_resume(dev_info);
877 /* ServerNotReady / ServerLost / SegmentStateStandby */
878 else if (state.alive == segment_state_standby.alive &&
879 state.operating == segment_state_standby.operating)
881 * technically this is standby case where server is lost.
882 * Response will be sent from visorchipset_device_pause.
884 err = visorchipset_device_pause(dev_info);
/* Error path: log and respond if a response was expected. */
891 dev_err(&chipset_dev->acpi_device->dev, "failed: %d\n", err);
892 if (inmsg->hdr.flags.response_expected == 1)
893 controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
/*
 * Handle a CONTROLVM destroy_device message: validate the device, stash
 * the request header, free the device name, and remove the device.
 * NOTE(review): several error-return lines are elided in this extract.
 */
897 static int visorbus_device_destroy(struct controlvm_message *inmsg)
899 struct controlvm_message_packet *cmd = &inmsg->cmd;
900 struct controlvm_message_header *pmsg_hdr = NULL;
901 u32 bus_no = cmd->destroy_device.bus_no;
902 u32 dev_no = cmd->destroy_device.dev_no;
903 struct visor_device *dev_info;
906 dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
/* Device must exist, be created, and not be awaiting a prior response. */
911 if (dev_info->state.created == 0) {
915 if (dev_info->pending_msg_hdr) {
916 /* only non-NULL if dev is still waiting on a response */
920 if (inmsg->hdr.flags.response_expected == 1) {
921 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
927 memcpy(pmsg_hdr, &inmsg->hdr,
928 sizeof(struct controlvm_message_header));
929 dev_info->pending_msg_hdr = pmsg_hdr;
/* Free the parser-allocated name before tearing the device down. */
932 kfree(dev_info->name);
933 remove_visor_device(dev_info);
937 if (inmsg->hdr.flags.response_expected == 1)
938 controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
943 * The general parahotplug flow works as follows. The visorchipset receives
944 * a DEVICE_CHANGESTATE message from Command specifying a physical device
945 * to enable or disable. The CONTROLVM message handler calls
946 * parahotplug_process_message, which then adds the message to a global list
947 * and kicks off a udev event which causes a user level script to enable or
948 * disable the specified device. The udev script then writes to
949 * /sys/devices/platform/visorchipset/parahotplug, which causes the
950 * parahotplug store functions to get called, at which point the
951 * appropriate CONTROLVM message is retrieved from the list and responded to.
/* How long a queued parahotplug request stays valid before expiring. */
954 #define PARAHOTPLUG_TIMEOUT_MS 2000
957 * parahotplug_next_id() - generate unique int to match an outstanding
958 * CONTROLVM message with a udev script /sys
961 * Return: a unique integer value
963 static int parahotplug_next_id(void)
/* Monotonic counter; atomic so concurrent requests never share an id. */
965 static atomic_t id = ATOMIC_INIT(0);
967 return atomic_inc_return(&id);
971 * parahotplug_next_expiration() - returns the time (in jiffies) when a
972 * CONTROLVM message on the list should expire
973 * -- PARAHOTPLUG_TIMEOUT_MS in the future
975 * Return: expected expiration time (in jiffies)
977 static unsigned long parahotplug_next_expiration(void)
979 return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
983 * parahotplug_request_create() - create a parahotplug_request, which is
984 * basically a wrapper for a CONTROLVM_MESSAGE
985 * that we can stick on a list
986 * @msg: the message to insert in the request
988 * Return: the request containing the provided message
990 static struct parahotplug_request *parahotplug_request_create(
991 struct controlvm_message *msg)
993 struct parahotplug_request *req;
/* Caller owns the returned request; freed via parahotplug_request_destroy. */
995 req = kmalloc(sizeof(*req), GFP_KERNEL);
999 req->id = parahotplug_next_id();
1000 req->expiration = parahotplug_next_expiration();
1007 * parahotplug_request_destroy() - free a parahotplug_request
1008 * @req: the request to deallocate
1010 static void parahotplug_request_destroy(struct parahotplug_request *req)
/* Global list of outstanding parahotplug requests awaiting the udev ack. */
1015 static LIST_HEAD(parahotplug_request_list);
1016 /* lock for above */
1017 static DEFINE_SPINLOCK(parahotplug_request_list_lock);
1020 * parahotplug_request_complete() - mark request as complete
1021 * @id: the id of the request
1022 * @active: indicates whether the request is assigned to active partition
1024 * Called from the /sys handler, which means the user script has
1025 * finished the enable/disable. Find the matching identifier, and
1026 * respond to the CONTROLVM message with success.
1028 * Return: 0 on success or -EINVAL on failure
1030 static int parahotplug_request_complete(int id, u16 active)
1032 struct list_head *pos;
1033 struct list_head *tmp;
1035 spin_lock(&parahotplug_request_list_lock);
1037 /* Look for a request matching "id". */
1038 list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1039 struct parahotplug_request *req =
1040 list_entry(pos, struct parahotplug_request, list);
1041 if (req->id == id) {
1043 * Found a match. Remove it from the list and
/* Drop the lock before responding; req is already off the list here
 * (the list_del is elided in this extract — confirm in full source). */
1047 spin_unlock(&parahotplug_request_list_lock);
1048 req->msg.cmd.device_change_state.state.active = active;
1049 if (req->msg.hdr.flags.response_expected)
1051 &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
1052 &req->msg.cmd.device_change_state.state);
1053 parahotplug_request_destroy(req);
/* No matching id found: unlock and fall through to the error return. */
1058 spin_unlock(&parahotplug_request_list_lock);
1063 * devicedisabled_store() - disables the hotplug device
1064 * @dev: sysfs interface variable not utilized in this function
1065 * @attr: sysfs interface variable not utilized in this function
1066 * @buf: buffer containing the device id
1067 * @count: the size of the buffer
1069 * The parahotplug/devicedisabled interface gets called by our support script
1070 * when an SR-IOV device has been shut down. The ID is passed to the script
1071 * and then passed back when the device has been removed.
1073 * Return: the size of the buffer for success or negative for error
1075 static ssize_t devicedisabled_store(struct device *dev,
1076 struct device_attribute *attr,
1077 const char *buf, size_t count)
/* Parse the request id echoed back by the udev script. */
1082 if (kstrtouint(buf, 10, &id))
/* active = 0: device was disabled. */
1085 err = parahotplug_request_complete(id, 0);
1090 static DEVICE_ATTR_WO(devicedisabled);
1093 * deviceenabled_store() - enables the hotplug device
1094 * @dev: sysfs interface variable not utilized in this function
1095 * @attr: sysfs interface variable not utilized in this function
1096 * @buf: buffer containing the device id
1097 * @count: the size of the buffer
1099 * The parahotplug/deviceenabled interface gets called by our support script
1100 * when an SR-IOV device has been recovered. The ID is passed to the script
1101 * and then passed back when the device has been brought back up.
1103 * Return: the size of the buffer for success or negative for error
1105 static ssize_t deviceenabled_store(struct device *dev,
1106 struct device_attribute *attr,
1107 const char *buf, size_t count)
1111 if (kstrtouint(buf, 10, &id))
/* active = 1: device was enabled; result intentionally not checked here. */
1114 parahotplug_request_complete(id, 1);
1117 static DEVICE_ATTR_WO(deviceenabled);
/* Default (unnamed) attribute group: installation-status attributes. */
1119 static struct attribute *visorchipset_install_attrs[] = {
1120 &dev_attr_toolaction.attr,
1121 &dev_attr_boottotool.attr,
1122 &dev_attr_error.attr,
1123 &dev_attr_textid.attr,
1124 &dev_attr_remaining_steps.attr,
1128 static const struct attribute_group visorchipset_install_group = {
1130 .attrs = visorchipset_install_attrs
/* "parahotplug" subdirectory: ack interface for the udev helper script. */
1133 static struct attribute *visorchipset_parahotplug_attrs[] = {
1134 &dev_attr_devicedisabled.attr,
1135 &dev_attr_deviceenabled.attr,
1139 static const struct attribute_group visorchipset_parahotplug_group = {
1140 .name = "parahotplug",
1141 .attrs = visorchipset_parahotplug_attrs
/* NULL-terminated group list attached to the platform device. */
1144 static const struct attribute_group *visorchipset_dev_groups[] = {
1145 &visorchipset_install_group,
1146 &visorchipset_parahotplug_group,
1151 * parahotplug_request_kickoff() - initiate parahotplug request
1152 * @req: the request to initiate
1154 * Cause uevent to run the user level script to do the disable/enable specified
1155 * in the parahotplug_request.
1157 static int parahotplug_request_kickoff(struct parahotplug_request *req)
1159 struct controlvm_message_packet *cmd = &req->msg.cmd;
1160 char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1163 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
/* Build the environment the udev helper script reads. */
1166 sprintf(env_cmd, "VISOR_PARAHOTPLUG=1");
1167 sprintf(env_id, "VISOR_PARAHOTPLUG_ID=%d", req->id);
1168 sprintf(env_state, "VISOR_PARAHOTPLUG_STATE=%d",
1169 cmd->device_change_state.state.active);
1170 sprintf(env_bus, "VISOR_PARAHOTPLUG_BUS=%d",
1171 cmd->device_change_state.bus_no);
/* dev_no packs PCI device (upper bits) and function (low 3 bits). */
1172 sprintf(env_dev, "VISOR_PARAHOTPLUG_DEVICE=%d",
1173 cmd->device_change_state.dev_no >> 3);
1174 sprintf(env_func, "VISOR_PARAHOTPLUG_FUNCTION=%d",
1175 cmd->device_change_state.dev_no & 0x7);
1177 return kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
1182 * parahotplug_process_message() - enables or disables a PCI device by kicking
1184 * @inmsg: the message indicating whether to enable or disable
1186 static int parahotplug_process_message(struct controlvm_message *inmsg)
1188 struct parahotplug_request *req;
1191 req = parahotplug_request_create(inmsg);
1196 * For enable messages, just respond with success right away, we don't
1197 * need to wait to see if the enable was successful.
1199 if (inmsg->cmd.device_change_state.state.active) {
1200 err = parahotplug_request_kickoff(req);
1203 controlvm_respond(&inmsg->hdr, CONTROLVM_RESP_SUCCESS,
1204 &inmsg->cmd.device_change_state.state);
/* Enable path doesn't wait for the script; request freed immediately. */
1205 parahotplug_request_destroy(req);
1210 * For disable messages, add the request to the request list before
1211 * kicking off the udev script. It won't get responded to until the
1212 * script has indicated it's done.
1214 spin_lock(&parahotplug_request_list_lock);
1215 list_add_tail(&req->list, &parahotplug_request_list);
1216 spin_unlock(&parahotplug_request_list_lock);
1218 err = parahotplug_request_kickoff(req);
/* Kickoff-failure path: respond with the error to unblock Command. */
1224 controlvm_respond(&inmsg->hdr, err,
1225 &inmsg->cmd.device_change_state.state);
1230 * chipset_ready_uevent() - sends chipset_ready action
1232 * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1234 * Return: 0 on success, negative on failure
1236 static int chipset_ready_uevent(struct controlvm_message_header *msg_hdr)
1240 res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
/* Respond to Command only when the request asked for a response. */
1243 if (msg_hdr->flags.response_expected)
1244 controlvm_respond(msg_hdr, res, NULL);
1250 * chipset_selftest_uevent() - sends chipset_selftest action
1252 * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1254 * Return: 0 on success, negative on failure
1256 static int chipset_selftest_uevent(struct controlvm_message_header *msg_hdr)
1258 char env_selftest[20];
1259 char *envp[] = { env_selftest, NULL };
/* Extra environment flag distinguishes a self-test from a plain online. */
1262 sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
1263 res = kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
1266 if (msg_hdr->flags.response_expected)
1267 controlvm_respond(msg_hdr, res, NULL);
1273 * chipset_notready_uevent() - sends chipset_notready action
1275 * Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1277 * Return: 0 on success, negative on failure
1279 static int chipset_notready_uevent(struct controlvm_message_header *msg_hdr)
1281 int res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
1284 if (msg_hdr->flags.response_expected)
1285 controlvm_respond(msg_hdr, res, NULL);
/*
 * Issue a hypervisor VMCALL. @param's low 32 bits go in EBX, high 32 bits
 * in ECX; @tuple selects the call. Requires the hypervisor CPUID bit
 * (ECX bit 31 of leaf 1) to be set. The raw result is then mapped from
 * VMCALL_RESULT_* codes to Linux errnos (mapping partly elided here).
 */
1290 static int unisys_vmcall(unsigned long tuple, unsigned long param)
1293 unsigned int cpuid_eax, cpuid_ebx, cpuid_ecx, cpuid_edx;
1294 unsigned long reg_ebx;
1295 unsigned long reg_ecx;
1297 reg_ebx = param & 0xFFFFFFFF;
1298 reg_ecx = param >> 32;
/* Bail out unless CPUID leaf 1 reports we're running under a hypervisor. */
1300 cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
1301 if (!(cpuid_ecx & 0x80000000))
/* Raw VMCALL opcode (0f 01 c1): result returned in EAX. */
1304 __asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
1305 "a"(tuple), "b"(reg_ebx), "c"(reg_ecx));
1310 /* Need to convert from VMCALL error codes to Linux */
1313 case VMCALL_RESULT_INVALID_PARAM:
1315 case VMCALL_RESULT_DATA_UNAVAILABLE:
/*
 * Ask the hypervisor for the controlvm channel's physical address via
 * VMCALL_CONTROLVM_ADDR, then map it as a locked visorchannel and store it
 * in @dev. NOTE(review): error-return lines are elided in this extract.
 */
1322 static int controlvm_channel_create(struct visorchipset_device *dev)
1324 struct visorchannel *chan;
/* The hypervisor fills dev->controlvm_params.address (OUT parameter). */
1328 err = unisys_vmcall(VMCALL_CONTROLVM_ADDR,
1329 virt_to_phys(&dev->controlvm_params));
1332 addr = dev->controlvm_params.address;
1333 chan = visorchannel_create_with_lock(addr, GFP_KERNEL,
1334 &visor_controlvm_channel_guid);
1337 dev->controlvm_channel = chan;
/*
 * Work handler run in the kdump (crash) kernel: re-send CHIPSET_INIT, then
 * read back the bus- and device-create messages that save_crash_message()
 * stashed in the channel, and replay them so the storage path needed to
 * write the crash dump comes back up.
 * NOTE(review): several error-return lines are elided in this extract.
 */
1341 static void setup_crash_devices_work_queue(struct work_struct *work)
1343 struct controlvm_message local_crash_bus_msg;
1344 struct controlvm_message local_crash_dev_msg;
1345 struct controlvm_message msg;
1346 u32 local_crash_msg_offset;
1347 u16 local_crash_msg_count;
1349 /* send init chipset msg */
1350 msg.hdr.id = CONTROLVM_CHIPSET_INIT;
/* NOTE(review): bus_count is hard-coded to 23 — origin of this value is
 * not visible here; confirm against the full source. */
1351 msg.cmd.init_chipset.bus_count = 23;
1352 msg.cmd.init_chipset.switch_count = 0;
1356 /* get saved message count */
1357 if (visorchannel_read(chipset_dev->controlvm_channel,
1358 offsetof(struct visor_controlvm_channel,
1359 saved_crash_message_count),
1360 &local_crash_msg_count, sizeof(u16)) < 0) {
1361 dev_err(&chipset_dev->acpi_device->dev,
1362 "failed to read channel\n");
/* Same slot-count sanity check as save_crash_message(). */
1366 if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
1367 dev_err(&chipset_dev->acpi_device->dev,
1372 /* get saved crash message offset */
1373 if (visorchannel_read(chipset_dev->controlvm_channel,
1374 offsetof(struct visor_controlvm_channel,
1375 saved_crash_message_offset),
1376 &local_crash_msg_offset, sizeof(u32)) < 0) {
1377 dev_err(&chipset_dev->acpi_device->dev,
1378 "failed to read channel\n");
1382 /* read create device message for storage bus offset */
1383 if (visorchannel_read(chipset_dev->controlvm_channel,
1384 local_crash_msg_offset,
1385 &local_crash_bus_msg,
1386 sizeof(struct controlvm_message)) < 0) {
1387 dev_err(&chipset_dev->acpi_device->dev,
1388 "failed to read channel\n");
1392 /* read create device message for storage device */
1393 if (visorchannel_read(chipset_dev->controlvm_channel,
1394 local_crash_msg_offset +
1395 sizeof(struct controlvm_message),
1396 &local_crash_dev_msg,
1397 sizeof(struct controlvm_message)) < 0) {
1398 dev_err(&chipset_dev->acpi_device->dev,
1399 "failed to read channel\n");
1403 /* reuse IOVM create bus message */
1404 if (!local_crash_bus_msg.cmd.create_bus.channel_addr) {
1405 dev_err(&chipset_dev->acpi_device->dev,
1406 "no valid create_bus message\n");
1409 visorbus_create(&local_crash_bus_msg);
1411 /* reuse create device message for storage device */
1412 if (!local_crash_dev_msg.cmd.create_device.channel_addr) {
1413 dev_err(&chipset_dev->acpi_device->dev,
1414 "no valid create_device message\n");
1417 visorbus_device_create(&local_crash_dev_msg);
1420 void visorbus_response(struct visor_device *bus_info, int response,
1423 controlvm_responder(controlvm_id, bus_info->pending_msg_hdr, response);
1425 kfree(bus_info->pending_msg_hdr);
1426 bus_info->pending_msg_hdr = NULL;
1429 void visorbus_device_changestate_response(struct visor_device *dev_info,
1431 struct visor_segment_state state)
1433 device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
1434 dev_info, response, state);
1436 kfree(dev_info->pending_msg_hdr);
1437 dev_info->pending_msg_hdr = NULL;
1440 static void parser_done(struct parser_context *ctx)
1442 chipset_dev->controlvm_payload_bytes_buffered -= ctx->param_bytes;
1446 static struct parser_context *parser_init_stream(u64 addr, u32 bytes,
1450 struct parser_context *ctx;
1455 /* alloc an extra byte to ensure payload is \0 terminated */
1456 allocbytes = bytes + 1 + (sizeof(struct parser_context) -
1457 sizeof(struct visor_controlvm_parameters_header));
1458 if ((chipset_dev->controlvm_payload_bytes_buffered + bytes)
1459 > MAX_CONTROLVM_PAYLOAD_BYTES) {
1463 ctx = kzalloc(allocbytes, GFP_KERNEL);
1469 ctx->allocbytes = allocbytes;
1470 ctx->param_bytes = bytes;
1471 mapping = memremap(addr, bytes, MEMREMAP_WB);
1473 goto err_finish_ctx;
1474 memcpy(&ctx->data, mapping, bytes);
1476 ctx->byte_stream = true;
1477 chipset_dev->controlvm_payload_bytes_buffered += ctx->param_bytes;
1487 * handle_command() - process a controlvm message
1488 * @inmsg: the message to process
1489 * @channel_addr: address of the controlvm channel
1492 * 0 - Successfully processed the message
1493 * -EAGAIN - ControlVM message was not processed and should be retried
1494 * reading the next controlvm message; a scenario where this can
1495 * occur is when we need to throttle the allocation of memory in
1496 * which to copy out controlvm payload data.
1497 * < 0 - error: ControlVM message was processed but an error occurred.
1499 static int handle_command(struct controlvm_message inmsg, u64 channel_addr)
1501 struct controlvm_message_packet *cmd = &inmsg.cmd;
1504 struct parser_context *parser_ctx = NULL;
1505 struct controlvm_message ackmsg;
1508 /* create parsing context if necessary */
1509 parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
1510 parm_bytes = inmsg.hdr.payload_bytes;
1513 * Parameter and channel addresses within test messages actually lie
1514 * within our OS-controlled memory. We need to know that, because it
1515 * makes a difference in how we compute the virtual address.
1520 parser_ctx = parser_init_stream(parm_addr, parm_bytes, &retry);
1521 if (!parser_ctx && retry)
1524 controlvm_init_response(&ackmsg, &inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1525 err = visorchannel_signalinsert(chipset_dev->controlvm_channel,
1526 CONTROLVM_QUEUE_ACK, &ackmsg);
1530 switch (inmsg.hdr.id) {
1531 case CONTROLVM_CHIPSET_INIT:
1532 err = chipset_init(&inmsg);
1534 case CONTROLVM_BUS_CREATE:
1535 err = visorbus_create(&inmsg);
1537 case CONTROLVM_BUS_DESTROY:
1538 err = visorbus_destroy(&inmsg);
1540 case CONTROLVM_BUS_CONFIGURE:
1541 err = visorbus_configure(&inmsg, parser_ctx);
1543 case CONTROLVM_DEVICE_CREATE:
1544 err = visorbus_device_create(&inmsg);
1546 case CONTROLVM_DEVICE_CHANGESTATE:
1547 if (cmd->device_change_state.flags.phys_device) {
1548 err = parahotplug_process_message(&inmsg);
1551 * save the hdr and cmd structures for later use when
1552 * sending back the response to Command
1554 err = visorbus_device_changestate(&inmsg);
1558 case CONTROLVM_DEVICE_DESTROY:
1559 err = visorbus_device_destroy(&inmsg);
1561 case CONTROLVM_DEVICE_CONFIGURE:
1562 /* no op just send a respond that we passed */
1563 if (inmsg.hdr.flags.response_expected)
1564 controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS,
1567 case CONTROLVM_CHIPSET_READY:
1568 err = chipset_ready_uevent(&inmsg.hdr);
1570 case CONTROLVM_CHIPSET_SELFTEST:
1571 err = chipset_selftest_uevent(&inmsg.hdr);
1573 case CONTROLVM_CHIPSET_STOP:
1574 err = chipset_notready_uevent(&inmsg.hdr);
1578 if (inmsg.hdr.flags.response_expected)
1579 controlvm_respond(&inmsg.hdr,
1580 -CONTROLVM_RESP_ID_UNKNOWN, NULL);
1585 parser_done(parser_ctx);
1592 * read_controlvm_event() - retreives the next message from the
1593 * CONTROLVM_QUEUE_EVENT queue in the controlvm
1595 * @msg: pointer to the retrieved message
1597 * Return: 0 if valid message was retrieved or -error
1599 static int read_controlvm_event(struct controlvm_message *msg)
1601 int err = visorchannel_signalremove(chipset_dev->controlvm_channel,
1602 CONTROLVM_QUEUE_EVENT, msg);
1607 if (msg->hdr.flags.test_message == 1)
1614 * parahotplug_process_list() - remove any request from the list that's been on
1615 * there too long and respond with an error
1617 static void parahotplug_process_list(void)
1619 struct list_head *pos;
1620 struct list_head *tmp;
1622 spin_lock(¶hotplug_request_list_lock);
1624 list_for_each_safe(pos, tmp, ¶hotplug_request_list) {
1625 struct parahotplug_request *req =
1626 list_entry(pos, struct parahotplug_request, list);
1628 if (!time_after_eq(jiffies, req->expiration))
1632 if (req->msg.hdr.flags.response_expected)
1635 CONTROLVM_RESP_DEVICE_UDEV_TIMEOUT,
1636 &req->msg.cmd.device_change_state.state);
1637 parahotplug_request_destroy(req);
1640 spin_unlock(¶hotplug_request_list_lock);
1643 static void controlvm_periodic_work(struct work_struct *work)
1645 struct controlvm_message inmsg;
1649 /* Drain the RESPONSE queue make it empty */
1651 err = visorchannel_signalremove(chipset_dev->controlvm_channel,
1652 CONTROLVM_QUEUE_RESPONSE,
1654 } while ((!err) && (++count < CONTROLVM_MESSAGE_MAX));
1659 if (chipset_dev->controlvm_pending_msg_valid) {
1661 * we throttled processing of a prior msg, so try to process
1662 * it again rather than reading a new one
1664 inmsg = chipset_dev->controlvm_pending_msg;
1665 chipset_dev->controlvm_pending_msg_valid = false;
1668 err = read_controlvm_event(&inmsg);
1672 chipset_dev->most_recent_message_jiffies = jiffies;
1673 err = handle_command(inmsg,
1674 visorchannel_get_physaddr
1675 (chipset_dev->controlvm_channel));
1676 if (err == -EAGAIN) {
1677 chipset_dev->controlvm_pending_msg = inmsg;
1678 chipset_dev->controlvm_pending_msg_valid = true;
1682 err = read_controlvm_event(&inmsg);
1685 /* parahotplug_worker */
1686 parahotplug_process_list();
1689 * The controlvm messages are sent in a bulk. If we start receiving messages, we
1690 * want the polling to be fast. If we do not receive any message for
1691 * MIN_IDLE_SECONDS, we can slow down the polling.
1694 if (time_after(jiffies, chipset_dev->most_recent_message_jiffies +
1695 (HZ * MIN_IDLE_SECONDS))) {
1697 * it's been longer than MIN_IDLE_SECONDS since we processed
1698 * our last controlvm message; slow down the polling
1700 if (chipset_dev->poll_jiffies !=
1701 POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
1702 chipset_dev->poll_jiffies =
1703 POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
1705 if (chipset_dev->poll_jiffies !=
1706 POLLJIFFIES_CONTROLVMCHANNEL_FAST)
1707 chipset_dev->poll_jiffies =
1708 POLLJIFFIES_CONTROLVMCHANNEL_FAST;
1711 schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
1712 chipset_dev->poll_jiffies);
1715 static int visorchipset_init(struct acpi_device *acpi_device)
1718 struct visorchannel *controlvm_channel;
1720 chipset_dev = kzalloc(sizeof(*chipset_dev), GFP_KERNEL);
1724 err = controlvm_channel_create(chipset_dev);
1726 goto error_free_chipset_dev;
1728 acpi_device->driver_data = chipset_dev;
1729 chipset_dev->acpi_device = acpi_device;
1730 chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
1732 err = sysfs_create_groups(&chipset_dev->acpi_device->dev.kobj,
1733 visorchipset_dev_groups);
1735 goto error_destroy_channel;
1737 controlvm_channel = chipset_dev->controlvm_channel;
1738 if (!visor_check_channel(visorchannel_get_header(controlvm_channel),
1739 &chipset_dev->acpi_device->dev,
1740 &visor_controlvm_channel_guid,
1742 sizeof(struct visor_controlvm_channel),
1743 VISOR_CONTROLVM_CHANNEL_VERSIONID,
1744 VISOR_CHANNEL_SIGNATURE))
1745 goto error_delete_groups;
1747 /* if booting in a crash kernel */
1748 if (is_kdump_kernel())
1749 INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
1750 setup_crash_devices_work_queue);
1752 INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
1753 controlvm_periodic_work);
1755 chipset_dev->most_recent_message_jiffies = jiffies;
1756 chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
1757 schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
1758 chipset_dev->poll_jiffies);
1760 err = visorbus_init();
1762 goto error_cancel_work;
1767 cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
1769 error_delete_groups:
1770 sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
1771 visorchipset_dev_groups);
1773 error_destroy_channel:
1774 visorchannel_destroy(chipset_dev->controlvm_channel);
1776 error_free_chipset_dev:
1780 dev_err(&acpi_device->dev, "failed with error %d\n", err);
1784 static int visorchipset_exit(struct acpi_device *acpi_device)
1787 cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
1788 sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
1789 visorchipset_dev_groups);
1791 visorchannel_destroy(chipset_dev->controlvm_channel);
1797 static const struct acpi_device_id unisys_device_ids[] = {
1802 static struct acpi_driver unisys_acpi_driver = {
1803 .name = "unisys_acpi",
1804 .class = "unisys_acpi_class",
1805 .owner = THIS_MODULE,
1806 .ids = unisys_device_ids,
1808 .add = visorchipset_init,
1809 .remove = visorchipset_exit,
1813 MODULE_DEVICE_TABLE(acpi, unisys_device_ids);
1815 static __init int visorutil_spar_detect(void)
1817 unsigned int eax, ebx, ecx, edx;
1819 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
1821 cpuid(UNISYS_VISOR_LEAF_ID, &eax, &ebx, &ecx, &edx);
1822 return (ebx == UNISYS_VISOR_ID_EBX) &&
1823 (ecx == UNISYS_VISOR_ID_ECX) &&
1824 (edx == UNISYS_VISOR_ID_EDX);
1829 static int __init init_unisys(void)
1833 if (!visorutil_spar_detect())
1836 result = acpi_bus_register_driver(&unisys_acpi_driver);
1840 pr_info("Unisys Visorchipset Driver Loaded.\n");
1844 static void __exit exit_unisys(void)
1846 acpi_bus_unregister_driver(&unisys_acpi_driver);
1849 module_init(init_unisys);
1850 module_exit(exit_unisys);
1852 MODULE_AUTHOR("Unisys");
1853 MODULE_LICENSE("GPL");
1854 MODULE_DESCRIPTION("s-Par visorbus driver for virtual device buses");