Merge tag 'mmc-v4.14-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc
[sfrench/cifs-2.6.git] / drivers / staging / unisys / visorbus / visorchipset.c
1 /*
2  * Copyright (C) 2010 - 2015 UNISYS CORPORATION
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
12  * NON INFRINGEMENT.  See the GNU General Public License for more
13  * details.
14  */
15
16 #include <linux/acpi.h>
17 #include <linux/crash_dump.h>
18
19 #include "visorbus.h"
20 #include "visorbus_private.h"
21
22 /* {72120008-4AAB-11DC-8530-444553544200} */
23 #define VISOR_SIOVM_GUID GUID_INIT(0x72120008, 0x4AAB, 0x11DC, 0x85, 0x30, \
24                                    0x44, 0x45, 0x53, 0x54, 0x42, 0x00)
25
26 static const guid_t visor_vhba_channel_guid = VISOR_VHBA_CHANNEL_GUID;
27 static const guid_t visor_siovm_guid = VISOR_SIOVM_GUID;
28 static const guid_t visor_controlvm_channel_guid = VISOR_CONTROLVM_CHANNEL_GUID;
29
30 #define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
31 #define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
32
33 #define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)
34
35 #define UNISYS_VISOR_LEAF_ID 0x40000000
36
37 /* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
38 #define UNISYS_VISOR_ID_EBX 0x73696e55
39 #define UNISYS_VISOR_ID_ECX 0x70537379
40 #define UNISYS_VISOR_ID_EDX 0x34367261
41
42 /*
43  * When the controlvm channel is idle for at least MIN_IDLE_SECONDS, we switch
44  * to slow polling mode. As soon as we get a controlvm message, we switch back
45  * to fast polling mode.
46  */
47 #define MIN_IDLE_SECONDS 10
48
/*
 * struct parser_context - state for walking the parameter payload that
 *                         accompanies a controlvm message
 * @allocbytes:      number of bytes allocated for this context
 * @param_bytes:     number of parameter bytes available after @data
 * @curr:            cursor into the parameter bytes during parsing
 * @bytes_remaining: bytes left to consume from @curr
 * @byte_stream:     NOTE(review): set/read outside this chunk; presumably
 *                   true when the payload is a raw byte stream rather
 *                   than a parameters header -- confirm against callers
 * @data:            copy of the payload's parameters header
 */
struct parser_context {
        unsigned long allocbytes;
        unsigned long param_bytes;
        u8 *curr;
        unsigned long bytes_remaining;
        bool byte_stream;
        struct visor_controlvm_parameters_header data;
};
57
58 /* VMCALL_CONTROLVM_ADDR: Used by all guests, not just IO. */
59 #define VMCALL_CONTROLVM_ADDR 0x0501
60
/* Result codes a VMCALL (e.g. VMCALL_CONTROLVM_ADDR) can return. */
enum vmcall_result {
        VMCALL_RESULT_SUCCESS = 0,
        VMCALL_RESULT_INVALID_PARAM = 1,
        VMCALL_RESULT_DATA_UNAVAILABLE = 2,
        VMCALL_RESULT_FAILURE_UNAVAILABLE = 3,
        VMCALL_RESULT_DEVICE_ERROR = 4,
        VMCALL_RESULT_DEVICE_NOT_READY = 5
};
69
/*
 * struct vmcall_io_controlvm_addr_params - Structure for IO VMCALLS. Has
 *                                          parameters to VMCALL_CONTROLVM_ADDR
 *                                          interface.
 * @address:       The Guest-relative physical address of the ControlVm channel.
 *                 This VMCall fills this in with the appropriate address.
 *                 Contents provided by this VMCALL (OUT).
 * @channel_bytes: The size of the ControlVm channel in bytes. This VMCall
 *                 fills this in with the appropriate size. Contents provided
 *                 by this VMCALL (OUT).
 * @unused:        Unused Bytes in the 64-Bit Aligned Struct.
 */
struct vmcall_io_controlvm_addr_params {
        u64 address;
        u32 channel_bytes;
        u8 unused[4];
} __packed;
87
/*
 * struct visorchipset_device - driver-wide state for visorchipset
 * @acpi_device:                 the ACPI device this driver is bound to
 * @poll_jiffies:                current polling interval; switches between
 *                               the FAST and SLOW POLLJIFFIES values
 * @most_recent_message_jiffies: when we got our last controlvm message
 * @periodic_controlvm_work:     delayed work that services the channel
 * @controlvm_channel:           the controlvm channel all accessors use
 * @controlvm_payload_bytes_buffered: payload bytes currently buffered
 * @controlvm_pending_msg:       stashed message (see comment below)
 * @controlvm_pending_msg_valid: whether @controlvm_pending_msg is in use
 * @controlvm_params:            OUT parameters of VMCALL_CONTROLVM_ADDR
 */
struct visorchipset_device {
        struct acpi_device *acpi_device;
        unsigned long poll_jiffies;
        /* when we got our last controlvm message */
        unsigned long most_recent_message_jiffies;
        struct delayed_work periodic_controlvm_work;
        struct visorchannel *controlvm_channel;
        unsigned long controlvm_payload_bytes_buffered;
        /*
         * The following variables are used to handle the scenario where we are
         * unable to offload the payload from a controlvm message due to memory
         * requirements. In this scenario, we simply stash the controlvm
         * message, then attempt to process it again the next time
         * controlvm_periodic_work() runs.
         */
        struct controlvm_message controlvm_pending_msg;
        bool controlvm_pending_msg_valid;
        struct vmcall_io_controlvm_addr_params controlvm_params;
};
107
/* Single instance of the driver state, shared by every function below. */
static struct visorchipset_device *chipset_dev;
109
/*
 * A parahotplug request bundling the controlvm message that generated it
 * with an id and an expiration time (list/queue usage is outside this
 * chunk of the file).
 */
struct parahotplug_request {
        struct list_head list;
        int id;
        unsigned long expiration;
        struct controlvm_message msg;
};
116
/* sysfs attribute accessors for fields of the controlvm channel */
118 static ssize_t toolaction_show(struct device *dev,
119                                struct device_attribute *attr,
120                                char *buf)
121 {
122         u8 tool_action = 0;
123         int err;
124
125         err = visorchannel_read(chipset_dev->controlvm_channel,
126                                 offsetof(struct visor_controlvm_channel,
127                                          tool_action),
128                                 &tool_action, sizeof(u8));
129         if (err)
130                 return err;
131
132         return sprintf(buf, "%u\n", tool_action);
133 }
134
135 static ssize_t toolaction_store(struct device *dev,
136                                 struct device_attribute *attr,
137                                 const char *buf, size_t count)
138 {
139         u8 tool_action;
140         int err;
141
142         if (kstrtou8(buf, 10, &tool_action))
143                 return -EINVAL;
144
145         err = visorchannel_write(chipset_dev->controlvm_channel,
146                                  offsetof(struct visor_controlvm_channel,
147                                           tool_action),
148                                  &tool_action, sizeof(u8));
149         if (err)
150                 return err;
151         return count;
152 }
153 static DEVICE_ATTR_RW(toolaction);
154
155 static ssize_t boottotool_show(struct device *dev,
156                                struct device_attribute *attr,
157                                char *buf)
158 {
159         struct efi_visor_indication efi_visor_indication;
160         int err;
161
162         err = visorchannel_read(chipset_dev->controlvm_channel,
163                                 offsetof(struct visor_controlvm_channel,
164                                          efi_visor_ind),
165                                 &efi_visor_indication,
166                                 sizeof(struct efi_visor_indication));
167         if (err)
168                 return err;
169         return sprintf(buf, "%u\n", efi_visor_indication.boot_to_tool);
170 }
171
172 static ssize_t boottotool_store(struct device *dev,
173                                 struct device_attribute *attr,
174                                 const char *buf, size_t count)
175 {
176         int val, err;
177         struct efi_visor_indication efi_visor_indication;
178
179         if (kstrtoint(buf, 10, &val))
180                 return -EINVAL;
181
182         efi_visor_indication.boot_to_tool = val;
183         err = visorchannel_write(chipset_dev->controlvm_channel,
184                                  offsetof(struct visor_controlvm_channel,
185                                           efi_visor_ind),
186                                  &(efi_visor_indication),
187                                  sizeof(struct efi_visor_indication));
188         if (err)
189                 return err;
190         return count;
191 }
192 static DEVICE_ATTR_RW(boottotool);
193
194 static ssize_t error_show(struct device *dev, struct device_attribute *attr,
195                           char *buf)
196 {
197         u32 error = 0;
198         int err;
199
200         err = visorchannel_read(chipset_dev->controlvm_channel,
201                                 offsetof(struct visor_controlvm_channel,
202                                          installation_error),
203                                 &error, sizeof(u32));
204         if (err)
205                 return err;
206         return sprintf(buf, "%u\n", error);
207 }
208
209 static ssize_t error_store(struct device *dev, struct device_attribute *attr,
210                            const char *buf, size_t count)
211 {
212         u32 error;
213         int err;
214
215         if (kstrtou32(buf, 10, &error))
216                 return -EINVAL;
217
218         err = visorchannel_write(chipset_dev->controlvm_channel,
219                                  offsetof(struct visor_controlvm_channel,
220                                           installation_error),
221                                  &error, sizeof(u32));
222         if (err)
223                 return err;
224         return count;
225 }
226 static DEVICE_ATTR_RW(error);
227
228 static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
229                            char *buf)
230 {
231         u32 text_id = 0;
232         int err;
233
234         err = visorchannel_read(chipset_dev->controlvm_channel,
235                                 offsetof(struct visor_controlvm_channel,
236                                          installation_text_id),
237                                 &text_id, sizeof(u32));
238         if (err)
239                 return err;
240
241         return sprintf(buf, "%u\n", text_id);
242 }
243
244 static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
245                             const char *buf, size_t count)
246 {
247         u32 text_id;
248         int err;
249
250         if (kstrtou32(buf, 10, &text_id))
251                 return -EINVAL;
252
253         err = visorchannel_write(chipset_dev->controlvm_channel,
254                                  offsetof(struct visor_controlvm_channel,
255                                           installation_text_id),
256                                  &text_id, sizeof(u32));
257         if (err)
258                 return err;
259         return count;
260 }
261 static DEVICE_ATTR_RW(textid);
262
263 static ssize_t remaining_steps_show(struct device *dev,
264                                     struct device_attribute *attr, char *buf)
265 {
266         u16 remaining_steps = 0;
267         int err;
268
269         err = visorchannel_read(chipset_dev->controlvm_channel,
270                                 offsetof(struct visor_controlvm_channel,
271                                          installation_remaining_steps),
272                                 &remaining_steps, sizeof(u16));
273         if (err)
274                 return err;
275
276         return sprintf(buf, "%hu\n", remaining_steps);
277 }
278
279 static ssize_t remaining_steps_store(struct device *dev,
280                                      struct device_attribute *attr,
281                                      const char *buf, size_t count)
282 {
283         u16 remaining_steps;
284         int err;
285
286         if (kstrtou16(buf, 10, &remaining_steps))
287                 return -EINVAL;
288
289         err = visorchannel_write(chipset_dev->controlvm_channel,
290                                  offsetof(struct visor_controlvm_channel,
291                                           installation_remaining_steps),
292                                  &remaining_steps, sizeof(u16));
293         if (err)
294                 return err;
295         return count;
296 }
297 static DEVICE_ATTR_RW(remaining_steps);
298
/* Key used to look up a visor_device by bus/device number. */
struct visor_busdev {
        u32 bus_no;
        u32 dev_no;
};
303
304 static int match_visorbus_dev_by_id(struct device *dev, void *data)
305 {
306         struct visor_device *vdev = to_visor_device(dev);
307         struct visor_busdev *id = data;
308
309         if ((vdev->chipset_bus_no == id->bus_no) &&
310             (vdev->chipset_dev_no == id->dev_no))
311                 return 1;
312
313         return 0;
314 }
315
316 struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
317                                                struct visor_device *from)
318 {
319         struct device *dev;
320         struct device *dev_start = NULL;
321         struct visor_device *vdev = NULL;
322         struct visor_busdev id = {
323                 .bus_no = bus_no,
324                 .dev_no = dev_no
325         };
326
327         if (from)
328                 dev_start = &from->device;
329         dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
330                               match_visorbus_dev_by_id);
331         if (dev)
332                 vdev = to_visor_device(dev);
333         return vdev;
334 }
335
336 static void controlvm_init_response(struct controlvm_message *msg,
337                                     struct controlvm_message_header *msg_hdr,
338                                     int response)
339 {
340         memset(msg, 0, sizeof(struct controlvm_message));
341         memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
342         msg->hdr.payload_bytes = 0;
343         msg->hdr.payload_vm_offset = 0;
344         msg->hdr.payload_max_bytes = 0;
345         if (response < 0) {
346                 msg->hdr.flags.failed = 1;
347                 msg->hdr.completion_status = (u32)(-response);
348         }
349 }
350
351 static int controlvm_respond_chipset_init(
352                                 struct controlvm_message_header *msg_hdr,
353                                 int response,
354                                 enum visor_chipset_feature features)
355 {
356         struct controlvm_message outmsg;
357
358         controlvm_init_response(&outmsg, msg_hdr, response);
359         outmsg.cmd.init_chipset.features = features;
360         return visorchannel_signalinsert(chipset_dev->controlvm_channel,
361                                          CONTROLVM_QUEUE_REQUEST, &outmsg);
362 }
363
364 static int chipset_init(struct controlvm_message *inmsg)
365 {
366         static int chipset_inited;
367         enum visor_chipset_feature features = 0;
368         int rc = CONTROLVM_RESP_SUCCESS;
369         int res = 0;
370
371         if (chipset_inited) {
372                 rc = -CONTROLVM_RESP_ALREADY_DONE;
373                 res = -EIO;
374                 goto out_respond;
375         }
376         chipset_inited = 1;
377
378         /*
379          * Set features to indicate we support parahotplug (if Command also
380          * supports it).
381          */
382         features = inmsg->cmd.init_chipset.features &
383                    VISOR_CHIPSET_FEATURE_PARA_HOTPLUG;
384
385         /*
386          * Set the "reply" bit so Command knows this is a features-aware
387          * driver.
388          */
389         features |= VISOR_CHIPSET_FEATURE_REPLY;
390
391 out_respond:
392         if (inmsg->hdr.flags.response_expected)
393                 res = controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
394
395         return res;
396 }
397
398 static int controlvm_respond(struct controlvm_message_header *msg_hdr,
399                              int response,
400                              struct visor_segment_state *state)
401 {
402         struct controlvm_message outmsg;
403
404         controlvm_init_response(&outmsg, msg_hdr, response);
405         if (outmsg.hdr.flags.test_message == 1)
406                 return -EINVAL;
407
408         if (state) {
409                 outmsg.cmd.device_change_state.state = *state;
410                 outmsg.cmd.device_change_state.flags.phys_device = 1;
411         }
412
413         return visorchannel_signalinsert(chipset_dev->controlvm_channel,
414                                          CONTROLVM_QUEUE_REQUEST, &outmsg);
415 }
416
/* Selects which saved-crash-message slot save_crash_message() writes. */
enum crash_obj_type {
        CRASH_DEV,
        CRASH_BUS,
};
421
422 static int save_crash_message(struct controlvm_message *msg,
423                               enum crash_obj_type cr_type)
424 {
425         u32 local_crash_msg_offset;
426         u16 local_crash_msg_count;
427         int err;
428
429         err = visorchannel_read(chipset_dev->controlvm_channel,
430                                 offsetof(struct visor_controlvm_channel,
431                                          saved_crash_message_count),
432                                 &local_crash_msg_count, sizeof(u16));
433         if (err) {
434                 dev_err(&chipset_dev->acpi_device->dev,
435                         "failed to read message count\n");
436                 return err;
437         }
438
439         if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
440                 dev_err(&chipset_dev->acpi_device->dev,
441                         "invalid number of messages\n");
442                 return -EIO;
443         }
444
445         err = visorchannel_read(chipset_dev->controlvm_channel,
446                                 offsetof(struct visor_controlvm_channel,
447                                          saved_crash_message_offset),
448                                 &local_crash_msg_offset, sizeof(u32));
449         if (err) {
450                 dev_err(&chipset_dev->acpi_device->dev,
451                         "failed to read offset\n");
452                 return err;
453         }
454
455         switch (cr_type) {
456         case CRASH_DEV:
457                 local_crash_msg_offset += sizeof(struct controlvm_message);
458                 err = visorchannel_write(chipset_dev->controlvm_channel,
459                                          local_crash_msg_offset,
460                                          msg,
461                                          sizeof(struct controlvm_message));
462                 if (err) {
463                         dev_err(&chipset_dev->acpi_device->dev,
464                                 "failed to write dev msg\n");
465                         return err;
466                 }
467                 break;
468         case CRASH_BUS:
469                 err = visorchannel_write(chipset_dev->controlvm_channel,
470                                          local_crash_msg_offset,
471                                          msg,
472                                          sizeof(struct controlvm_message));
473                 if (err) {
474                         dev_err(&chipset_dev->acpi_device->dev,
475                                 "failed to write bus msg\n");
476                         return err;
477                 }
478                 break;
479         default:
480                 dev_err(&chipset_dev->acpi_device->dev,
481                         "Invalid crash_obj_type\n");
482                 break;
483         }
484         return 0;
485 }
486
487 static int controlvm_responder(enum controlvm_id cmd_id,
488                                struct controlvm_message_header *pending_msg_hdr,
489                                int response)
490 {
491         if (!pending_msg_hdr)
492                 return -EIO;
493
494         if (pending_msg_hdr->id != (u32)cmd_id)
495                 return -EINVAL;
496
497         return controlvm_respond(pending_msg_hdr, response, NULL);
498 }
499
500 static int device_changestate_responder(
501                                 enum controlvm_id cmd_id,
502                                 struct visor_device *p, int response,
503                                 struct visor_segment_state response_state)
504 {
505         struct controlvm_message outmsg;
506
507         if (!p->pending_msg_hdr)
508                 return -EIO;
509         if (p->pending_msg_hdr->id != cmd_id)
510                 return -EINVAL;
511
512         controlvm_init_response(&outmsg, p->pending_msg_hdr, response);
513
514         outmsg.cmd.device_change_state.bus_no = p->chipset_bus_no;
515         outmsg.cmd.device_change_state.dev_no = p->chipset_dev_no;
516         outmsg.cmd.device_change_state.state = response_state;
517
518         return visorchannel_signalinsert(chipset_dev->controlvm_channel,
519                                          CONTROLVM_QUEUE_REQUEST, &outmsg);
520 }
521
/*
 * visorbus_create() - handle a controlvm bus-create message
 * @inmsg: message describing the bus to create
 *
 * Allocates a visor_device for the bus, creates its visorchannel, and
 * hands it to visorbus_create_instance(), which sends the success
 * response.  On any failure the response (when requested) is sent here.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int visorbus_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	struct controlvm_message_header *pmsg_hdr = NULL;
	u32 bus_no = cmd->create_bus.bus_no;
	struct visor_device *bus_info;
	struct visorchannel *visorchannel;
	int err;

	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (bus_info && (bus_info->state.created == 1)) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed %s: already exists\n", __func__);
		err = -EEXIST;
		goto err_respond;
	}

	bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
	if (!bus_info) {
		err = -ENOMEM;
		goto err_respond;
	}

	INIT_LIST_HEAD(&bus_info->list_all);
	bus_info->chipset_bus_no = bus_no;
	bus_info->chipset_dev_no = BUS_ROOT_DEVICE;

	/* s-Par IOVM bus: preserve this message for use after a crash */
	if (guid_equal(&cmd->create_bus.bus_inst_guid, &visor_siovm_guid)) {
		err = save_crash_message(inmsg, CRASH_BUS);
		if (err)
			goto err_free_bus_info;
	}

	/* stash the request header so the response can be sent later */
	if (inmsg->hdr.flags.response_expected == 1) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr),
				   GFP_KERNEL);
		if (!pmsg_hdr) {
			err = -ENOMEM;
			goto err_free_bus_info;
		}

		memcpy(pmsg_hdr, &inmsg->hdr,
		       sizeof(struct controlvm_message_header));
		bus_info->pending_msg_hdr = pmsg_hdr;
	}

	visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
					   GFP_KERNEL,
					   &cmd->create_bus.bus_data_type_guid);
	if (!visorchannel) {
		err = -ENOMEM;
		goto err_free_pending_msg;
	}

	bus_info->visorchannel = visorchannel;

	/* Response will be handled by visorbus_create_instance on success */
	err = visorbus_create_instance(bus_info);
	if (err)
		goto err_destroy_channel;

	return 0;

err_destroy_channel:
	visorchannel_destroy(visorchannel);

err_free_pending_msg:
	kfree(bus_info->pending_msg_hdr);

err_free_bus_info:
	kfree(bus_info);

err_respond:
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return err;
}
599
600 static int visorbus_destroy(struct controlvm_message *inmsg)
601 {
602         struct controlvm_message_packet *cmd = &inmsg->cmd;
603         struct controlvm_message_header *pmsg_hdr = NULL;
604         u32 bus_no = cmd->destroy_bus.bus_no;
605         struct visor_device *bus_info;
606         int err;
607
608         bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
609         if (!bus_info) {
610                 err = -ENODEV;
611                 goto err_respond;
612         }
613         if (bus_info->state.created == 0) {
614                 err = -ENOENT;
615                 goto err_respond;
616         }
617         if (bus_info->pending_msg_hdr) {
618                 /* only non-NULL if dev is still waiting on a response */
619                 err = -EEXIST;
620                 goto err_respond;
621         }
622         if (inmsg->hdr.flags.response_expected == 1) {
623                 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
624                 if (!pmsg_hdr) {
625                         err = -ENOMEM;
626                         goto err_respond;
627                 }
628
629                 memcpy(pmsg_hdr, &inmsg->hdr,
630                        sizeof(struct controlvm_message_header));
631                 bus_info->pending_msg_hdr = pmsg_hdr;
632         }
633
634         /* Response will be handled by visorbus_remove_instance */
635         visorbus_remove_instance(bus_info);
636         return 0;
637
638 err_respond:
639         if (inmsg->hdr.flags.response_expected == 1)
640                 controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
641         return err;
642 }
643
644 static const guid_t *parser_id_get(struct parser_context *ctx)
645 {
646         return &ctx->data.id;
647 }
648
649 static void *parser_string_get(struct parser_context *ctx)
650 {
651         u8 *pscan;
652         unsigned long nscan;
653         int value_length;
654         void *value;
655         int i;
656
657         pscan = ctx->curr;
658         if (!pscan)
659                 return NULL;
660         nscan = ctx->bytes_remaining;
661         if (nscan == 0)
662                 return NULL;
663
664         for (i = 0, value_length = -1; i < nscan; i++)
665                 if (pscan[i] == '\0') {
666                         value_length = i;
667                         break;
668                 }
669         /* '\0' was not included in the length */
670         if (value_length < 0)
671                 value_length = nscan;
672
673         value = kmalloc(value_length + 1, GFP_KERNEL);
674         if (!value)
675                 return NULL;
676         if (value_length > 0)
677                 memcpy(value, pscan, value_length);
678         ((u8 *)(value))[value_length] = '\0';
679         return value;
680 }
681
682 static void *parser_name_get(struct parser_context *ctx)
683 {
684         struct visor_controlvm_parameters_header *phdr = NULL;
685
686         phdr = &ctx->data;
687
688         if (phdr->name_offset + phdr->name_length > ctx->param_bytes)
689                 return NULL;
690
691         ctx->curr = (char *)&phdr + phdr->name_offset;
692         ctx->bytes_remaining = phdr->name_length;
693         return parser_string_get(ctx);
694 }
695
696 static int visorbus_configure(struct controlvm_message *inmsg,
697                               struct parser_context *parser_ctx)
698 {
699         struct controlvm_message_packet *cmd = &inmsg->cmd;
700         u32 bus_no;
701         struct visor_device *bus_info;
702         int err = 0;
703
704         bus_no = cmd->configure_bus.bus_no;
705         bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
706         if (!bus_info) {
707                 err = -EINVAL;
708                 goto err_respond;
709         }
710         if (bus_info->state.created == 0) {
711                 err = -EINVAL;
712                 goto err_respond;
713         }
714         if (bus_info->pending_msg_hdr) {
715                 err = -EIO;
716                 goto err_respond;
717         }
718
719         err = visorchannel_set_clientpartition
720                 (bus_info->visorchannel,
721                  cmd->configure_bus.guest_handle);
722         if (err)
723                 goto err_respond;
724
725         if (parser_ctx) {
726                 const guid_t *partition_guid = parser_id_get(parser_ctx);
727
728                 guid_copy(&bus_info->partition_guid, partition_guid);
729                 bus_info->name = parser_name_get(parser_ctx);
730         }
731
732         if (inmsg->hdr.flags.response_expected == 1)
733                 controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
734         return 0;
735
736 err_respond:
737         dev_err(&chipset_dev->acpi_device->dev,
738                 "%s exited with err: %d\n", __func__, err);
739         if (inmsg->hdr.flags.response_expected == 1)
740                 controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
741         return err;
742 }
743
/*
 * visorbus_device_create() - handle a controlvm device-create message
 * @inmsg: message describing the device to create
 *
 * Allocates a visor_device, creates its (locked) visorchannel, and hands
 * it to create_visor_device(), which sends the success response.  On any
 * failure the response (when requested) is sent here.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int visorbus_device_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	struct controlvm_message_header *pmsg_hdr = NULL;
	u32 bus_no = cmd->create_device.bus_no;
	u32 dev_no = cmd->create_device.dev_no;
	struct visor_device *dev_info = NULL;
	struct visor_device *bus_info;
	struct visorchannel *visorchannel;
	int err;

	/* the parent bus must already exist and be marked created */
	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed to get bus by id: %d\n", bus_no);
		err = -ENODEV;
		goto err_respond;
	}
	if (bus_info->state.created == 0) {
		dev_err(&chipset_dev->acpi_device->dev,
			"bus not created, id: %d\n", bus_no);
		err = -EINVAL;
		goto err_respond;
	}

	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (dev_info && (dev_info->state.created == 1)) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed to get bus by id: %d/%d\n", bus_no, dev_no);
		err = -EEXIST;
		goto err_respond;
	}

	dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
	if (!dev_info) {
		err = -ENOMEM;
		goto err_respond;
	}

	dev_info->chipset_bus_no = bus_no;
	dev_info->chipset_dev_no = dev_no;
	guid_copy(&dev_info->inst, &cmd->create_device.dev_inst_guid);
	dev_info->device.parent = &bus_info->device;

	visorchannel =
	       visorchannel_create_with_lock(cmd->create_device.channel_addr,
					     GFP_KERNEL,
					     &cmd->create_device.data_type_guid);
	if (!visorchannel) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed to create visorchannel: %d/%d\n",
			bus_no, dev_no);
		err = -ENOMEM;
		goto err_free_dev_info;
	}
	dev_info->visorchannel = visorchannel;
	guid_copy(&dev_info->channel_type_guid, &cmd->create_device.data_type_guid);
	/* vhba device: preserve this message for use after a crash */
	if (guid_equal(&cmd->create_device.data_type_guid, &visor_vhba_channel_guid)) {
		err = save_crash_message(inmsg, CRASH_DEV);
		if (err)
			goto err_destroy_visorchannel;
	}

	/* stash the request header so the response can be sent later */
	if (inmsg->hdr.flags.response_expected == 1) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			err = -ENOMEM;
			goto err_destroy_visorchannel;
		}

		memcpy(pmsg_hdr, &inmsg->hdr,
		       sizeof(struct controlvm_message_header));
		dev_info->pending_msg_hdr = pmsg_hdr;
	}
	/* create_visor_device will send response */
	err = create_visor_device(dev_info);
	if (err)
		goto err_destroy_visorchannel;

	return 0;

err_destroy_visorchannel:
	visorchannel_destroy(visorchannel);

err_free_dev_info:
	kfree(dev_info);

err_respond:
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return err;
}
836
837 static int visorbus_device_changestate(struct controlvm_message *inmsg)
838 {
839         struct controlvm_message_packet *cmd = &inmsg->cmd;
840         struct controlvm_message_header *pmsg_hdr = NULL;
841         u32 bus_no = cmd->device_change_state.bus_no;
842         u32 dev_no = cmd->device_change_state.dev_no;
843         struct visor_segment_state state = cmd->device_change_state.state;
844         struct visor_device *dev_info;
845         int err = 0;
846
847         dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
848         if (!dev_info) {
849                 err = -ENODEV;
850                 goto err_respond;
851         }
852         if (dev_info->state.created == 0) {
853                 err = -EINVAL;
854                 goto err_respond;
855         }
856         if (dev_info->pending_msg_hdr) {
857                 /* only non-NULL if dev is still waiting on a response */
858                 err = -EIO;
859                 goto err_respond;
860         }
861         if (inmsg->hdr.flags.response_expected == 1) {
862                 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
863                 if (!pmsg_hdr) {
864                         err = -ENOMEM;
865                         goto err_respond;
866                 }
867
868                 memcpy(pmsg_hdr, &inmsg->hdr,
869                        sizeof(struct controlvm_message_header));
870                 dev_info->pending_msg_hdr = pmsg_hdr;
871         }
872
873         if (state.alive == segment_state_running.alive &&
874             state.operating == segment_state_running.operating)
875                 /* Response will be sent from visorchipset_device_resume */
876                 err = visorchipset_device_resume(dev_info);
877         /* ServerNotReady / ServerLost / SegmentStateStandby */
878         else if (state.alive == segment_state_standby.alive &&
879                  state.operating == segment_state_standby.operating)
880                 /*
881                  * technically this is standby case where server is lost.
882                  * Response will be sent from visorchipset_device_pause.
883                  */
884                 err = visorchipset_device_pause(dev_info);
885         if (err)
886                 goto err_respond;
887
888         return 0;
889
890 err_respond:
891         dev_err(&chipset_dev->acpi_device->dev, "failed: %d\n", err);
892         if (inmsg->hdr.flags.response_expected == 1)
893                 controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
894         return err;
895 }
896
897 static int visorbus_device_destroy(struct controlvm_message *inmsg)
898 {
899         struct controlvm_message_packet *cmd = &inmsg->cmd;
900         struct controlvm_message_header *pmsg_hdr = NULL;
901         u32 bus_no = cmd->destroy_device.bus_no;
902         u32 dev_no = cmd->destroy_device.dev_no;
903         struct visor_device *dev_info;
904         int err;
905
906         dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
907         if (!dev_info) {
908                 err = -ENODEV;
909                 goto err_respond;
910         }
911         if (dev_info->state.created == 0) {
912                 err = -EINVAL;
913                 goto err_respond;
914         }
915         if (dev_info->pending_msg_hdr) {
916                 /* only non-NULL if dev is still waiting on a response */
917                 err = -EIO;
918                 goto err_respond;
919         }
920         if (inmsg->hdr.flags.response_expected == 1) {
921                 pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
922                 if (!pmsg_hdr) {
923                         err = -ENOMEM;
924                         goto err_respond;
925                 }
926
927                 memcpy(pmsg_hdr, &inmsg->hdr,
928                        sizeof(struct controlvm_message_header));
929                 dev_info->pending_msg_hdr = pmsg_hdr;
930         }
931
932         kfree(dev_info->name);
933         remove_visor_device(dev_info);
934         return 0;
935
936 err_respond:
937         if (inmsg->hdr.flags.response_expected == 1)
938                 controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
939         return err;
940 }
941
942 /*
943  * The general parahotplug flow works as follows. The visorchipset receives
944  * a DEVICE_CHANGESTATE message from Command specifying a physical device
945  * to enable or disable. The CONTROLVM message handler calls
946  * parahotplug_process_message, which then adds the message to a global list
947  * and kicks off a udev event which causes a user level script to enable or
948  * disable the specified device. The udev script then writes to
949  * /sys/devices/platform/visorchipset/parahotplug, which causes the
950  * parahotplug store functions to get called, at which point the
951  * appropriate CONTROLVM message is retrieved from the list and responded to.
952  */
953
954 #define PARAHOTPLUG_TIMEOUT_MS 2000
955
956 /*
957  * parahotplug_next_id() - generate unique int to match an outstanding
958  *                         CONTROLVM message with a udev script /sys
959  *                         response
960  *
961  * Return: a unique integer value
962  */
963 static int parahotplug_next_id(void)
964 {
965         static atomic_t id = ATOMIC_INIT(0);
966
967         return atomic_inc_return(&id);
968 }
969
970 /*
971  * parahotplug_next_expiration() - returns the time (in jiffies) when a
972  *                                 CONTROLVM message on the list should expire
973  *                                 -- PARAHOTPLUG_TIMEOUT_MS in the future
974  *
975  * Return: expected expiration time (in jiffies)
976  */
977 static unsigned long parahotplug_next_expiration(void)
978 {
979         return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
980 }
981
982 /*
983  * parahotplug_request_create() - create a parahotplug_request, which is
984  *                                basically a wrapper for a CONTROLVM_MESSAGE
985  *                                that we can stick on a list
986  * @msg: the message to insert in the request
987  *
988  * Return: the request containing the provided message
989  */
990 static struct parahotplug_request *parahotplug_request_create(
991                                                 struct controlvm_message *msg)
992 {
993         struct parahotplug_request *req;
994
995         req = kmalloc(sizeof(*req), GFP_KERNEL);
996         if (!req)
997                 return NULL;
998
999         req->id = parahotplug_next_id();
1000         req->expiration = parahotplug_next_expiration();
1001         req->msg = *msg;
1002
1003         return req;
1004 }
1005
1006 /*
1007  * parahotplug_request_destroy() - free a parahotplug_request
1008  * @req: the request to deallocate
1009  */
static void parahotplug_request_destroy(struct parahotplug_request *req)
{
	/* the request owns no other resources; a plain kfree suffices */
	kfree(req);
}
1014
/* outstanding (not-yet-responded-to) parahotplug disable requests */
static LIST_HEAD(parahotplug_request_list);
/* lock for above */
static DEFINE_SPINLOCK(parahotplug_request_list_lock);
1018
1019 /*
1020  * parahotplug_request_complete() - mark request as complete
1021  * @id:     the id of the request
1022  * @active: indicates whether the request is assigned to active partition
1023  *
1024  * Called from the /sys handler, which means the user script has
1025  * finished the enable/disable. Find the matching identifier, and
1026  * respond to the CONTROLVM message with success.
1027  *
1028  * Return: 0 on success or -EINVAL on failure
1029  */
1030 static int parahotplug_request_complete(int id, u16 active)
1031 {
1032         struct list_head *pos;
1033         struct list_head *tmp;
1034
1035         spin_lock(&parahotplug_request_list_lock);
1036
1037         /* Look for a request matching "id". */
1038         list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1039                 struct parahotplug_request *req =
1040                     list_entry(pos, struct parahotplug_request, list);
1041                 if (req->id == id) {
1042                         /*
1043                          * Found a match. Remove it from the list and
1044                          * respond.
1045                          */
1046                         list_del(pos);
1047                         spin_unlock(&parahotplug_request_list_lock);
1048                         req->msg.cmd.device_change_state.state.active = active;
1049                         if (req->msg.hdr.flags.response_expected)
1050                                 controlvm_respond(
1051                                        &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
1052                                        &req->msg.cmd.device_change_state.state);
1053                         parahotplug_request_destroy(req);
1054                         return 0;
1055                 }
1056         }
1057
1058         spin_unlock(&parahotplug_request_list_lock);
1059         return -EINVAL;
1060 }
1061
1062 /*
1063  * devicedisabled_store() - disables the hotplug device
1064  * @dev:   sysfs interface variable not utilized in this function
1065  * @attr:  sysfs interface variable not utilized in this function
1066  * @buf:   buffer containing the device id
1067  * @count: the size of the buffer
1068  *
1069  * The parahotplug/devicedisabled interface gets called by our support script
1070  * when an SR-IOV device has been shut down. The ID is passed to the script
1071  * and then passed back when the device has been removed.
1072  *
1073  * Return: the size of the buffer for success or negative for error
1074  */
1075 static ssize_t devicedisabled_store(struct device *dev,
1076                                     struct device_attribute *attr,
1077                                     const char *buf, size_t count)
1078 {
1079         unsigned int id;
1080         int err;
1081
1082         if (kstrtouint(buf, 10, &id))
1083                 return -EINVAL;
1084
1085         err = parahotplug_request_complete(id, 0);
1086         if (err < 0)
1087                 return err;
1088         return count;
1089 }
1090 static DEVICE_ATTR_WO(devicedisabled);
1091
1092 /*
1093  * deviceenabled_store() - enables the hotplug device
1094  * @dev:   sysfs interface variable not utilized in this function
1095  * @attr:  sysfs interface variable not utilized in this function
1096  * @buf:   buffer containing the device id
1097  * @count: the size of the buffer
1098  *
1099  * The parahotplug/deviceenabled interface gets called by our support script
1100  * when an SR-IOV device has been recovered. The ID is passed to the script
1101  * and then passed back when the device has been brought back up.
1102  *
1103  * Return: the size of the buffer for success or negative for error
1104  */
static ssize_t deviceenabled_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int id;

	if (kstrtouint(buf, 10, &id))
		return -EINVAL;

	/*
	 * Enable requests are responded to immediately in
	 * parahotplug_process_message() and are never added to the request
	 * list, so a lookup miss here is expected; the result is ignored.
	 */
	parahotplug_request_complete(id, 1);
	return count;
}
static DEVICE_ATTR_WO(deviceenabled);
1118
/* attributes exposed under /sys/devices/platform/visorchipset/install */
static struct attribute *visorchipset_install_attrs[] = {
	&dev_attr_toolaction.attr,
	&dev_attr_boottotool.attr,
	&dev_attr_error.attr,
	&dev_attr_textid.attr,
	&dev_attr_remaining_steps.attr,
	NULL
};

static const struct attribute_group visorchipset_install_group = {
	.name = "install",
	.attrs = visorchipset_install_attrs
};

/* attributes exposed under /sys/devices/platform/visorchipset/parahotplug */
static struct attribute *visorchipset_parahotplug_attrs[] = {
	&dev_attr_devicedisabled.attr,
	&dev_attr_deviceenabled.attr,
	NULL
};

static const struct attribute_group visorchipset_parahotplug_group = {
	.name = "parahotplug",
	.attrs = visorchipset_parahotplug_attrs
};

/* all sysfs groups registered on the visorchipset platform device */
static const struct attribute_group *visorchipset_dev_groups[] = {
	&visorchipset_install_group,
	&visorchipset_parahotplug_group,
	NULL
};
1149
1150 /*
1151  * parahotplug_request_kickoff() - initiate parahotplug request
1152  * @req: the request to initiate
1153  *
1154  * Cause uevent to run the user level script to do the disable/enable specified
1155  * in the parahotplug_request.
1156  */
1157 static int parahotplug_request_kickoff(struct parahotplug_request *req)
1158 {
1159         struct controlvm_message_packet *cmd = &req->msg.cmd;
1160         char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1161             env_func[40];
1162         char *envp[] = {
1163                 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
1164         };
1165
1166         sprintf(env_cmd, "VISOR_PARAHOTPLUG=1");
1167         sprintf(env_id, "VISOR_PARAHOTPLUG_ID=%d", req->id);
1168         sprintf(env_state, "VISOR_PARAHOTPLUG_STATE=%d",
1169                 cmd->device_change_state.state.active);
1170         sprintf(env_bus, "VISOR_PARAHOTPLUG_BUS=%d",
1171                 cmd->device_change_state.bus_no);
1172         sprintf(env_dev, "VISOR_PARAHOTPLUG_DEVICE=%d",
1173                 cmd->device_change_state.dev_no >> 3);
1174         sprintf(env_func, "VISOR_PARAHOTPLUG_FUNCTION=%d",
1175                 cmd->device_change_state.dev_no & 0x7);
1176
1177         return kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
1178                                   KOBJ_CHANGE, envp);
1179 }
1180
1181 /*
1182  * parahotplug_process_message() - enables or disables a PCI device by kicking
1183  *                                 off a udev script
1184  * @inmsg: the message indicating whether to enable or disable
1185  */
1186 static int parahotplug_process_message(struct controlvm_message *inmsg)
1187 {
1188         struct parahotplug_request *req;
1189         int err;
1190
1191         req = parahotplug_request_create(inmsg);
1192         if (!req)
1193                 return -ENOMEM;
1194
1195         /*
1196          * For enable messages, just respond with success right away, we don't
1197          * need to wait to see if the enable was successful.
1198          */
1199         if (inmsg->cmd.device_change_state.state.active) {
1200                 err = parahotplug_request_kickoff(req);
1201                 if (err)
1202                         goto err_respond;
1203                 controlvm_respond(&inmsg->hdr, CONTROLVM_RESP_SUCCESS,
1204                                   &inmsg->cmd.device_change_state.state);
1205                 parahotplug_request_destroy(req);
1206                 return 0;
1207         }
1208
1209         /*
1210          * For disable messages, add the request to the request list before
1211          * kicking off the udev script. It won't get responded to until the
1212          * script has indicated it's done.
1213          */
1214         spin_lock(&parahotplug_request_list_lock);
1215         list_add_tail(&req->list, &parahotplug_request_list);
1216         spin_unlock(&parahotplug_request_list_lock);
1217
1218         err = parahotplug_request_kickoff(req);
1219         if (err)
1220                 goto err_respond;
1221         return 0;
1222
1223 err_respond:
1224         controlvm_respond(&inmsg->hdr, err,
1225                           &inmsg->cmd.device_change_state.state);
1226         return err;
1227 }
1228
1229 /*
1230  * chipset_ready_uevent() - sends chipset_ready action
1231  *
1232  * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1233  *
1234  * Return: 0 on success, negative on failure
1235  */
1236 static int chipset_ready_uevent(struct controlvm_message_header *msg_hdr)
1237 {
1238         int res;
1239
1240         res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
1241                              KOBJ_ONLINE);
1242
1243         if (msg_hdr->flags.response_expected)
1244                 controlvm_respond(msg_hdr, res, NULL);
1245
1246         return res;
1247 }
1248
1249 /*
1250  * chipset_selftest_uevent() - sends chipset_selftest action
1251  *
1252  * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
1253  *
1254  * Return: 0 on success, negative on failure
1255  */
1256 static int chipset_selftest_uevent(struct controlvm_message_header *msg_hdr)
1257 {
1258         char env_selftest[20];
1259         char *envp[] = { env_selftest, NULL };
1260         int res;
1261
1262         sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
1263         res = kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
1264                                  KOBJ_CHANGE, envp);
1265
1266         if (msg_hdr->flags.response_expected)
1267                 controlvm_respond(msg_hdr, res, NULL);
1268
1269         return res;
1270 }
1271
1272 /*
1273  * chipset_notready_uevent() - sends chipset_notready action
1274  *
1275  * Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
1276  *
1277  * Return: 0 on success, negative on failure
1278  */
1279 static int chipset_notready_uevent(struct controlvm_message_header *msg_hdr)
1280 {
1281         int res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
1282                              KOBJ_OFFLINE);
1283
1284         if (msg_hdr->flags.response_expected)
1285                 controlvm_respond(msg_hdr, res, NULL);
1286
1287         return res;
1288 }
1289
/*
 * unisys_vmcall() - issue an s-Par hypervisor call
 * @tuple: the VMCALL method id (passed in EAX)
 * @param: 64-bit parameter, split across EBX (low 32 bits) and ECX (high 32)
 *
 * Return: 0 on success, or a negative Linux errno translated from the
 *         VMCALL result code.
 */
static int unisys_vmcall(unsigned long tuple, unsigned long param)
{
	int result = 0;
	unsigned int cpuid_eax, cpuid_ebx, cpuid_ecx, cpuid_edx;
	unsigned long reg_ebx;
	unsigned long reg_ecx;

	reg_ebx = param & 0xFFFFFFFF;
	reg_ecx = param >> 32;

	/* CPUID.01H:ECX bit 31 indicates we are running under a hypervisor */
	cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
	if (!(cpuid_ecx & 0x80000000))
		return -EPERM;

	/* emit the VMCALL opcode bytes (0f 01 c1); result comes back in EAX */
	__asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
		"a"(tuple), "b"(reg_ebx), "c"(reg_ecx));
	if (result)
		goto error;

	return 0;
/* Need to convert from VMCALL error codes to Linux */
error:
	switch (result) {
	case VMCALL_RESULT_INVALID_PARAM:
		return -EINVAL;
	case VMCALL_RESULT_DATA_UNAVAILABLE:
		return -ENODEV;
	default:
		return -EFAULT;
	}
}
1321
1322 static int controlvm_channel_create(struct visorchipset_device *dev)
1323 {
1324         struct visorchannel *chan;
1325         u64 addr;
1326         int err;
1327
1328         err = unisys_vmcall(VMCALL_CONTROLVM_ADDR,
1329                             virt_to_phys(&dev->controlvm_params));
1330         if (err)
1331                 return err;
1332         addr = dev->controlvm_params.address;
1333         chan = visorchannel_create_with_lock(addr, GFP_KERNEL,
1334                                              &visor_controlvm_channel_guid);
1335         if (!chan)
1336                 return -ENOMEM;
1337         dev->controlvm_channel = chan;
1338         return 0;
1339 }
1340
/*
 * setup_crash_devices_work_queue() - replay the saved storage bus/device
 *                                    create messages when booting a crash
 *                                    (kdump) kernel
 * @work: the scheduled work item (unused)
 *
 * Reads the crash messages the previous kernel stashed in the controlvm
 * channel and re-issues the bus and device create messages so the crash
 * kernel regains access to its storage device.
 */
static void setup_crash_devices_work_queue(struct work_struct *work)
{
	struct controlvm_message local_crash_bus_msg;
	struct controlvm_message local_crash_dev_msg;
	struct controlvm_message msg;
	u32 local_crash_msg_offset;
	u16 local_crash_msg_count;

	/* send init chipset msg */
	/*
	 * NOTE(review): only hdr.id and the init_chipset fields are set;
	 * presumably chipset_init() reads nothing else from msg — confirm.
	 */
	msg.hdr.id = CONTROLVM_CHIPSET_INIT;
	msg.cmd.init_chipset.bus_count = 23;
	msg.cmd.init_chipset.switch_count = 0;

	chipset_init(&msg);

	/* get saved message count */
	if (visorchannel_read(chipset_dev->controlvm_channel,
			      offsetof(struct visor_controlvm_channel,
				       saved_crash_message_count),
			      &local_crash_msg_count, sizeof(u16)) < 0) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed to read channel\n");
		return;
	}

	/* the channel must hold exactly the expected number of crash msgs */
	if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
		dev_err(&chipset_dev->acpi_device->dev,
			"invalid count\n");
		return;
	}

	/* get saved crash message offset */
	if (visorchannel_read(chipset_dev->controlvm_channel,
			      offsetof(struct visor_controlvm_channel,
				       saved_crash_message_offset),
			      &local_crash_msg_offset, sizeof(u32)) < 0) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed to read channel\n");
		return;
	}

	/* read create device message for storage bus offset */
	if (visorchannel_read(chipset_dev->controlvm_channel,
			      local_crash_msg_offset,
			      &local_crash_bus_msg,
			      sizeof(struct controlvm_message)) < 0) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed to read channel\n");
		return;
	}

	/* read create device message for storage device */
	if (visorchannel_read(chipset_dev->controlvm_channel,
			      local_crash_msg_offset +
			      sizeof(struct controlvm_message),
			      &local_crash_dev_msg,
			      sizeof(struct controlvm_message)) < 0) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed to read channel\n");
		return;
	}

	/* reuse IOVM create bus message */
	if (!local_crash_bus_msg.cmd.create_bus.channel_addr) {
		dev_err(&chipset_dev->acpi_device->dev,
			"no valid create_bus message\n");
		return;
	}
	visorbus_create(&local_crash_bus_msg);

	/* reuse create device message for storage device */
	if (!local_crash_dev_msg.cmd.create_device.channel_addr) {
		dev_err(&chipset_dev->acpi_device->dev,
			"no valid create_device message\n");
		return;
	}
	visorbus_device_create(&local_crash_dev_msg);
}
1419
/*
 * visorbus_response() - send the deferred response for a completed bus
 *                       operation and release the saved message header
 * @bus_info:     the bus the operation was performed on
 * @response:     result code to report back to Command
 * @controlvm_id: id of the controlvm message being responded to
 */
void visorbus_response(struct visor_device *bus_info, int response,
		       int controlvm_id)
{
	controlvm_responder(controlvm_id, bus_info->pending_msg_hdr, response);

	/* the pending message has now been answered; drop it */
	kfree(bus_info->pending_msg_hdr);
	bus_info->pending_msg_hdr = NULL;
}
1428
/*
 * visorbus_device_changestate_response() - send the deferred response for a
 *                                          completed device state change and
 *                                          release the saved message header
 * @dev_info: the device whose state was changed
 * @response: result code to report back to Command
 * @state:    the segment state the device ended up in
 */
void visorbus_device_changestate_response(struct visor_device *dev_info,
					  int response,
					  struct visor_segment_state state)
{
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     dev_info, response, state);

	/* the pending message has now been answered; drop it */
	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}
1439
/*
 * parser_done() - free a parsing context and return its payload allowance
 *                 to the global throttling counter
 * @ctx: the context to release
 */
static void parser_done(struct parser_context *ctx)
{
	/* un-account the buffered payload bytes this context was charged */
	chipset_dev->controlvm_payload_bytes_buffered -= ctx->param_bytes;
	kfree(ctx);
}
1445
1446 static struct parser_context *parser_init_stream(u64 addr, u32 bytes,
1447                                                  bool *retry)
1448 {
1449         int allocbytes;
1450         struct parser_context *ctx;
1451         void *mapping;
1452
1453         *retry = false;
1454
1455         /* alloc an extra byte to ensure payload is \0 terminated */
1456         allocbytes = bytes + 1 + (sizeof(struct parser_context) -
1457                      sizeof(struct visor_controlvm_parameters_header));
1458         if ((chipset_dev->controlvm_payload_bytes_buffered + bytes)
1459             > MAX_CONTROLVM_PAYLOAD_BYTES) {
1460                 *retry = true;
1461                 return NULL;
1462         }
1463         ctx = kzalloc(allocbytes, GFP_KERNEL);
1464         if (!ctx) {
1465                 *retry = true;
1466                 return NULL;
1467         }
1468
1469         ctx->allocbytes = allocbytes;
1470         ctx->param_bytes = bytes;
1471         mapping = memremap(addr, bytes, MEMREMAP_WB);
1472         if (!mapping)
1473                 goto err_finish_ctx;
1474         memcpy(&ctx->data, mapping, bytes);
1475         memunmap(mapping);
1476         ctx->byte_stream = true;
1477         chipset_dev->controlvm_payload_bytes_buffered += ctx->param_bytes;
1478
1479         return ctx;
1480
1481 err_finish_ctx:
1482         kfree(ctx);
1483         return NULL;
1484 }
1485
1486 /*
1487  * handle_command() - process a controlvm message
1488  * @inmsg:        the message to process
1489  * @channel_addr: address of the controlvm channel
1490  *
1491  * Return:
1492  *      0       - Successfully processed the message
1493  *      -EAGAIN - ControlVM message was not processed and should be retried
1494  *                reading the next controlvm message; a scenario where this can
1495  *                occur is when we need to throttle the allocation of memory in
1496  *                which to copy out controlvm payload data.
1497  *      < 0     - error: ControlVM message was processed but an error occurred.
1498  */
1499 static int handle_command(struct controlvm_message inmsg, u64 channel_addr)
1500 {
1501         struct controlvm_message_packet *cmd = &inmsg.cmd;
1502         u64 parm_addr;
1503         u32 parm_bytes;
1504         struct parser_context *parser_ctx = NULL;
1505         struct controlvm_message ackmsg;
1506         int err = 0;
1507
1508         /* create parsing context if necessary */
1509         parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
1510         parm_bytes = inmsg.hdr.payload_bytes;
1511
1512         /*
1513          * Parameter and channel addresses within test messages actually lie
1514          * within our OS-controlled memory. We need to know that, because it
1515          * makes a difference in how we compute the virtual address.
1516          */
1517         if (parm_bytes) {
1518                 bool retry = false;
1519
1520                 parser_ctx = parser_init_stream(parm_addr, parm_bytes, &retry);
1521                 if (!parser_ctx && retry)
1522                         return -EAGAIN;
1523         }
1524         controlvm_init_response(&ackmsg, &inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1525         err = visorchannel_signalinsert(chipset_dev->controlvm_channel,
1526                                         CONTROLVM_QUEUE_ACK, &ackmsg);
1527         if (err)
1528                 return err;
1529
1530         switch (inmsg.hdr.id) {
1531         case CONTROLVM_CHIPSET_INIT:
1532                 err = chipset_init(&inmsg);
1533                 break;
1534         case CONTROLVM_BUS_CREATE:
1535                 err = visorbus_create(&inmsg);
1536                 break;
1537         case CONTROLVM_BUS_DESTROY:
1538                 err = visorbus_destroy(&inmsg);
1539                 break;
1540         case CONTROLVM_BUS_CONFIGURE:
1541                 err = visorbus_configure(&inmsg, parser_ctx);
1542                 break;
1543         case CONTROLVM_DEVICE_CREATE:
1544                 err = visorbus_device_create(&inmsg);
1545                 break;
1546         case CONTROLVM_DEVICE_CHANGESTATE:
1547                 if (cmd->device_change_state.flags.phys_device) {
1548                         err = parahotplug_process_message(&inmsg);
1549                 } else {
1550                         /*
1551                          * save the hdr and cmd structures for later use when
1552                          * sending back the response to Command
1553                          */
1554                         err = visorbus_device_changestate(&inmsg);
1555                         break;
1556                 }
1557                 break;
1558         case CONTROLVM_DEVICE_DESTROY:
1559                 err = visorbus_device_destroy(&inmsg);
1560                 break;
1561         case CONTROLVM_DEVICE_CONFIGURE:
1562                 /* no op just send a respond that we passed */
1563                 if (inmsg.hdr.flags.response_expected)
1564                         controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS,
1565                                           NULL);
1566                 break;
1567         case CONTROLVM_CHIPSET_READY:
1568                 err = chipset_ready_uevent(&inmsg.hdr);
1569                 break;
1570         case CONTROLVM_CHIPSET_SELFTEST:
1571                 err = chipset_selftest_uevent(&inmsg.hdr);
1572                 break;
1573         case CONTROLVM_CHIPSET_STOP:
1574                 err = chipset_notready_uevent(&inmsg.hdr);
1575                 break;
1576         default:
1577                 err = -ENOMSG;
1578                 if (inmsg.hdr.flags.response_expected)
1579                         controlvm_respond(&inmsg.hdr,
1580                                           -CONTROLVM_RESP_ID_UNKNOWN, NULL);
1581                 break;
1582         }
1583
1584         if (parser_ctx) {
1585                 parser_done(parser_ctx);
1586                 parser_ctx = NULL;
1587         }
1588         return err;
1589 }
1590
1591 /*
1592  * read_controlvm_event() - retreives the next message from the
1593  *                          CONTROLVM_QUEUE_EVENT queue in the controlvm
1594  *                          channel
1595  * @msg: pointer to the retrieved message
1596  *
1597  * Return: 0 if valid message was retrieved or -error
1598  */
1599 static int read_controlvm_event(struct controlvm_message *msg)
1600 {
1601         int err = visorchannel_signalremove(chipset_dev->controlvm_channel,
1602                                         CONTROLVM_QUEUE_EVENT, msg);
1603         if (err)
1604                 return err;
1605
1606         /* got a message */
1607         if (msg->hdr.flags.test_message == 1)
1608                 return -EINVAL;
1609
1610         return 0;
1611 }
1612
1613 /*
1614  * parahotplug_process_list() - remove any request from the list that's been on
1615  *                              there too long and respond with an error
1616  */
1617 static void parahotplug_process_list(void)
1618 {
1619         struct list_head *pos;
1620         struct list_head *tmp;
1621
1622         spin_lock(&parahotplug_request_list_lock);
1623
1624         list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1625                 struct parahotplug_request *req =
1626                     list_entry(pos, struct parahotplug_request, list);
1627
1628                 if (!time_after_eq(jiffies, req->expiration))
1629                         continue;
1630
1631                 list_del(pos);
1632                 if (req->msg.hdr.flags.response_expected)
1633                         controlvm_respond(
1634                                 &req->msg.hdr,
1635                                 CONTROLVM_RESP_DEVICE_UDEV_TIMEOUT,
1636                                 &req->msg.cmd.device_change_state.state);
1637                 parahotplug_request_destroy(req);
1638         }
1639
1640         spin_unlock(&parahotplug_request_list_lock);
1641 }
1642
/*
 * controlvm_periodic_work() - periodic poller for the controlvm channel
 * @work: the work_struct this handler was queued with (unused; all state
 *        lives in the global chipset_dev)
 *
 * Drains the RESPONSE queue, processes pending/new EVENT messages via
 * handle_command(), expires stale parahotplug requests, then reschedules
 * itself at either the fast or slow poll interval.
 */
static void controlvm_periodic_work(struct work_struct *work)
{
	struct controlvm_message inmsg;
	int count = 0;
	int err;

	/* Drain the RESPONSE queue make it empty */
	do {
		err = visorchannel_signalremove(chipset_dev->controlvm_channel,
						CONTROLVM_QUEUE_RESPONSE,
						&inmsg);
	} while ((!err) && (++count < CONTROLVM_MESSAGE_MAX));

	/*
	 * -EAGAIN presumably means the queue is empty (fully drained);
	 * anything else is treated as a channel problem and we only
	 * reschedule — TODO confirm against visorchannel_signalremove()
	 */
	if (err != -EAGAIN)
		goto schedule_out;

	if (chipset_dev->controlvm_pending_msg_valid) {
		/*
		 * we throttled processing of a prior msg, so try to process
		 * it again rather than reading a new one
		 */
		inmsg = chipset_dev->controlvm_pending_msg;
		chipset_dev->controlvm_pending_msg_valid = false;
		err = 0;
	} else {
		err = read_controlvm_event(&inmsg);
	}

	/* process messages until the EVENT queue is empty or we throttle */
	while (!err) {
		chipset_dev->most_recent_message_jiffies = jiffies;
		err = handle_command(inmsg,
				     visorchannel_get_physaddr
				     (chipset_dev->controlvm_channel));
		if (err == -EAGAIN) {
			/* stash the message so the next pass retries it */
			chipset_dev->controlvm_pending_msg = inmsg;
			chipset_dev->controlvm_pending_msg_valid = true;
			break;
		}

		err = read_controlvm_event(&inmsg);
	}

	/* parahotplug_worker */
	parahotplug_process_list();

/*
 * The controlvm messages are sent in a bulk. If we start receiving messages, we
 * want the polling to be fast. If we do not receive any message for
 * MIN_IDLE_SECONDS, we can slow down the polling.
 */
schedule_out:
	if (time_after(jiffies, chipset_dev->most_recent_message_jiffies +
				(HZ * MIN_IDLE_SECONDS))) {
		/*
		 * it's been longer than MIN_IDLE_SECONDS since we processed
		 * our last controlvm message; slow down the polling
		 */
		if (chipset_dev->poll_jiffies !=
					      POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
			chipset_dev->poll_jiffies =
					      POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
	} else {
		if (chipset_dev->poll_jiffies !=
					      POLLJIFFIES_CONTROLVMCHANNEL_FAST)
			chipset_dev->poll_jiffies =
					      POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	}

	schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
			      chipset_dev->poll_jiffies);
}
1714
1715 static int visorchipset_init(struct acpi_device *acpi_device)
1716 {
1717         int err = -ENODEV;
1718         struct visorchannel *controlvm_channel;
1719
1720         chipset_dev = kzalloc(sizeof(*chipset_dev), GFP_KERNEL);
1721         if (!chipset_dev)
1722                 goto error;
1723
1724         err = controlvm_channel_create(chipset_dev);
1725         if (err)
1726                 goto error_free_chipset_dev;
1727
1728         acpi_device->driver_data = chipset_dev;
1729         chipset_dev->acpi_device = acpi_device;
1730         chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
1731
1732         err = sysfs_create_groups(&chipset_dev->acpi_device->dev.kobj,
1733                                   visorchipset_dev_groups);
1734         if (err < 0)
1735                 goto error_destroy_channel;
1736
1737         controlvm_channel = chipset_dev->controlvm_channel;
1738         if (!visor_check_channel(visorchannel_get_header(controlvm_channel),
1739                                  &chipset_dev->acpi_device->dev,
1740                                  &visor_controlvm_channel_guid,
1741                                  "controlvm",
1742                                  sizeof(struct visor_controlvm_channel),
1743                                  VISOR_CONTROLVM_CHANNEL_VERSIONID,
1744                                  VISOR_CHANNEL_SIGNATURE))
1745                 goto error_delete_groups;
1746
1747         /* if booting in a crash kernel */
1748         if (is_kdump_kernel())
1749                 INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
1750                                   setup_crash_devices_work_queue);
1751         else
1752                 INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
1753                                   controlvm_periodic_work);
1754
1755         chipset_dev->most_recent_message_jiffies = jiffies;
1756         chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
1757         schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
1758                               chipset_dev->poll_jiffies);
1759
1760         err = visorbus_init();
1761         if (err < 0)
1762                 goto error_cancel_work;
1763
1764         return 0;
1765
1766 error_cancel_work:
1767         cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
1768
1769 error_delete_groups:
1770         sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
1771                             visorchipset_dev_groups);
1772
1773 error_destroy_channel:
1774         visorchannel_destroy(chipset_dev->controlvm_channel);
1775
1776 error_free_chipset_dev:
1777         kfree(chipset_dev);
1778
1779 error:
1780         dev_err(&acpi_device->dev, "failed with error %d\n", err);
1781         return err;
1782 }
1783
/*
 * visorchipset_exit() - ACPI remove callback; tear down in the reverse
 *                       order of visorchipset_init()
 * @acpi_device: the ACPI device being removed (unused; state lives in the
 *               global chipset_dev)
 *
 * Return: always 0.
 */
static int visorchipset_exit(struct acpi_device *acpi_device)
{
	visorbus_exit();
	/* stop the poller before destroying anything it might still touch */
	cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
	sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
			    visorchipset_dev_groups);

	visorchannel_destroy(chipset_dev->controlvm_channel);
	kfree(chipset_dev);

	return 0;
}
1796
/*
 * ACPI IDs this driver matches. PNP0A07 is a generic ID; the s-Par CPUID
 * probe in visorutil_spar_detect() (run before driver registration) keeps
 * the driver from binding on non-s-Par systems.
 */
static const struct acpi_device_id unisys_device_ids[] = {
	{"PNP0A07", 0},
	{"", 0},
};

/* glue between the ACPI core and the visorchipset add/remove handlers */
static struct acpi_driver unisys_acpi_driver = {
	.name = "unisys_acpi",
	.class = "unisys_acpi_class",
	.owner = THIS_MODULE,
	.ids = unisys_device_ids,
	.ops = {
		.add = visorchipset_init,
		.remove = visorchipset_exit,
	},
};

MODULE_DEVICE_TABLE(acpi, unisys_device_ids);
1814
1815 static __init int visorutil_spar_detect(void)
1816 {
1817         unsigned int eax, ebx, ecx, edx;
1818
1819         if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
1820                 /* check the ID */
1821                 cpuid(UNISYS_VISOR_LEAF_ID, &eax, &ebx, &ecx, &edx);
1822                 return  (ebx == UNISYS_VISOR_ID_EBX) &&
1823                         (ecx == UNISYS_VISOR_ID_ECX) &&
1824                         (edx == UNISYS_VISOR_ID_EDX);
1825         }
1826         return 0;
1827 }
1828
1829 static int __init init_unisys(void)
1830 {
1831         int result;
1832
1833         if (!visorutil_spar_detect())
1834                 return -ENODEV;
1835
1836         result = acpi_bus_register_driver(&unisys_acpi_driver);
1837         if (result)
1838                 return -ENODEV;
1839
1840         pr_info("Unisys Visorchipset Driver Loaded.\n");
1841         return 0;
1842 };
1843
/* exit_unisys() - module exit point; unregister the ACPI driver */
static void __exit exit_unisys(void)
{
	acpi_bus_unregister_driver(&unisys_acpi_driver);
}

module_init(init_unisys);
module_exit(exit_unisys);

MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("s-Par visorbus driver for virtual device buses");