// SPDX-License-Identifier: GPL-2.0
/*
 * VFIO based Physical Subchannel device driver
 *
 * Copyright IBM Corp. 2017
 * Copyright Red Hat, Inc. 2019
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 *            Cornelia Huck <cohuck@redhat.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <linux/mdev.h>

#include <asm/isc.h>

#include "chp.h"
#include "ioasm.h"
#include "css.h"
#include "vfio_ccw_private.h"

struct workqueue_struct *vfio_ccw_work_q;
static struct kmem_cache *vfio_ccw_io_region;
static struct kmem_cache *vfio_ccw_cmd_region;
static struct kmem_cache *vfio_ccw_schib_region;
static struct kmem_cache *vfio_ccw_crw_region;

debug_info_t *vfio_ccw_debug_msg_id;
debug_info_t *vfio_ccw_debug_trace_id;

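/*
 * Helpers
 */

/*
 * Quiesce the subchannel: keep retrying cio_disable_subchannel() and,
 * while it reports -EBUSY, issue cancel/halt/clear and wait (bounded
 * by a 3*HZ timeout per iteration) for the resulting interrupt before
 * the next attempt. The private state ends up NOT_OPER either way.
 */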
int vfio_ccw_sch_quiesce(struct subchannel *sch)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
	DECLARE_COMPLETION_ONSTACK(completion);
	int iretry, ret = 0;

	spin_lock_irq(sch->lock);
	if (!sch->schib.pmcw.ena)
		goto out_unlock;
	ret = cio_disable_subchannel(sch);
	if (ret != -EBUSY)
		goto out_unlock;

	iretry = 255;
	do {
		ret = cio_cancel_halt_clear(sch, &iretry);

		if (ret == -EIO) {
			pr_err("vfio_ccw: could not quiesce subchannel 0.%x.%04x!\n",
			       sch->schid.ssid, sch->schid.sch_no);
			break;
		}

		/*
		 * Flush all I/O and wait for
		 * cancel/halt/clear completion.
		 */
		private->completion = &completion;
		spin_unlock_irq(sch->lock);

		if (ret == -EBUSY)
			wait_for_completion_timeout(&completion, 3*HZ);

		private->completion = NULL;
		flush_workqueue(vfio_ccw_work_q);
		spin_lock_irq(sch->lock);
		ret = cio_disable_subchannel(sch);
	} while (ret == -EBUSY);
out_unlock:
	private->state = VFIO_CCW_STATE_NOT_OPER;
	spin_unlock_irq(sch->lock);
	return ret;
}

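/*
 * Bottom half for an I/O interrupt: update the channel program with
 * the incoming SCSW (if solicited), mirror the IRB into the I/O region
 * for userspace, and notify the guest via the io_trigger eventfd.
 */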
static void vfio_ccw_sch_io_todo(struct work_struct *work)
{
	struct vfio_ccw_private *private;
	struct irb *irb;
	bool is_final;
	bool cp_is_finished = false;

	private = container_of(work, struct vfio_ccw_private, io_work);
	irb = &private->irb;

	is_final = !(scsw_actl(&irb->scsw) &
		     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
	if (scsw_is_solicited(&irb->scsw)) {
		cp_update_scsw(&private->cp, &irb->scsw);
		if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING) {
			cp_free(&private->cp);
			cp_is_finished = true;
		}
	}
	mutex_lock(&private->io_mutex);
	memcpy(private->io_region->irb_area, irb, sizeof(*irb));
	mutex_unlock(&private->io_mutex);

	/*
	 * Reset to IDLE only if processing of a channel program
	 * has finished. Do not overwrite a possible processing
	 * state if the final interrupt was for HSCH or CSCH.
	 */
	if (private->mdev && cp_is_finished)
		private->state = VFIO_CCW_STATE_IDLE;

	if (private->io_trigger)
		eventfd_signal(private->io_trigger, 1);
}

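/*
 * Bottom half for CRW events: only signal the crw_trigger eventfd if
 * there are still CRWs queued for userspace to collect.
 */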
static void vfio_ccw_crw_todo(struct work_struct *work)
{
	struct vfio_ccw_private *private;

	private = container_of(work, struct vfio_ccw_private, crw_work);

	if (!list_empty(&private->crw) && private->crw_trigger)
		eventfd_signal(private->crw_trigger, 1);
}

/*
 * Css driver callbacks
 */
static void vfio_ccw_sch_irq(struct subchannel *sch)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);

	inc_irq_stat(IRQIO_CIO);
	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
}

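/*
 * Free whichever of the four I/O regions have been allocated; used by
 * both the probe error path and remove.
 */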
static void vfio_ccw_free_regions(struct vfio_ccw_private *private)
{
	if (private->crw_region)
		kmem_cache_free(vfio_ccw_crw_region, private->crw_region);
	if (private->schib_region)
		kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
	if (private->cmd_region)
		kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
	if (private->io_region)
		kmem_cache_free(vfio_ccw_io_region, private->io_region);
}

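/*
 * Probe: allocate the private structure and its regions, enable the
 * subchannel on the vfio-ccw ISC, and register with the mediated
 * device framework. QDIO subchannels are rejected up front as
 * unsupported.
 */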
static int vfio_ccw_sch_probe(struct subchannel *sch)
{
	struct pmcw *pmcw = &sch->schib.pmcw;
	struct vfio_ccw_private *private;
	int ret = -ENOMEM;

	if (pmcw->qf) {
		dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
			 dev_name(&sch->dev));
		return -ENODEV;
	}

	private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
	if (!private)
		return -ENOMEM;

	private->cp.guest_cp = kcalloc(CCWCHAIN_LEN_MAX, sizeof(struct ccw1),
				       GFP_KERNEL);
	if (!private->cp.guest_cp)
		goto out_free;

	private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
					       GFP_KERNEL | GFP_DMA);
	if (!private->io_region)
		goto out_free;

	private->cmd_region = kmem_cache_zalloc(vfio_ccw_cmd_region,
						GFP_KERNEL | GFP_DMA);
	if (!private->cmd_region)
		goto out_free;

	private->schib_region = kmem_cache_zalloc(vfio_ccw_schib_region,
						  GFP_KERNEL | GFP_DMA);
	if (!private->schib_region)
		goto out_free;

	private->crw_region = kmem_cache_zalloc(vfio_ccw_crw_region,
						GFP_KERNEL | GFP_DMA);
	if (!private->crw_region)
		goto out_free;

	private->sch = sch;
	dev_set_drvdata(&sch->dev, private);
	mutex_init(&private->io_mutex);

	spin_lock_irq(sch->lock);
	private->state = VFIO_CCW_STATE_NOT_OPER;
	sch->isc = VFIO_CCW_ISC;
	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	spin_unlock_irq(sch->lock);
	if (ret)
		goto out_free;

	INIT_LIST_HEAD(&private->crw);
	INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
	INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);
	atomic_set(&private->avail, 1);
	private->state = VFIO_CCW_STATE_STANDBY;

	ret = vfio_ccw_mdev_reg(sch);
	if (ret)
		goto out_disable;

	if (dev_get_uevent_suppress(&sch->dev)) {
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}

	VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n",
			   sch->schid.cssid, sch->schid.ssid,
			   sch->schid.sch_no);
	return 0;

out_disable:
	cio_disable_subchannel(sch);
out_free:
	dev_set_drvdata(&sch->dev, NULL);
	vfio_ccw_free_regions(private);
	kfree(private->cp.guest_cp);
	kfree(private);
	return ret;
}

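/*
 * Remove: quiesce the subchannel, free any CRWs still queued, and undo
 * the registration and allocations done in probe.
 */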
static void vfio_ccw_sch_remove(struct subchannel *sch)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
	struct vfio_ccw_crw *crw, *temp;

	vfio_ccw_sch_quiesce(sch);

	list_for_each_entry_safe(crw, temp, &private->crw, next) {
		list_del(&crw->next);
		kfree(crw);
	}

	vfio_ccw_mdev_unreg(sch);

	dev_set_drvdata(&sch->dev, NULL);

	vfio_ccw_free_regions(private);
	kfree(private->cp.guest_cp);
	kfree(private);

	VFIO_CCW_MSG_EVENT(4, "unbound from subchannel %x.%x.%04x\n",
			   sch->schid.cssid, sch->schid.ssid,
			   sch->schid.sch_no);
}

static void vfio_ccw_sch_shutdown(struct subchannel *sch)
{
	vfio_ccw_sch_quiesce(sch);
}

/**
 * vfio_ccw_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel. Return zero when the
 * event has been handled sufficiently or -EAGAIN when this function should
 * be called again in process context.
 */
static int vfio_ccw_sch_event(struct subchannel *sch, int process)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
	unsigned long flags;
	int rc = -EAGAIN;

	spin_lock_irqsave(sch->lock, flags);
	if (!device_is_registered(&sch->dev))
		goto out_unlock;

	if (work_pending(&sch->todo_work))
		goto out_unlock;

	if (cio_update_schib(sch)) {
		vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
		rc = 0;
		goto out_unlock;
	}

	private = dev_get_drvdata(&sch->dev);
	if (private->state == VFIO_CCW_STATE_NOT_OPER) {
		private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
				 VFIO_CCW_STATE_STANDBY;
	}
	rc = 0;

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);
	return rc;
}

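/*
 * Queue a channel report word for the guest; delivery is deferred to
 * vfio_ccw_crw_todo() via the workqueue, which signals the crw_trigger
 * eventfd.
 */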
static void vfio_ccw_queue_crw(struct vfio_ccw_private *private,
			       unsigned int rsc,
			       unsigned int erc,
			       unsigned int rsid)
{
	struct vfio_ccw_crw *crw;

	/*
	 * If unable to allocate a CRW, just drop the event and
	 * carry on.  The guest will either see a later one or
	 * learn when it issues its own store subchannel.
	 */
	crw = kzalloc(sizeof(*crw), GFP_ATOMIC);
	if (!crw)
		return;

	/* Build the CRW based on the inputs given to us. */
	crw->crw.rsc = rsc;
	crw->crw.erc = erc;
	crw->crw.rsid = rsid;

	list_add_tail(&crw->next, &private->crw);
	queue_work(vfio_ccw_work_q, &private->crw_work);
}

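/*
 * React to channel-path events from the css layer: adjust the path
 * masks of the subchannel and, for CHP_OFFLINE/CHP_ONLINE, forward a
 * synthesized CRW to the guest.
 */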
static int vfio_ccw_chp_event(struct subchannel *sch,
			      struct chp_link *link, int event)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
	int mask = chp_ssd_get_mask(&sch->ssd_info, link);
	int retry = 255;

	if (!private || !mask)
		return 0;

	trace_vfio_ccw_chp_event(private->sch->schid, mask, event);
	VFIO_CCW_MSG_EVENT(2, "%pUl (%x.%x.%04x): mask=0x%x event=%d\n",
			   mdev_uuid(private->mdev), sch->schid.cssid,
			   sch->schid.ssid, sch->schid.sch_no,
			   mask, event);

	if (cio_update_schib(sch))
		return -ENODEV;

	switch (event) {
	case CHP_VARY_OFF:
		/* Path logically turned off */
		sch->opm &= ~mask;
		sch->lpm &= ~mask;
		if (sch->schib.pmcw.lpum & mask)
			cio_cancel_halt_clear(sch, &retry);
		break;
	case CHP_OFFLINE:
		/* Path is gone */
		if (sch->schib.pmcw.lpum & mask)
			cio_cancel_halt_clear(sch, &retry);
		vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_PERRN,
				   link->chpid.id);
		break;
	case CHP_VARY_ON:
		/* Path logically turned on */
		sch->opm |= mask;
		sch->lpm |= mask;
		break;
	case CHP_ONLINE:
		/* Path became available */
		sch->lpm |= mask & sch->opm;
		vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_INIT,
				   link->chpid.id);
		break;
	}

	return 0;
}

static struct css_device_id vfio_ccw_sch_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, vfio_ccw_sch_ids);

static struct css_driver vfio_ccw_sch_driver = {
	.drv = {
		.name = "vfio_ccw",
		.owner = THIS_MODULE,
	},
	.subchannel_type = vfio_ccw_sch_ids,
	.irq = vfio_ccw_sch_irq,
	.probe = vfio_ccw_sch_probe,
	.remove = vfio_ccw_sch_remove,
	.shutdown = vfio_ccw_sch_shutdown,
	.sch_event = vfio_ccw_sch_event,
	.chp_event = vfio_ccw_chp_event,
};

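/*
 * Register two s390 debug feature areas: a sprintf view for messages
 * and a hex/ascii view for traces, both starting at debug level 2.
 */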
static int __init vfio_ccw_debug_init(void)
{
	vfio_ccw_debug_msg_id = debug_register("vfio_ccw_msg", 16, 1,
					       11 * sizeof(long));
	if (!vfio_ccw_debug_msg_id)
		goto out_unregister;
	debug_register_view(vfio_ccw_debug_msg_id, &debug_sprintf_view);
	debug_set_level(vfio_ccw_debug_msg_id, 2);
	vfio_ccw_debug_trace_id = debug_register("vfio_ccw_trace", 16, 1, 16);
	if (!vfio_ccw_debug_trace_id)
		goto out_unregister;
	debug_register_view(vfio_ccw_debug_trace_id, &debug_hex_ascii_view);
	debug_set_level(vfio_ccw_debug_trace_id, 2);

	return 0;

out_unregister:
	debug_unregister(vfio_ccw_debug_msg_id);
	debug_unregister(vfio_ccw_debug_trace_id);
	return -1;
}

static void vfio_ccw_debug_exit(void)
{
	debug_unregister(vfio_ccw_debug_msg_id);
	debug_unregister(vfio_ccw_debug_trace_id);
}

static void vfio_ccw_destroy_regions(void)
{
	kmem_cache_destroy(vfio_ccw_crw_region);
	kmem_cache_destroy(vfio_ccw_schib_region);
	kmem_cache_destroy(vfio_ccw_cmd_region);
	kmem_cache_destroy(vfio_ccw_io_region);
}

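/*
 * Module init: bring up the debug areas, the workqueue, and the four
 * usercopy-enabled slab caches before registering the css driver, so
 * everything a probe needs exists by the time registration succeeds.
 * Later failures unwind through out_err.
 */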
static int __init vfio_ccw_sch_init(void)
{
	int ret;

	ret = vfio_ccw_debug_init();
	if (ret)
		return ret;

	vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
	if (!vfio_ccw_work_q) {
		ret = -ENOMEM;
		goto out_err;
	}

	vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region",
					sizeof(struct ccw_io_region), 0,
					SLAB_ACCOUNT, 0,
					sizeof(struct ccw_io_region), NULL);
	if (!vfio_ccw_io_region) {
		ret = -ENOMEM;
		goto out_err;
	}

	vfio_ccw_cmd_region = kmem_cache_create_usercopy("vfio_ccw_cmd_region",
					sizeof(struct ccw_cmd_region), 0,
					SLAB_ACCOUNT, 0,
					sizeof(struct ccw_cmd_region), NULL);
	if (!vfio_ccw_cmd_region) {
		ret = -ENOMEM;
		goto out_err;
	}

	vfio_ccw_schib_region = kmem_cache_create_usercopy("vfio_ccw_schib_region",
					sizeof(struct ccw_schib_region), 0,
					SLAB_ACCOUNT, 0,
					sizeof(struct ccw_schib_region), NULL);
	if (!vfio_ccw_schib_region) {
		ret = -ENOMEM;
		goto out_err;
	}

	vfio_ccw_crw_region = kmem_cache_create_usercopy("vfio_ccw_crw_region",
					sizeof(struct ccw_crw_region), 0,
					SLAB_ACCOUNT, 0,
					sizeof(struct ccw_crw_region), NULL);
	if (!vfio_ccw_crw_region) {
		ret = -ENOMEM;
		goto out_err;
	}

	isc_register(VFIO_CCW_ISC);
	ret = css_driver_register(&vfio_ccw_sch_driver);
	if (ret) {
		isc_unregister(VFIO_CCW_ISC);
		goto out_err;
	}

	return ret;

out_err:
	vfio_ccw_destroy_regions();
	destroy_workqueue(vfio_ccw_work_q);
	vfio_ccw_debug_exit();
	return ret;
}

static void __exit vfio_ccw_sch_exit(void)
{
	css_driver_unregister(&vfio_ccw_sch_driver);
	isc_unregister(VFIO_CCW_ISC);
	vfio_ccw_destroy_regions();
	destroy_workqueue(vfio_ccw_work_q);
	vfio_ccw_debug_exit();
}
module_init(vfio_ccw_sch_init);
module_exit(vfio_ccw_sch_exit);

MODULE_LICENSE("GPL v2");