1 // SPDX-License-Identifier: GPL-2.0
/*
 * VFIO based Physical Subchannel device driver
 *
 * Copyright IBM Corp. 2017
 * Copyright Red Hat, Inc. 2019
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 *            Cornelia Huck <cohuck@redhat.com>
 */
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/device.h>
16 #include <linux/slab.h>
17 #include <linux/uuid.h>
18 #include <linux/mdev.h>
24 #include "vfio_ccw_private.h"
/* Workqueue used by the IRQ path to defer I/O completion handling. */
struct workqueue_struct *vfio_ccw_work_q;
/* Slab caches for the user-copyable I/O and command regions (GFP_DMA). */
static struct kmem_cache *vfio_ccw_io_region;
static struct kmem_cache *vfio_ccw_cmd_region;
/*
 * Quiesce the subchannel: try to disable it, and if the disable is refused
 * (-EBUSY), cancel/halt/clear outstanding I/O, wait for completion, and
 * retry until cio_disable_subchannel() succeeds or fails for another reason.
 * Leaves private->state at VFIO_CCW_STATE_NOT_OPER.
 *
 * NOTE(review): this extraction is missing lines (declarations of ret and
 * iretry, braces, early-exit paths and the final return) -- comments below
 * describe only the statements that are visible.
 */
int vfio_ccw_sch_quiesce(struct subchannel *sch)
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
	DECLARE_COMPLETION_ONSTACK(completion);
	spin_lock_irq(sch->lock);
	/* Already disabled: nothing left to quiesce. */
	if (!sch->schib.pmcw.ena)
	ret = cio_disable_subchannel(sch);
		/* Disable refused: cancel/halt/clear any in-flight I/O. */
		ret = cio_cancel_halt_clear(sch, &iretry);
			pr_err("vfio_ccw: could not quiesce subchannel 0.%x.%04x!\n",
			       sch->schid.ssid, sch->schid.sch_no);
		/*
		 * Flush all I/O and wait for
		 * cancel/halt/clear completion.
		 */
		private->completion = &completion;
		spin_unlock_irq(sch->lock);
		/* Bounded wait so a wedged device cannot hang us forever. */
			wait_for_completion_timeout(&completion, 3*HZ);
		private->completion = NULL;
		/* Drain deferred interrupt work before retrying the disable. */
		flush_workqueue(vfio_ccw_work_q);
		spin_lock_irq(sch->lock);
		ret = cio_disable_subchannel(sch);
	} while (ret == -EBUSY);
	private->state = VFIO_CCW_STATE_NOT_OPER;
	spin_unlock_irq(sch->lock);
/*
 * Workqueue handler for I/O interrupts: update the channel program from the
 * IRB, publish the IRB to the mediated device's I/O region (under io_mutex),
 * and notify userspace via the io_trigger eventfd.
 *
 * NOTE(review): the declarations of `irb` and `is_final` are not visible in
 * this extraction; presumably irb points at the private's copy of the IRB --
 * confirm against the full file.
 */
static void vfio_ccw_sch_io_todo(struct work_struct *work)
	struct vfio_ccw_private *private;
	private = container_of(work, struct vfio_ccw_private, io_work);
	/* Final interrupt: neither device-active nor subchannel-active set. */
	is_final = !(scsw_actl(&irb->scsw) &
		     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
	if (scsw_is_solicited(&irb->scsw)) {
		cp_update_scsw(&private->cp, &irb->scsw);
		/* Channel program is done; release its resources. */
		if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING)
			cp_free(&private->cp);
	/* Serialize IRB publication with userspace region accesses. */
	mutex_lock(&private->io_mutex);
	memcpy(private->io_region->irb_area, irb, sizeof(*irb));
	mutex_unlock(&private->io_mutex);
	if (private->mdev && is_final)
		private->state = VFIO_CCW_STATE_IDLE;
	/* Wake userspace waiting on the I/O eventfd, if registered. */
	if (private->io_trigger)
		eventfd_signal(private->io_trigger, 1);
/*
 * Css driver callbacks
 */
/* Interrupt handler: account the IRQ and feed it into the FSM. */
static void vfio_ccw_sch_irq(struct subchannel *sch)
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
	inc_irq_stat(IRQIO_CIO);
	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
/*
 * Probe callback: allocate the private structure and its regions, enable
 * the subchannel on the VFIO_CCW_ISC, and register the mediated device.
 * On failure, visible cleanup frees regions and the guest_cp buffer.
 *
 * NOTE(review): error labels, some returns and the final success path are
 * missing from this extraction -- the unwind ordering below reflects only
 * the visible lines.
 */
static int vfio_ccw_sch_probe(struct subchannel *sch)
	struct pmcw *pmcw = &sch->schib.pmcw;
	struct vfio_ccw_private *private;
	/* QDIO subchannels are not supported by this driver. */
	dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
		 dev_name(&sch->dev));
	/* GFP_DMA: the private struct is handed to channel I/O hardware. */
	private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
	private->cp.guest_cp = kcalloc(CCWCHAIN_LEN_MAX, sizeof(struct ccw1),
	if (!private->cp.guest_cp)
	private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
					       GFP_KERNEL | GFP_DMA);
	if (!private->io_region)
	private->cmd_region = kmem_cache_zalloc(vfio_ccw_cmd_region,
						GFP_KERNEL | GFP_DMA);
	if (!private->cmd_region)
	dev_set_drvdata(&sch->dev, private);
	mutex_init(&private->io_mutex);
	spin_lock_irq(sch->lock);
	private->state = VFIO_CCW_STATE_NOT_OPER;
	sch->isc = VFIO_CCW_ISC;
	/* Enable with the subchannel pointer as the interrupt parameter. */
	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	spin_unlock_irq(sch->lock);
	INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
	atomic_set(&private->avail, 1);
	private->state = VFIO_CCW_STATE_STANDBY;
	ret = vfio_ccw_mdev_reg(sch);
	/* Error unwind: undo enable, drvdata, regions and guest_cp. */
	cio_disable_subchannel(sch);
	dev_set_drvdata(&sch->dev, NULL);
	if (private->cmd_region)
		kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
	if (private->io_region)
		kmem_cache_free(vfio_ccw_io_region, private->io_region);
	kfree(private->cp.guest_cp);
/*
 * Remove callback: quiesce the subchannel, unregister the mediated device,
 * then free regions and the guest channel-program buffer (reverse order of
 * the allocations done in probe).
 */
static int vfio_ccw_sch_remove(struct subchannel *sch)
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
	vfio_ccw_sch_quiesce(sch);
	vfio_ccw_mdev_unreg(sch);
	dev_set_drvdata(&sch->dev, NULL);
	kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
	kmem_cache_free(vfio_ccw_io_region, private->io_region);
	kfree(private->cp.guest_cp);
/* Shutdown callback: just quiesce outstanding I/O on the subchannel. */
static void vfio_ccw_sch_shutdown(struct subchannel *sch)
	vfio_ccw_sch_quiesce(sch);
/**
 * vfio_ccw_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel. Return zero when the
 * event has been handled sufficiently or -EAGAIN when this function should
 * be called again in process context.
 */
/*
 * NOTE(review): exit labels and the return statement are missing from this
 * extraction; the visible flow bails out if the device is unregistered or
 * todo work is pending, marks NOT_OPER on a failed schib update, and
 * otherwise promotes NOT_OPER to IDLE/STANDBY depending on mdev presence.
 */
static int vfio_ccw_sch_event(struct subchannel *sch, int process)
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
	spin_lock_irqsave(sch->lock, flags);
	if (!device_is_registered(&sch->dev))
	/* Defer to the already-queued todo work rather than racing it. */
	if (work_pending(&sch->todo_work))
	if (cio_update_schib(sch)) {
		/* Subchannel gone: tell the FSM it is no longer operational. */
		vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
	private = dev_get_drvdata(&sch->dev);
	if (private->state == VFIO_CCW_STATE_NOT_OPER) {
		/* IDLE when a mediated device is attached, else STANDBY. */
		private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
				 VFIO_CCW_STATE_STANDBY;
	spin_unlock_irqrestore(sch->lock, flags);
/* Match every I/O subchannel; binding is controlled via driver override. */
static struct css_device_id vfio_ccw_sch_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
MODULE_DEVICE_TABLE(css, vfio_ccw_sch_ids);
/*
 * Css driver operations wiring the callbacks above into the common
 * channel-subsystem driver core.
 * NOTE(review): the .drv member and closing brace are not visible in this
 * extraction.
 */
static struct css_driver vfio_ccw_sch_driver = {
	.owner = THIS_MODULE,
	.subchannel_type = vfio_ccw_sch_ids,
	.irq = vfio_ccw_sch_irq,
	.probe = vfio_ccw_sch_probe,
	.remove = vfio_ccw_sch_remove,
	.shutdown = vfio_ccw_sch_shutdown,
	.sch_event = vfio_ccw_sch_event,
/*
 * Module init: create the workqueue and the two usercopy-whitelisted slab
 * caches, register the ISC, and register the css driver.  Visible failure
 * handling unwinds the ISC registration and destroys caches/workqueue.
 *
 * NOTE(review): error labels, returns and some cache-create arguments are
 * missing from this extraction.
 */
static int __init vfio_ccw_sch_init(void)
	vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
	if (!vfio_ccw_work_q)
	/* Usercopy caches: the whole region may be copied to/from userspace. */
	vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region",
					sizeof(struct ccw_io_region), 0,
					sizeof(struct ccw_io_region), NULL);
	if (!vfio_ccw_io_region)
	vfio_ccw_cmd_region = kmem_cache_create_usercopy("vfio_ccw_cmd_region",
					sizeof(struct ccw_cmd_region), 0,
					sizeof(struct ccw_cmd_region), NULL);
	if (!vfio_ccw_cmd_region)
	isc_register(VFIO_CCW_ISC);
	ret = css_driver_register(&vfio_ccw_sch_driver);
		/* Driver registration failed: release the ISC again. */
		isc_unregister(VFIO_CCW_ISC);
	kmem_cache_destroy(vfio_ccw_cmd_region);
	kmem_cache_destroy(vfio_ccw_io_region);
	destroy_workqueue(vfio_ccw_work_q);
/* Module exit: tear down everything set up in vfio_ccw_sch_init(). */
static void __exit vfio_ccw_sch_exit(void)
	css_driver_unregister(&vfio_ccw_sch_driver);
	isc_unregister(VFIO_CCW_ISC);
	kmem_cache_destroy(vfio_ccw_io_region);
	kmem_cache_destroy(vfio_ccw_cmd_region);
	destroy_workqueue(vfio_ccw_work_q);
/* Module entry/exit points and license (matches SPDX GPL-2.0 above). */
module_init(vfio_ccw_sch_init);
module_exit(vfio_ccw_sch_exit);
MODULE_LICENSE("GPL v2");