drivers/vhost/test.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * test virtio server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/slab.h>

#include "test.h"
#include "vhost.h"

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_TEST_WEIGHT 0x80000

enum {
        VHOST_TEST_VQ = 0,
        VHOST_TEST_VQ_MAX = 1,
};

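/* Per-open device state: the vhost device plus its single test virtqueue. */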
struct vhost_test {
        struct vhost_dev dev;
        struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX];
};

/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_vq(struct vhost_test *n)
{
        struct vhost_virtqueue *vq = &n->vqs[VHOST_TEST_VQ];
        unsigned out, in;
        int head;
        size_t len, total_len = 0;
        void *private;

        mutex_lock(&vq->mutex);
        private = vq->private_data;
        if (!private) {
                mutex_unlock(&vq->mutex);
                return;
        }

        vhost_disable_notify(&n->dev, vq);

        for (;;) {
                head = vhost_get_vq_desc(vq, vq->iov,
                                         ARRAY_SIZE(vq->iov),
                                         &out, &in,
                                         NULL, NULL);
                /* On error, stop handling until the next kick. */
                if (unlikely(head < 0))
                        break;
                /* Nothing new?  Wait for eventfd to tell us they refilled. */
                if (head == vq->num) {
                        if (unlikely(vhost_enable_notify(&n->dev, vq))) {
                                vhost_disable_notify(&n->dev, vq);
                                continue;
                        }
                        break;
                }
                if (in) {
                        vq_err(vq, "Unexpected descriptor format for TX: "
                               "out %d, in %d\n", out, in);
                        break;
                }
                len = iov_length(vq->iov, out);
                /* Sanity check */
                if (!len) {
                        vq_err(vq, "Unexpected 0 len for TX\n");
                        break;
                }
                vhost_add_used_and_signal(&n->dev, vq, head, 0);
                total_len += len;
                if (unlikely(total_len >= VHOST_TEST_WEIGHT)) {
                        vhost_poll_queue(&vq->poll);
                        break;
                }
        }

        mutex_unlock(&vq->mutex);
}

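/* Kick handler: runs in the vhost worker thread when the virtqueue's kick
 * eventfd is signalled, and simply forwards to handle_vq(). */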
static void handle_vq_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                  poll.work);
        struct vhost_test *n = container_of(vq->dev, struct vhost_test, dev);

        handle_vq(n);
}

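/* open() of the misc device: allocate the per-open state, wire up the kick
 * handler for the single virtqueue and initialize the vhost device. */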
static int vhost_test_open(struct inode *inode, struct file *f)
{
        struct vhost_test *n = kmalloc(sizeof *n, GFP_KERNEL);
        struct vhost_dev *dev;
        struct vhost_virtqueue **vqs;

        if (!n)
                return -ENOMEM;
        vqs = kmalloc_array(VHOST_TEST_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
        if (!vqs) {
                kfree(n);
                return -ENOMEM;
        }

        dev = &n->dev;
        vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
        n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
        vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX);

        f->private_data = n;

        return 0;
}

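/* Detach the backend from a virtqueue under its mutex and return the old
 * private_data so the caller can flush any work that still uses it. */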
static void *vhost_test_stop_vq(struct vhost_test *n,
                                struct vhost_virtqueue *vq)
{
        void *private;

        mutex_lock(&vq->mutex);
        private = vq->private_data;
        vq->private_data = NULL;
        mutex_unlock(&vq->mutex);
        return private;
}

static void vhost_test_stop(struct vhost_test *n, void **privatep)
{
        *privatep = vhost_test_stop_vq(n, n->vqs + VHOST_TEST_VQ);
}

static void vhost_test_flush_vq(struct vhost_test *n, int index)
{
        vhost_poll_flush(&n->vqs[index].poll);
}

static void vhost_test_flush(struct vhost_test *n)
{
        vhost_test_flush_vq(n, VHOST_TEST_VQ);
}

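/* Final close(): stop the virtqueue, flush outstanding work, clean up the
 * vhost device and free everything that open() allocated. */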
static int vhost_test_release(struct inode *inode, struct file *f)
{
        struct vhost_test *n = f->private_data;
        void *private;

        vhost_test_stop(n, &private);
        vhost_test_flush(n);
        vhost_dev_cleanup(&n->dev);
        /* We do an extra flush before freeing memory,
         * since jobs can re-queue themselves. */
        vhost_test_flush(n);
        /* vhost_dev_cleanup() does not free the vq array allocated in open(). */
        kfree(n->dev.vqs);
        kfree(n);
        return 0;
}

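/* VHOST_TEST_RUN: start (test == 1) or stop (test == 0) the test backend,
 * following the same attach pattern as vhost-net's set_backend: validate
 * ring access, swap private_data under the vq mutex, then flush the old
 * backend outside that mutex. */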
static long vhost_test_run(struct vhost_test *n, int test)
{
        void *priv, *oldpriv;
        struct vhost_virtqueue *vq;
        int r, index;

        if (test < 0 || test > 1)
                return -EINVAL;

        mutex_lock(&n->dev.mutex);
        r = vhost_dev_check_owner(&n->dev);
        if (r)
                goto err;

        for (index = 0; index < n->dev.nvqs; ++index) {
                /* Verify that the ring has been set up correctly. */
                if (!vhost_vq_access_ok(&n->vqs[index])) {
                        r = -EFAULT;
                        goto err;
                }
        }

        for (index = 0; index < n->dev.nvqs; ++index) {
                vq = n->vqs + index;
                mutex_lock(&vq->mutex);
                priv = test ? n : NULL;

                /* start polling the new backend */
                oldpriv = vq->private_data;
                vq->private_data = priv;

                r = vhost_vq_init_access(&n->vqs[index]);

                mutex_unlock(&vq->mutex);

                if (r)
                        goto err;

                if (oldpriv) {
                        vhost_test_flush_vq(n, index);
                }
        }

        mutex_unlock(&n->dev.mutex);
        return 0;

err:
        mutex_unlock(&n->dev.mutex);
        return r;
}

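/* VHOST_RESET_OWNER: stop the device, flush pending work and reset vhost
 * ownership so a different process can take over the device. */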
static long vhost_test_reset_owner(struct vhost_test *n)
{
        void *priv = NULL;
        long err;
        struct vhost_umem *umem;

        mutex_lock(&n->dev.mutex);
        err = vhost_dev_check_owner(&n->dev);
        if (err)
                goto done;
        umem = vhost_dev_reset_owner_prepare();
        if (!umem) {
                err = -ENOMEM;
                goto done;
        }
        vhost_test_stop(n, &priv);
        vhost_test_flush(n);
        vhost_dev_reset_owner(&n->dev, umem);
done:
        mutex_unlock(&n->dev.mutex);
        return err;
}

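/* Record the feature bits acked by userspace.  VHOST_F_LOG_ALL requires a
 * usable log buffer, so verify log access before accepting it. */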
static int vhost_test_set_features(struct vhost_test *n, u64 features)
{
        struct vhost_virtqueue *vq;

        mutex_lock(&n->dev.mutex);
        if ((features & (1 << VHOST_F_LOG_ALL)) &&
            !vhost_log_access_ok(&n->dev)) {
                mutex_unlock(&n->dev.mutex);
                return -EFAULT;
        }
        vq = &n->vqs[VHOST_TEST_VQ];
        mutex_lock(&vq->mutex);
        vq->acked_features = features;
        mutex_unlock(&vq->mutex);
        mutex_unlock(&n->dev.mutex);
        return 0;
}

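/*
 * Ioctl dispatch for /dev/vhost-test.  A typical userspace sequence
 * (roughly what tools/virtio/virtio_test.c does) looks like:
 *
 *   fd = open("/dev/vhost-test", O_RDWR);
 *   ioctl(fd, VHOST_SET_OWNER, NULL);
 *   ioctl(fd, VHOST_GET_FEATURES, &features);
 *   ioctl(fd, VHOST_SET_FEATURES, &features);
 *   ioctl(fd, VHOST_SET_MEM_TABLE, mem);
 *   ioctl(fd, VHOST_SET_VRING_NUM/ADDR/BASE/KICK/CALL, ...);
 *   ioctl(fd, VHOST_TEST_RUN, &one);     start consuming buffers
 *   ...
 *   ioctl(fd, VHOST_TEST_RUN, &zero);    stop
 *
 * Device-specific ioctls are handled here; generic vhost and vring ioctls
 * fall through to the vhost core in the default case.
 */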
static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
                             unsigned long arg)
{
        struct vhost_test *n = f->private_data;
        void __user *argp = (void __user *)arg;
        u64 __user *featurep = argp;
        int test;
        u64 features;
        int r;

        switch (ioctl) {
        case VHOST_TEST_RUN:
                if (copy_from_user(&test, argp, sizeof test))
                        return -EFAULT;
                return vhost_test_run(n, test);
        case VHOST_GET_FEATURES:
                features = VHOST_FEATURES;
                if (copy_to_user(featurep, &features, sizeof features))
                        return -EFAULT;
                return 0;
        case VHOST_SET_FEATURES:
                if (copy_from_user(&features, featurep, sizeof features))
                        return -EFAULT;
                if (features & ~VHOST_FEATURES)
                        return -EOPNOTSUPP;
                return vhost_test_set_features(n, features);
        case VHOST_RESET_OWNER:
                return vhost_test_reset_owner(n);
        default:
                mutex_lock(&n->dev.mutex);
                r = vhost_dev_ioctl(&n->dev, ioctl, argp);
                if (r == -ENOIOCTLCMD)
                        r = vhost_vring_ioctl(&n->dev, ioctl, argp);
                vhost_test_flush(n);
                mutex_unlock(&n->dev.mutex);
                return r;
        }
}

#ifdef CONFIG_COMPAT
static long vhost_test_compat_ioctl(struct file *f, unsigned int ioctl,
                                    unsigned long arg)
{
        return vhost_test_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

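/* File operations backing the /dev/vhost-test misc character device. */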
static const struct file_operations vhost_test_fops = {
        .owner          = THIS_MODULE,
        .release        = vhost_test_release,
        .unlocked_ioctl = vhost_test_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = vhost_test_compat_ioctl,
#endif
        .open           = vhost_test_open,
        .llseek         = noop_llseek,
};

static struct miscdevice vhost_test_misc = {
        .minor = MISC_DYNAMIC_MINOR,
        .name  = "vhost-test",
        .fops  = &vhost_test_fops,
};
module_misc_device(vhost_test_misc);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel side for virtio simulator");