1 /*******************************************************************************
2  * Vhost kernel TCM fabric driver for virtio SCSI initiators
3  *
4  * (C) Copyright 2010-2013 Datera, Inc.
5  * (C) Copyright 2010-2012 IBM Corp.
6  *
7  * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8  *
9  * Authors: Nicholas A. Bellinger <nab@daterainc.com>
10  *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License as published by
14  * the Free Software Foundation; either version 2 of the License, or
15  * (at your option) any later version.
16  *
17  * This program is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  * GNU General Public License for more details.
21  *
22  ****************************************************************************/
23
24 #include <linux/module.h>
25 #include <linux/moduleparam.h>
26 #include <generated/utsrelease.h>
27 #include <linux/utsname.h>
28 #include <linux/init.h>
29 #include <linux/slab.h>
30 #include <linux/kthread.h>
31 #include <linux/types.h>
32 #include <linux/string.h>
33 #include <linux/configfs.h>
34 #include <linux/ctype.h>
35 #include <linux/compat.h>
36 #include <linux/eventfd.h>
37 #include <linux/fs.h>
38 #include <linux/vmalloc.h>
39 #include <linux/miscdevice.h>
40 #include <asm/unaligned.h>
41 #include <scsi/scsi_common.h>
42 #include <scsi/scsi_proto.h>
43 #include <target/target_core_base.h>
44 #include <target/target_core_fabric.h>
45 #include <linux/vhost.h>
46 #include <linux/virtio_scsi.h>
47 #include <linux/llist.h>
48 #include <linux/bitmap.h>
49 #include <linux/percpu_ida.h>
50
51 #include "vhost.h"
52
53 #define VHOST_SCSI_VERSION  "v0.1"
54 #define VHOST_SCSI_NAMELEN 256
55 #define VHOST_SCSI_MAX_CDB_SIZE 32
56 #define VHOST_SCSI_DEFAULT_TAGS 256
57 #define VHOST_SCSI_PREALLOC_SGLS 2048
58 #define VHOST_SCSI_PREALLOC_UPAGES 2048
59 #define VHOST_SCSI_PREALLOC_PROT_SGLS 512
60
61 struct vhost_scsi_inflight {
62         /* Wait for the flush operation to finish */
63         struct completion comp;
64         /* Refcount for the inflight reqs */
65         struct kref kref;
66 };
67
68 struct vhost_scsi_cmd {
69         /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
70         int tvc_vq_desc;
71         /* virtio-scsi initiator task attribute */
72         int tvc_task_attr;
73         /* virtio-scsi response incoming iovecs */
74         int tvc_in_iovs;
75         /* virtio-scsi initiator data direction */
76         enum dma_data_direction tvc_data_direction;
77         /* Expected data transfer length from virtio-scsi header */
78         u32 tvc_exp_data_len;
79         /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
80         u64 tvc_tag;
81         /* The number of scatterlists associated with this cmd */
82         u32 tvc_sgl_count;
83         u32 tvc_prot_sgl_count;
84         /* Saved unpacked SCSI LUN for vhost_scsi_submission_work() */
85         u32 tvc_lun;
86         /* Pointer to the SGL formatted memory from virtio-scsi */
87         struct scatterlist *tvc_sgl;
88         struct scatterlist *tvc_prot_sgl;
89         struct page **tvc_upages;
90         /* Response header iovec saved for command completion */
91         struct iovec tvc_resp_iov;
92         /* Pointer to vhost_scsi for our device */
93         struct vhost_scsi *tvc_vhost;
94         /* Pointer to vhost_virtqueue for the cmd */
95         struct vhost_virtqueue *tvc_vq;
96         /* Pointer to vhost nexus memory */
97         struct vhost_scsi_nexus *tvc_nexus;
98         /* The TCM I/O descriptor that is accessed via container_of() */
99         struct se_cmd tvc_se_cmd;
100         /* work item used for cmwq dispatch to vhost_scsi_submission_work() */
101         struct work_struct work;
102         /* Copy of the incoming SCSI command descriptor block (CDB) */
103         unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
104         /* Sense buffer that will be mapped into outgoing status */
105         unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
106         /* Completed commands list, serviced from vhost worker thread */
107         struct llist_node tvc_completion_list;
108         /* Used to track inflight cmd */
109         struct vhost_scsi_inflight *inflight;
110 };
111
112 struct vhost_scsi_nexus {
113         /* Pointer to TCM session for I_T Nexus */
114         struct se_session *tvn_se_sess;
115 };
116
117 struct vhost_scsi_tpg {
118         /* Vhost port target portal group tag for TCM */
119         u16 tport_tpgt;
120         /* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
121         int tv_tpg_port_count;
122         /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
123         int tv_tpg_vhost_count;
124         /* Used for enabling T10-PI with legacy devices */
125         int tv_fabric_prot_type;
126         /* list for vhost_scsi_list */
127         struct list_head tv_tpg_list;
128         /* Used to protect access to tpg_nexus */
129         struct mutex tv_tpg_mutex;
130         /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
131         struct vhost_scsi_nexus *tpg_nexus;
132         /* Pointer back to vhost_scsi_tport */
133         struct vhost_scsi_tport *tport;
134         /* Returned by vhost_scsi_make_tpg() */
135         struct se_portal_group se_tpg;
136         /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
137         struct vhost_scsi *vhost_scsi;
138 };
139
140 struct vhost_scsi_tport {
141         /* SCSI protocol the tport is providing */
142         u8 tport_proto_id;
143         /* Binary World Wide unique Port Name for Vhost Target port */
144         u64 tport_wwpn;
145         /* ASCII formatted WWPN for Vhost Target port */
146         char tport_name[VHOST_SCSI_NAMELEN];
147         /* Returned by vhost_scsi_make_tport() */
148         struct se_wwn tport_wwn;
149 };
150
151 struct vhost_scsi_evt {
152         /* event to be sent to guest */
153         struct virtio_scsi_event event;
154         /* event list, serviced from vhost worker thread */
155         struct llist_node list;
156 };
157
158 enum {
159         VHOST_SCSI_VQ_CTL = 0,
160         VHOST_SCSI_VQ_EVT = 1,
161         VHOST_SCSI_VQ_IO = 2,
162 };
163
164 /* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
165 enum {
166         VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
167                                                (1ULL << VIRTIO_SCSI_F_T10_PI)
168 };
169
170 #define VHOST_SCSI_MAX_TARGET   256
171 #define VHOST_SCSI_MAX_VQ       128
172 #define VHOST_SCSI_MAX_EVENT    128
173
174 struct vhost_scsi_virtqueue {
175         struct vhost_virtqueue vq;
176         /*
177          * Reference counting for inflight reqs, used for flush operation. At
178          * any time, one reference tracks newly submitted commands, while we
179          * wait for the other one to reach 0.
180          */
181         struct vhost_scsi_inflight inflights[2];
182         /*
183          * Indicate current inflight in use, protected by vq->mutex.
184          * Writers must also take dev mutex and flush under it.
185          */
186         int inflight_idx;
187 };
188
189 struct vhost_scsi {
190         /* Protected by vhost_scsi->dev.mutex */
191         struct vhost_scsi_tpg **vs_tpg;
192         char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
193
194         struct vhost_dev dev;
195         struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];
196
197         struct vhost_work vs_completion_work; /* cmd completion work item */
198         struct llist_head vs_completion_list; /* cmd completion queue */
199
200         struct vhost_work vs_event_work; /* evt injection work item */
201         struct llist_head vs_event_list; /* evt injection queue */
202
203         bool vs_events_missed; /* any missed events, protected by vq->mutex */
204         int vs_events_nr; /* num of pending events, protected by vq->mutex */
205 };
206
207 static struct workqueue_struct *vhost_scsi_workqueue;
208
209 /* Global mutex to protect vhost_scsi TPG list for vhost IOCTL access */
210 static DEFINE_MUTEX(vhost_scsi_mutex);
211 static LIST_HEAD(vhost_scsi_list);
212
213 static int iov_num_pages(void __user *iov_base, size_t iov_len)
214 {
215         return (PAGE_ALIGN((unsigned long)iov_base + iov_len) -
216                ((unsigned long)iov_base & PAGE_MASK)) >> PAGE_SHIFT;
217 }
218
219 static void vhost_scsi_done_inflight(struct kref *kref)
220 {
221         struct vhost_scsi_inflight *inflight;
222
223         inflight = container_of(kref, struct vhost_scsi_inflight, kref);
224         complete(&inflight->comp);
225 }
226
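/*
 * Swap each virtqueue over to a fresh inflight counter. When old_inflight
 * is non-NULL, the previous counters are returned so a flush can wait for
 * their outstanding requests to drain.
 */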
227 static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
228                                     struct vhost_scsi_inflight *old_inflight[])
229 {
230         struct vhost_scsi_inflight *new_inflight;
231         struct vhost_virtqueue *vq;
232         int idx, i;
233
234         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
235                 vq = &vs->vqs[i].vq;
236
237                 mutex_lock(&vq->mutex);
238
239                 /* store old inflight */
240                 idx = vs->vqs[i].inflight_idx;
241                 if (old_inflight)
242                         old_inflight[i] = &vs->vqs[i].inflights[idx];
243
244                 /* set up new inflight */
245                 vs->vqs[i].inflight_idx = idx ^ 1;
246                 new_inflight = &vs->vqs[i].inflights[idx ^ 1];
247                 kref_init(&new_inflight->kref);
248                 init_completion(&new_inflight->comp);
249
250                 mutex_unlock(&vq->mutex);
251         }
252 }
253
254 static struct vhost_scsi_inflight *
255 vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
256 {
257         struct vhost_scsi_inflight *inflight;
258         struct vhost_scsi_virtqueue *svq;
259
260         svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
261         inflight = &svq->inflights[svq->inflight_idx];
262         kref_get(&inflight->kref);
263
264         return inflight;
265 }
266
267 static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
268 {
269         kref_put(&inflight->kref, vhost_scsi_done_inflight);
270 }
271
272 static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
273 {
274         return 1;
275 }
276
277 static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
278 {
279         return 0;
280 }
281
282 static char *vhost_scsi_get_fabric_name(void)
283 {
284         return "vhost";
285 }
286
287 static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
288 {
289         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
290                                 struct vhost_scsi_tpg, se_tpg);
291         struct vhost_scsi_tport *tport = tpg->tport;
292
293         return &tport->tport_name[0];
294 }
295
296 static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
297 {
298         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
299                                 struct vhost_scsi_tpg, se_tpg);
300         return tpg->tport_tpgt;
301 }
302
303 static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
304 {
305         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
306                                 struct vhost_scsi_tpg, se_tpg);
307
308         return tpg->tv_fabric_prot_type;
309 }
310
311 static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
312 {
313         return 1;
314 }
315
316 static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
317 {
318         struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
319                                 struct vhost_scsi_cmd, tvc_se_cmd);
320         struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess;
321         int i;
322
323         if (tv_cmd->tvc_sgl_count) {
324                 for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
325                         put_page(sg_page(&tv_cmd->tvc_sgl[i]));
326         }
327         if (tv_cmd->tvc_prot_sgl_count) {
328                 for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
329                         put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
330         }
331
332         vhost_scsi_put_inflight(tv_cmd->inflight);
333         percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
334 }
335
336 static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
337 {
338         return 0;
339 }
340
341 static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
342 {
343         /* Go ahead and process the write immediately */
344         target_execute_cmd(se_cmd);
345         return 0;
346 }
347
348 static int vhost_scsi_write_pending_status(struct se_cmd *se_cmd)
349 {
350         return 0;
351 }
352
353 static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
354 {
355         return;
356 }
357
358 static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
359 {
360         return 0;
361 }
362
363 static void vhost_scsi_complete_cmd(struct vhost_scsi_cmd *cmd)
364 {
365         struct vhost_scsi *vs = cmd->tvc_vhost;
366
367         llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
368
369         vhost_work_queue(&vs->dev, &vs->vs_completion_work);
370 }
371
372 static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
373 {
374         struct vhost_scsi_cmd *cmd = container_of(se_cmd,
375                                 struct vhost_scsi_cmd, tvc_se_cmd);
376         vhost_scsi_complete_cmd(cmd);
377         return 0;
378 }
379
380 static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
381 {
382         struct vhost_scsi_cmd *cmd = container_of(se_cmd,
383                                 struct vhost_scsi_cmd, tvc_se_cmd);
384         vhost_scsi_complete_cmd(cmd);
385         return 0;
386 }
387
388 static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
389 {
390         return;
391 }
392
393 static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
394 {
395         return;
396 }
397
398 static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
399 {
400         vs->vs_events_nr--;
401         kfree(evt);
402 }
403
404 static struct vhost_scsi_evt *
405 vhost_scsi_allocate_evt(struct vhost_scsi *vs,
406                        u32 event, u32 reason)
407 {
408         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
409         struct vhost_scsi_evt *evt;
410
411         if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
412                 vs->vs_events_missed = true;
413                 return NULL;
414         }
415
416         evt = kzalloc(sizeof(*evt), GFP_KERNEL);
417         if (!evt) {
418                 vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
419                 vs->vs_events_missed = true;
420                 return NULL;
421         }
422
423         evt->event.event = cpu_to_vhost32(vq, event);
424         evt->event.reason = cpu_to_vhost32(vq, reason);
425         vs->vs_events_nr++;
426
427         return evt;
428 }
429
430 static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd)
431 {
432         struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
433
434         /* TODO locking against target/backend threads? */
435         transport_generic_free_cmd(se_cmd, 0);
436
437 }
438
439 static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
440 {
441         return target_put_sess_cmd(se_cmd);
442 }
443
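/*
 * Deliver a single event to the guest via the event virtqueue, setting
 * VIRTIO_SCSI_T_EVENTS_MISSED if earlier events were dropped. Called from
 * the vhost worker with vq->mutex held.
 */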
444 static void
445 vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
446 {
447         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
448         struct virtio_scsi_event *event = &evt->event;
449         struct virtio_scsi_event __user *eventp;
450         unsigned out, in;
451         int head, ret;
452
453         if (!vq->private_data) {
454                 vs->vs_events_missed = true;
455                 return;
456         }
457
458 again:
459         vhost_disable_notify(&vs->dev, vq);
460         head = vhost_get_vq_desc(vq, vq->iov,
461                         ARRAY_SIZE(vq->iov), &out, &in,
462                         NULL, NULL);
463         if (head < 0) {
464                 vs->vs_events_missed = true;
465                 return;
466         }
467         if (head == vq->num) {
468                 if (vhost_enable_notify(&vs->dev, vq))
469                         goto again;
470                 vs->vs_events_missed = true;
471                 return;
472         }
473
474         if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
475                 vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
476                                 vq->iov[out].iov_len);
477                 vs->vs_events_missed = true;
478                 return;
479         }
480
481         if (vs->vs_events_missed) {
482                 event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
483                 vs->vs_events_missed = false;
484         }
485
486         eventp = vq->iov[out].iov_base;
487         ret = __copy_to_user(eventp, event, sizeof(*event));
488         if (!ret)
489                 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
490         else
491                 vq_err(vq, "Faulted on vhost_scsi_send_event\n");
492 }
493
494 static void vhost_scsi_evt_work(struct vhost_work *work)
495 {
496         struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
497                                         vs_event_work);
498         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
499         struct vhost_scsi_evt *evt, *t;
500         struct llist_node *llnode;
501
502         mutex_lock(&vq->mutex);
503         llnode = llist_del_all(&vs->vs_event_list);
504         llist_for_each_entry_safe(evt, t, llnode, list) {
505                 vhost_scsi_do_evt_work(vs, evt);
506                 vhost_scsi_free_evt(vs, evt);
507         }
508         mutex_unlock(&vq->mutex);
509 }
510
511 /* Fill in status and signal that we are done processing this command
512  *
513  * This is scheduled in the vhost work queue so we are called with the owner
514  * process mm and can access the vring.
515  */
516 static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
517 {
518         struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
519                                         vs_completion_work);
520         DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
521         struct virtio_scsi_cmd_resp v_rsp;
522         struct vhost_scsi_cmd *cmd;
523         struct llist_node *llnode;
524         struct se_cmd *se_cmd;
525         struct iov_iter iov_iter;
526         int ret, vq;
527
528         bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
529         llnode = llist_del_all(&vs->vs_completion_list);
530         llist_for_each_entry(cmd, llnode, tvc_completion_list) {
531                 se_cmd = &cmd->tvc_se_cmd;
532
533                 pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
534                         cmd, se_cmd->residual_count, se_cmd->scsi_status);
535
536                 memset(&v_rsp, 0, sizeof(v_rsp));
537                 v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
538                 /* TODO is status_qualifier field needed? */
539                 v_rsp.status = se_cmd->scsi_status;
540                 v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
541                                                  se_cmd->scsi_sense_length);
542                 memcpy(v_rsp.sense, cmd->tvc_sense_buf,
543                        se_cmd->scsi_sense_length);
544
545                 iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov,
546                               cmd->tvc_in_iovs, sizeof(v_rsp));
547                 ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
548                 if (likely(ret == sizeof(v_rsp))) {
549                         struct vhost_scsi_virtqueue *q;
550                         vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
551                         q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
552                         vq = q - vs->vqs;
553                         __set_bit(vq, signal);
554                 } else
555                         pr_err("Faulted on virtio_scsi_cmd_resp\n");
556
557                 vhost_scsi_free_cmd(cmd);
558         }
559
560         vq = -1;
561         while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
562                 < VHOST_SCSI_MAX_VQ)
563                 vhost_signal(&vs->dev, &vs->vqs[vq].vq);
564 }
565
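/*
 * Allocate a vhost_scsi_cmd descriptor from the nexus session's
 * preallocated tag pool and initialize it, reusing the command's
 * preallocated scatterlist and page arrays.
 */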
566 static struct vhost_scsi_cmd *
567 vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
568                    unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
569                    u32 exp_data_len, int data_direction)
570 {
571         struct vhost_scsi_cmd *cmd;
572         struct vhost_scsi_nexus *tv_nexus;
573         struct se_session *se_sess;
574         struct scatterlist *sg, *prot_sg;
575         struct page **pages;
576         int tag;
577
578         tv_nexus = tpg->tpg_nexus;
579         if (!tv_nexus) {
580                 pr_err("Unable to locate active struct vhost_scsi_nexus\n");
581                 return ERR_PTR(-EIO);
582         }
583         se_sess = tv_nexus->tvn_se_sess;
584
585         tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
586         if (tag < 0) {
587                 pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
588                 return ERR_PTR(-ENOMEM);
589         }
590
591         cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[tag];
592         sg = cmd->tvc_sgl;
593         prot_sg = cmd->tvc_prot_sgl;
594         pages = cmd->tvc_upages;
595         memset(cmd, 0, sizeof(struct vhost_scsi_cmd));
596
597         cmd->tvc_sgl = sg;
598         cmd->tvc_prot_sgl = prot_sg;
599         cmd->tvc_upages = pages;
600         cmd->tvc_se_cmd.map_tag = tag;
601         cmd->tvc_tag = scsi_tag;
602         cmd->tvc_lun = lun;
603         cmd->tvc_task_attr = task_attr;
604         cmd->tvc_exp_data_len = exp_data_len;
605         cmd->tvc_data_direction = data_direction;
606         cmd->tvc_nexus = tv_nexus;
607         cmd->inflight = vhost_scsi_get_inflight(vq);
608
609         memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);
610
611         return cmd;
612 }
613
614 /*
615  * Map a user memory range into a scatterlist
616  *
617  * Returns the number of scatterlist entries used or -errno on error.
618  */
619 static int
620 vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
621                       void __user *ptr,
622                       size_t len,
623                       struct scatterlist *sgl,
624                       bool write)
625 {
626         unsigned int npages = 0, offset, nbytes;
627         unsigned int pages_nr = iov_num_pages(ptr, len);
628         struct scatterlist *sg = sgl;
629         struct page **pages = cmd->tvc_upages;
630         int ret, i;
631
632         if (pages_nr > VHOST_SCSI_PREALLOC_UPAGES) {
633                 pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
634                        " preallocated VHOST_SCSI_PREALLOC_UPAGES: %u\n",
635                         pages_nr, VHOST_SCSI_PREALLOC_UPAGES);
636                 return -ENOBUFS;
637         }
638
639         ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
640         /* No pages were pinned */
641         if (ret < 0)
642                 goto out;
643         /* Fewer pages pinned than requested */
644         if (ret != pages_nr) {
645                 for (i = 0; i < ret; i++)
646                         put_page(pages[i]);
647                 ret = -EFAULT;
648                 goto out;
649         }
650
651         while (len > 0) {
652                 offset = (uintptr_t)ptr & ~PAGE_MASK;
653                 nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
654                 sg_set_page(sg, pages[npages], nbytes, offset);
655                 ptr += nbytes;
656                 len -= nbytes;
657                 sg++;
658                 npages++;
659         }
660
661 out:
662         return ret;
663 }
664
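/*
 * Return the number of scatterlist entries needed to map the payload
 * described by an iov_iter, or -EINVAL if it exceeds the preallocated
 * maximum.
 */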
665 static int
666 vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
667 {
668         int sgl_count = 0;
669
670         if (!iter || !iter->iov) {
671                 pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
672                        " present\n", __func__, bytes);
673                 return -EINVAL;
674         }
675
676         sgl_count = iov_iter_npages(iter, 0xffff);
677         if (sgl_count > max_sgls) {
678                 pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
679                        " max_sgls: %d\n", __func__, sgl_count, max_sgls);
680                 return -EINVAL;
681         }
682         return sgl_count;
683 }
684
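/*
 * Map each iovec segment of an iov_iter into the preallocated scatterlist
 * via vhost_scsi_map_to_sgl(); on failure, release any pages pinned so far.
 */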
685 static int
686 vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
687                       struct iov_iter *iter,
688                       struct scatterlist *sg, int sg_count)
689 {
690         size_t off = iter->iov_offset;
691         int i, ret;
692
693         for (i = 0; i < iter->nr_segs; i++) {
694                 void __user *base = iter->iov[i].iov_base + off;
695                 size_t len = iter->iov[i].iov_len - off;
696
697                 ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write);
698                 if (ret < 0) {
699                         for (i = 0; i < sg_count; i++) {
700                                 struct page *page = sg_page(&sg[i]);
701                                 if (page)
702                                         put_page(page);
703                         }
704                         return ret;
705                 }
706                 sg += ret;
707                 off = 0;
708         }
709         return 0;
710 }
711
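/*
 * Map the data payload, and any T10-PI protection payload, described by
 * the iov_iters into the command's preallocated scatterlists.
 */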
712 static int
713 vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
714                  size_t prot_bytes, struct iov_iter *prot_iter,
715                  size_t data_bytes, struct iov_iter *data_iter)
716 {
717         int sgl_count, ret;
718         bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);
719
720         if (prot_bytes) {
721                 sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
722                                                  VHOST_SCSI_PREALLOC_PROT_SGLS);
723                 if (sgl_count < 0)
724                         return sgl_count;
725
726                 sg_init_table(cmd->tvc_prot_sgl, sgl_count);
727                 cmd->tvc_prot_sgl_count = sgl_count;
728                 pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
729                          cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);
730
731                 ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
732                                             cmd->tvc_prot_sgl,
733                                             cmd->tvc_prot_sgl_count);
734                 if (ret < 0) {
735                         cmd->tvc_prot_sgl_count = 0;
736                         return ret;
737                 }
738         }
739         sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
740                                          VHOST_SCSI_PREALLOC_SGLS);
741         if (sgl_count < 0)
742                 return sgl_count;
743
744         sg_init_table(cmd->tvc_sgl, sgl_count);
745         cmd->tvc_sgl_count = sgl_count;
746         pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
747                   cmd->tvc_sgl, cmd->tvc_sgl_count);
748
749         ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
750                                     cmd->tvc_sgl, cmd->tvc_sgl_count);
751         if (ret < 0) {
752                 cmd->tvc_sgl_count = 0;
753                 return ret;
754         }
755         return 0;
756 }
757
758 static int vhost_scsi_to_tcm_attr(int attr)
759 {
760         switch (attr) {
761         case VIRTIO_SCSI_S_SIMPLE:
762                 return TCM_SIMPLE_TAG;
763         case VIRTIO_SCSI_S_ORDERED:
764                 return TCM_ORDERED_TAG;
765         case VIRTIO_SCSI_S_HEAD:
766                 return TCM_HEAD_TAG;
767         case VIRTIO_SCSI_S_ACA:
768                 return TCM_ACA_TAG;
769         default:
770                 break;
771         }
772         return TCM_SIMPLE_TAG;
773 }
774
775 static void vhost_scsi_submission_work(struct work_struct *work)
776 {
777         struct vhost_scsi_cmd *cmd =
778                 container_of(work, struct vhost_scsi_cmd, work);
779         struct vhost_scsi_nexus *tv_nexus;
780         struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
781         struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
782         int rc;
783
784         /* FIXME: BIDI operation */
785         if (cmd->tvc_sgl_count) {
786                 sg_ptr = cmd->tvc_sgl;
787
788                 if (cmd->tvc_prot_sgl_count)
789                         sg_prot_ptr = cmd->tvc_prot_sgl;
790                 else
791                         se_cmd->prot_pto = true;
792         } else {
793                 sg_ptr = NULL;
794         }
795         tv_nexus = cmd->tvc_nexus;
796
797         se_cmd->tag = 0;
798         rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
799                         cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
800                         cmd->tvc_lun, cmd->tvc_exp_data_len,
801                         vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
802                         cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
803                         sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
804                         cmd->tvc_prot_sgl_count);
805         if (rc < 0) {
806                 transport_send_check_condition_and_sense(se_cmd,
807                                 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
808                 transport_generic_free_cmd(se_cmd, 0);
809         }
810 }
811
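/*
 * Complete a request with VIRTIO_SCSI_S_BAD_TARGET when it cannot be
 * parsed or dispatched to a backend target.
 */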
812 static void
813 vhost_scsi_send_bad_target(struct vhost_scsi *vs,
814                            struct vhost_virtqueue *vq,
815                            int head, unsigned out)
816 {
817         struct virtio_scsi_cmd_resp __user *resp;
818         struct virtio_scsi_cmd_resp rsp;
819         int ret;
820
821         memset(&rsp, 0, sizeof(rsp));
822         rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
823         resp = vq->iov[out].iov_base;
824         ret = __copy_to_user(resp, &rsp, sizeof(rsp));
825         if (!ret)
826                 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
827         else
828                 pr_err("Faulted on virtio_scsi_cmd_resp\n");
829 }
830
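/*
 * Process new requests from the virtio-scsi request virtqueue: parse the
 * (optionally T10-PI) request header, map any data payload to scatterlists,
 * and dispatch each command to the target core via vhost_scsi_workqueue.
 */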
831 static void
832 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
833 {
834         struct vhost_scsi_tpg **vs_tpg, *tpg;
835         struct virtio_scsi_cmd_req v_req;
836         struct virtio_scsi_cmd_req_pi v_req_pi;
837         struct vhost_scsi_cmd *cmd;
838         struct iov_iter out_iter, in_iter, prot_iter, data_iter;
839         u64 tag;
840         u32 exp_data_len, data_direction;
841         unsigned int out = 0, in = 0;
842         int head, ret, prot_bytes;
843         size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
844         size_t out_size, in_size;
845         u16 lun;
846         u8 *target, *lunp, task_attr;
847         bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
848         void *req, *cdb;
849
850         mutex_lock(&vq->mutex);
851         /*
852          * We can handle the vq only after the endpoint has been set up by calling the
853          * VHOST_SCSI_SET_ENDPOINT ioctl.
854          */
855         vs_tpg = vq->private_data;
856         if (!vs_tpg)
857                 goto out;
858
859         vhost_disable_notify(&vs->dev, vq);
860
861         for (;;) {
862                 head = vhost_get_vq_desc(vq, vq->iov,
863                                          ARRAY_SIZE(vq->iov), &out, &in,
864                                          NULL, NULL);
865                 pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
866                          head, out, in);
867                 /* On error, stop handling until the next kick. */
868                 if (unlikely(head < 0))
869                         break;
870                 /* Nothing new?  Wait for eventfd to tell us they refilled. */
871                 if (head == vq->num) {
872                         if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
873                                 vhost_disable_notify(&vs->dev, vq);
874                                 continue;
875                         }
876                         break;
877                 }
878                 /*
879                  * Check for a sane response buffer so we can report early
880                  * errors back to the guest.
881                  */
882                 if (unlikely(vq->iov[out].iov_len < rsp_size)) {
883                         vq_err(vq, "Expecting at least virtio_scsi_cmd_resp"
884                                 " size, got %zu bytes\n", vq->iov[out].iov_len);
885                         break;
886                 }
887                 /*
888                  * Set up pointers and values based upon the different virtio-scsi
889                  * request header used when T10_PI is enabled in the KVM guest.
890                  */
891                 if (t10_pi) {
892                         req = &v_req_pi;
893                         req_size = sizeof(v_req_pi);
894                         lunp = &v_req_pi.lun[0];
895                         target = &v_req_pi.lun[1];
896                 } else {
897                         req = &v_req;
898                         req_size = sizeof(v_req);
899                         lunp = &v_req.lun[0];
900                         target = &v_req.lun[1];
901                 }
902                 /*
903                  * FIXME: Not correct for BIDI operation
904                  */
905                 out_size = iov_length(vq->iov, out);
906                 in_size = iov_length(&vq->iov[out], in);
907
908                 /*
909                  * Copy over the virtio-scsi request header, which for an
910                  * ANY_LAYOUT enabled guest may span multiple iovecs, or a
911                  * single iovec may contain both the header + outgoing
912                  * WRITE payloads.
913                  *
914                  * copy_from_iter() will advance out_iter, so that it will
915                  * point at the start of the outgoing WRITE payload, if
916                  * DMA_TO_DEVICE is set.
917                  */
918                 iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size);
919
920                 if (unlikely(!copy_from_iter_full(req, req_size, &out_iter))) {
921                         vq_err(vq, "Faulted on copy_from_iter\n");
922                         vhost_scsi_send_bad_target(vs, vq, head, out);
923                         continue;
924                 }
925                 /* virtio-scsi spec requires byte 0 of the lun to be 1 */
926                 if (unlikely(*lunp != 1)) {
927                         vq_err(vq, "Illegal virtio-scsi lun: %u\n", *lunp);
928                         vhost_scsi_send_bad_target(vs, vq, head, out);
929                         continue;
930                 }
931
932                 tpg = ACCESS_ONCE(vs_tpg[*target]);
933                 if (unlikely(!tpg)) {
934                         /* Target does not exist, fail the request */
935                         vhost_scsi_send_bad_target(vs, vq, head, out);
936                         continue;
937                 }
938                 /*
939                  * Determine data_direction by calculating the total outgoing
940                  * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
941                  * response headers respectively.
942                  *
943                  * For DMA_TO_DEVICE this is out_iter, which is already pointing
944                  * to the right place.
945                  *
946                  * For DMA_FROM_DEVICE, the iovec will be just past the end
947                  * of the virtio-scsi response header in either the same
948                  * or immediately following iovec.
949                  *
950                  * Any associated T10_PI bytes for the outgoing / incoming
951                  * payloads are included in calculation of exp_data_len here.
952                  */
953                 prot_bytes = 0;
954
955                 if (out_size > req_size) {
956                         data_direction = DMA_TO_DEVICE;
957                         exp_data_len = out_size - req_size;
958                         data_iter = out_iter;
959                 } else if (in_size > rsp_size) {
960                         data_direction = DMA_FROM_DEVICE;
961                         exp_data_len = in_size - rsp_size;
962
963                         iov_iter_init(&in_iter, READ, &vq->iov[out], in,
964                                       rsp_size + exp_data_len);
965                         iov_iter_advance(&in_iter, rsp_size);
966                         data_iter = in_iter;
967                 } else {
968                         data_direction = DMA_NONE;
969                         exp_data_len = 0;
970                 }
971                 /*
972                  * If T10_PI header + payload is present, setup prot_iter values
973                  * and recalculate data_iter for vhost_scsi_mapal() mapping to
974                  * host scatterlists via get_user_pages_fast().
975                  */
976                 if (t10_pi) {
977                         if (v_req_pi.pi_bytesout) {
978                                 if (data_direction != DMA_TO_DEVICE) {
979                                         vq_err(vq, "Received non zero pi_bytesout,"
980                                                 " but wrong data_direction\n");
981                                         vhost_scsi_send_bad_target(vs, vq, head, out);
982                                         continue;
983                                 }
984                                 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
985                         } else if (v_req_pi.pi_bytesin) {
986                                 if (data_direction != DMA_FROM_DEVICE) {
987                                         vq_err(vq, "Received non zero pi_bytesin,"
988                                                 " but wrong data_direction\n");
989                                         vhost_scsi_send_bad_target(vs, vq, head, out);
990                                         continue;
991                                 }
992                                 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
993                         }
994                         /*
995                          * Set prot_iter to data_iter, and advance past any
996                          * preceding prot_bytes that may be present.
997                          *
998                          * Also fix up the exp_data_len to reflect only the
999                          * actual data payload length.
1000                          */
1001                         if (prot_bytes) {
1002                                 exp_data_len -= prot_bytes;
1003                                 prot_iter = data_iter;
1004                                 iov_iter_advance(&data_iter, prot_bytes);
1005                         }
1006                         tag = vhost64_to_cpu(vq, v_req_pi.tag);
1007                         task_attr = v_req_pi.task_attr;
1008                         cdb = &v_req_pi.cdb[0];
1009                         lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF;
1010                 } else {
1011                         tag = vhost64_to_cpu(vq, v_req.tag);
1012                         task_attr = v_req.task_attr;
1013                         cdb = &v_req.cdb[0];
1014                         lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
1015                 }
1016                 /*
1017                  * Check that the received CDB size does not exceed our
1018                  * hardcoded max for vhost-scsi, then get a pre-allocated
1019                  * cmd descriptor for the new virtio-scsi tag.
1020                  *
1021                  * TODO what if cdb was too small for varlen cdb header?
1022                  */
1023                 if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
1024                         vq_err(vq, "Received SCSI CDB with command_size: %d that"
1025                                 " exceeds VHOST_SCSI_MAX_CDB_SIZE: %d\n",
1026                                 scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
1027                         vhost_scsi_send_bad_target(vs, vq, head, out);
1028                         continue;
1029                 }
1030                 cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
1031                                          exp_data_len + prot_bytes,
1032                                          data_direction);
1033                 if (IS_ERR(cmd)) {
1034                         vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
1035                                PTR_ERR(cmd));
1036                         vhost_scsi_send_bad_target(vs, vq, head, out);
1037                         continue;
1038                 }
1039                 cmd->tvc_vhost = vs;
1040                 cmd->tvc_vq = vq;
1041                 cmd->tvc_resp_iov = vq->iov[out];
1042                 cmd->tvc_in_iovs = in;
1043
1044                 pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
1045                          cmd->tvc_cdb[0], cmd->tvc_lun);
1046                 pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
1047                          " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
1048
1049                 if (data_direction != DMA_NONE) {
1050                         ret = vhost_scsi_mapal(cmd,
1051                                                prot_bytes, &prot_iter,
1052                                                exp_data_len, &data_iter);
1053                         if (unlikely(ret)) {
1054                                 vq_err(vq, "Failed to map iov to sgl\n");
1055                                 vhost_scsi_release_cmd(&cmd->tvc_se_cmd);
1056                                 vhost_scsi_send_bad_target(vs, vq, head, out);
1057                                 continue;
1058                         }
1059                 }
1060                 /*
1061                  * Save the descriptor from vhost_get_vq_desc() to be used to
1062                  * complete the virtio-scsi request in TCM callback context via
1063                  * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
1064                  */
1065                 cmd->tvc_vq_desc = head;
1066                 /*
1067                  * Dispatch cmd descriptor for cmwq execution in process
1068                  * context provided by vhost_scsi_workqueue.  This also ensures
1069                  * cmd is executed on the same kworker CPU as this vhost
1070                  * thread to gain positive L2 cache locality effects.
1071                  */
1072                 INIT_WORK(&cmd->work, vhost_scsi_submission_work);
1073                 queue_work(vhost_scsi_workqueue, &cmd->work);
1074         }
1075 out:
1076         mutex_unlock(&vq->mutex);
1077 }
1078
1079 static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
1080 {
1081         pr_debug("%s: The handling func for control queue.\n", __func__);
1082 }
1083
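/*
 * Queue a virtio-scsi event (e.g. LUN hotplug/hotunplug) for delivery to
 * the guest by the vs_event_work item.
 */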
1084 static void
1085 vhost_scsi_send_evt(struct vhost_scsi *vs,
1086                    struct vhost_scsi_tpg *tpg,
1087                    struct se_lun *lun,
1088                    u32 event,
1089                    u32 reason)
1090 {
1091         struct vhost_scsi_evt *evt;
1092
1093         evt = vhost_scsi_allocate_evt(vs, event, reason);
1094         if (!evt)
1095                 return;
1096
1097         if (tpg && lun) {
1098                 /* TODO: share lun setup code with virtio-scsi.ko */
1099                 /*
1100                  * Note: evt->event is zeroed when we allocate it and
1101                  * lun[4-7] need to be zero according to virtio-scsi spec.
1102                  */
1103                 evt->event.lun[0] = 0x01;
1104                 evt->event.lun[1] = tpg->tport_tpgt;
1105                 if (lun->unpacked_lun >= 256)
1106                         evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
1107                 evt->event.lun[3] = lun->unpacked_lun & 0xFF;
1108         }
1109
1110         llist_add(&evt->list, &vs->vs_event_list);
1111         vhost_work_queue(&vs->dev, &vs->vs_event_work);
1112 }
1113
1114 static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
1115 {
1116         struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1117                                                 poll.work);
1118         struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1119
1120         mutex_lock(&vq->mutex);
1121         if (!vq->private_data)
1122                 goto out;
1123
1124         if (vs->vs_events_missed)
1125                 vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
1126 out:
1127         mutex_unlock(&vq->mutex);
1128 }
1129
1130 static void vhost_scsi_handle_kick(struct vhost_work *work)
1131 {
1132         struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1133                                                 poll.work);
1134         struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1135
1136         vhost_scsi_handle_vq(vs, vq);
1137 }
1138
1139 static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
1140 {
1141         vhost_poll_flush(&vs->vqs[index].vq.poll);
1142 }
1143
1144 /* Callers must hold dev mutex */
1145 static void vhost_scsi_flush(struct vhost_scsi *vs)
1146 {
1147         struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
1148         int i;
1149
1150         /* Init new inflight and remember the old inflight */
1151         vhost_scsi_init_inflight(vs, old_inflight);
1152
1153         /*
1154          * The inflight->kref was initialized to 1. We decrement it here to
1155          * indicate the start of the flush operation so that it will reach 0
1156          * when all the reqs are finished.
1157          */
1158         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1159                 kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);
1160
1161         /* Flush both the vhost poll and vhost work */
1162         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1163                 vhost_scsi_flush_vq(vs, i);
1164         vhost_work_flush(&vs->dev, &vs->vs_completion_work);
1165         vhost_work_flush(&vs->dev, &vs->vs_event_work);
1166
1167         /* Wait for all reqs issued before the flush to be finished */
1168         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1169                 wait_for_completion(&old_inflight[i]->comp);
1170 }
1171
1172 /*
1173  * Called from vhost_scsi_ioctl() context to walk the list of available
1174  * vhost_scsi_tpg with an active struct vhost_scsi_nexus
1175  *
1176  *  The lock nesting rule is:
1177  *    vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
1178  */
1179 static int
1180 vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1181                         struct vhost_scsi_target *t)
1182 {
1183         struct se_portal_group *se_tpg;
1184         struct vhost_scsi_tport *tv_tport;
1185         struct vhost_scsi_tpg *tpg;
1186         struct vhost_scsi_tpg **vs_tpg;
1187         struct vhost_virtqueue *vq;
1188         int index, ret, i, len;
1189         bool match = false;
1190
1191         mutex_lock(&vhost_scsi_mutex);
1192         mutex_lock(&vs->dev.mutex);
1193
1194         /* Verify that ring has been setup correctly. */
1195         for (index = 0; index < vs->dev.nvqs; ++index) {
1196                 /* Verify that ring has been setup correctly. */
1197                 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1198                         ret = -EFAULT;
1199                         goto out;
1200                 }
1201         }
1202
1203         len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
1204         vs_tpg = kzalloc(len, GFP_KERNEL);
1205         if (!vs_tpg) {
1206                 ret = -ENOMEM;
1207                 goto out;
1208         }
1209         if (vs->vs_tpg)
1210                 memcpy(vs_tpg, vs->vs_tpg, len);
1211
1212         list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
1213                 mutex_lock(&tpg->tv_tpg_mutex);
1214                 if (!tpg->tpg_nexus) {
1215                         mutex_unlock(&tpg->tv_tpg_mutex);
1216                         continue;
1217                 }
1218                 if (tpg->tv_tpg_vhost_count != 0) {
1219                         mutex_unlock(&tpg->tv_tpg_mutex);
1220                         continue;
1221                 }
1222                 tv_tport = tpg->tport;
1223
1224                 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1225                         if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
1226                                 kfree(vs_tpg);
1227                                 mutex_unlock(&tpg->tv_tpg_mutex);
1228                                 ret = -EEXIST;
1229                                 goto out;
1230                         }
1231                         /*
1232                          * In order to ensure individual vhost-scsi configfs
1233                          * groups cannot be removed while in use by vhost ioctl,
1234                          * go ahead and take an explicit se_tpg->tpg_group.cg_item
1235                          * dependency now.
1236                          */
1237                         se_tpg = &tpg->se_tpg;
1238                         ret = target_depend_item(&se_tpg->tpg_group.cg_item);
1239                         if (ret) {
1240                                 pr_warn("configfs_depend_item() failed: %d\n", ret);
1241                                 kfree(vs_tpg);
1242                                 mutex_unlock(&tpg->tv_tpg_mutex);
1243                                 goto out;
1244                         }
1245                         tpg->tv_tpg_vhost_count++;
1246                         tpg->vhost_scsi = vs;
1247                         vs_tpg[tpg->tport_tpgt] = tpg;
1248                         smp_mb__after_atomic();
1249                         match = true;
1250                 }
1251                 mutex_unlock(&tpg->tv_tpg_mutex);
1252         }
1253
1254         if (match) {
1255                 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
1256                        sizeof(vs->vs_vhost_wwpn));
1257                 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1258                         vq = &vs->vqs[i].vq;
1259                         mutex_lock(&vq->mutex);
1260                         vq->private_data = vs_tpg;
1261                         vhost_vq_init_access(vq);
1262                         mutex_unlock(&vq->mutex);
1263                 }
1264                 ret = 0;
1265         } else {
1266                 ret = -EEXIST;
1267         }
1268
1269         /*
1270          * Act as synchronize_rcu to make sure access to
1271          * old vs->vs_tpg is finished.
1272          */
1273         vhost_scsi_flush(vs);
1274         kfree(vs->vs_tpg);
1275         vs->vs_tpg = vs_tpg;
1276
1277 out:
1278         mutex_unlock(&vs->dev.mutex);
1279         mutex_unlock(&vhost_scsi_mutex);
1280         return ret;
1281 }
1282
1283 static int
1284 vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1285                           struct vhost_scsi_target *t)
1286 {
1287         struct se_portal_group *se_tpg;
1288         struct vhost_scsi_tport *tv_tport;
1289         struct vhost_scsi_tpg *tpg;
1290         struct vhost_virtqueue *vq;
1291         bool match = false;
1292         int index, ret, i;
1293         u8 target;
1294
1295         mutex_lock(&vhost_scsi_mutex);
1296         mutex_lock(&vs->dev.mutex);
1297         /* Verify that ring has been setup correctly. */
1298         for (index = 0; index < vs->dev.nvqs; ++index) {
1299                 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1300                         ret = -EFAULT;
1301                         goto err_dev;
1302                 }
1303         }
1304
1305         if (!vs->vs_tpg) {
1306                 ret = 0;
1307                 goto err_dev;
1308         }
1309
1310         for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1311                 target = i;
1312                 tpg = vs->vs_tpg[target];
1313                 if (!tpg)
1314                         continue;
1315
1316                 mutex_lock(&tpg->tv_tpg_mutex);
1317                 tv_tport = tpg->tport;
1318                 if (!tv_tport) {
1319                         ret = -ENODEV;
1320                         goto err_tpg;
1321                 }
1322
1323                 if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1324                         pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
1325                                 " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
1326                                 tv_tport->tport_name, tpg->tport_tpgt,
1327                                 t->vhost_wwpn, t->vhost_tpgt);
1328                         ret = -EINVAL;
1329                         goto err_tpg;
1330                 }
1331                 tpg->tv_tpg_vhost_count--;
1332                 tpg->vhost_scsi = NULL;
1333                 vs->vs_tpg[target] = NULL;
1334                 match = true;
1335                 mutex_unlock(&tpg->tv_tpg_mutex);
1336                 /*
1337                  * Release se_tpg->tpg_group.cg_item configfs dependency now
1338                  * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
1339                  */
1340                 se_tpg = &tpg->se_tpg;
1341                 target_undepend_item(&se_tpg->tpg_group.cg_item);
1342         }
1343         if (match) {
1344                 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1345                         vq = &vs->vqs[i].vq;
1346                         mutex_lock(&vq->mutex);
1347                         vq->private_data = NULL;
1348                         mutex_unlock(&vq->mutex);
1349                 }
1350         }
1351         /*
1352          * Act as synchronize_rcu to make sure access to
1353          * old vs->vs_tpg is finished.
1354          */
1355         vhost_scsi_flush(vs);
1356         kfree(vs->vs_tpg);
1357         vs->vs_tpg = NULL;
1358         WARN_ON(vs->vs_events_nr);
1359         mutex_unlock(&vs->dev.mutex);
1360         mutex_unlock(&vhost_scsi_mutex);
1361         return 0;
1362
1363 err_tpg:
1364         mutex_unlock(&tpg->tv_tpg_mutex);
1365 err_dev:
1366         mutex_unlock(&vs->dev.mutex);
1367         mutex_unlock(&vhost_scsi_mutex);
1368         return ret;
1369 }
1370
1371 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1372 {
1373         struct vhost_virtqueue *vq;
1374         int i;
1375
1376         if (features & ~VHOST_SCSI_FEATURES)
1377                 return -EOPNOTSUPP;
1378
1379         mutex_lock(&vs->dev.mutex);
1380         if ((features & (1 << VHOST_F_LOG_ALL)) &&
1381             !vhost_log_access_ok(&vs->dev)) {
1382                 mutex_unlock(&vs->dev.mutex);
1383                 return -EFAULT;
1384         }
1385
1386         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1387                 vq = &vs->vqs[i].vq;
1388                 mutex_lock(&vq->mutex);
1389                 vq->acked_features = features;
1390                 mutex_unlock(&vq->mutex);
1391         }
1392         mutex_unlock(&vs->dev.mutex);
1393         return 0;
1394 }
1395
1396 static int vhost_scsi_open(struct inode *inode, struct file *f)
1397 {
1398         struct vhost_scsi *vs;
1399         struct vhost_virtqueue **vqs;
1400         int r = -ENOMEM, i;
1401
1402         vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL);
1403         if (!vs) {
1404                 vs = vzalloc(sizeof(*vs));
1405                 if (!vs)
1406                         goto err_vs;
1407         }
1408
1409         vqs = kmalloc_array(VHOST_SCSI_MAX_VQ, sizeof(*vqs), GFP_KERNEL);
1410         if (!vqs)
1411                 goto err_vqs;
1412
1413         vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
1414         vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
1415
1416         vs->vs_events_nr = 0;
1417         vs->vs_events_missed = false;
1418
1419         vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
1420         vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1421         vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1422         vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
1423         for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
1424                 vqs[i] = &vs->vqs[i].vq;
1425                 vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1426         }
1427         vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
1428
1429         vhost_scsi_init_inflight(vs, NULL);
1430
1431         f->private_data = vs;
1432         return 0;
1433
1434 err_vqs:
1435         kvfree(vs);
1436 err_vs:
1437         return r;
1438 }
1439
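/*
 * Character device release: detach any configured endpoint, stop and clean
 * up the vhost device, flush once more since the event kick handler may have
 * re-queued work, then free the virtqueue array and the instance itself.
 */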
1440 static int vhost_scsi_release(struct inode *inode, struct file *f)
1441 {
1442         struct vhost_scsi *vs = f->private_data;
1443         struct vhost_scsi_target t;
1444
1445         mutex_lock(&vs->dev.mutex);
1446         memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
1447         mutex_unlock(&vs->dev.mutex);
1448         vhost_scsi_clear_endpoint(vs, &t);
1449         vhost_dev_stop(&vs->dev);
1450         vhost_dev_cleanup(&vs->dev, false);
1451         /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
1452         vhost_scsi_flush(vs);
1453         kfree(vs->dev.vqs);
1454         kvfree(vs);
1455         return 0;
1456 }
1457
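/*
 * Main ioctl dispatcher: handles the vhost-scsi specific endpoint, ABI
 * version, events-missed and feature ioctls directly, and falls back to the
 * generic vhost device/vring ioctls for everything else.
 */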
1458 static long
1459 vhost_scsi_ioctl(struct file *f,
1460                  unsigned int ioctl,
1461                  unsigned long arg)
1462 {
1463         struct vhost_scsi *vs = f->private_data;
1464         struct vhost_scsi_target backend;
1465         void __user *argp = (void __user *)arg;
1466         u64 __user *featurep = argp;
1467         u32 __user *eventsp = argp;
1468         u32 events_missed;
1469         u64 features;
1470         int r, abi_version = VHOST_SCSI_ABI_VERSION;
1471         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1472
1473         switch (ioctl) {
1474         case VHOST_SCSI_SET_ENDPOINT:
1475                 if (copy_from_user(&backend, argp, sizeof backend))
1476                         return -EFAULT;
1477                 if (backend.reserved != 0)
1478                         return -EOPNOTSUPP;
1479
1480                 return vhost_scsi_set_endpoint(vs, &backend);
1481         case VHOST_SCSI_CLEAR_ENDPOINT:
1482                 if (copy_from_user(&backend, argp, sizeof backend))
1483                         return -EFAULT;
1484                 if (backend.reserved != 0)
1485                         return -EOPNOTSUPP;
1486
1487                 return vhost_scsi_clear_endpoint(vs, &backend);
1488         case VHOST_SCSI_GET_ABI_VERSION:
1489                 if (copy_to_user(argp, &abi_version, sizeof abi_version))
1490                         return -EFAULT;
1491                 return 0;
1492         case VHOST_SCSI_SET_EVENTS_MISSED:
1493                 if (get_user(events_missed, eventsp))
1494                         return -EFAULT;
1495                 mutex_lock(&vq->mutex);
1496                 vs->vs_events_missed = events_missed;
1497                 mutex_unlock(&vq->mutex);
1498                 return 0;
1499         case VHOST_SCSI_GET_EVENTS_MISSED:
1500                 mutex_lock(&vq->mutex);
1501                 events_missed = vs->vs_events_missed;
1502                 mutex_unlock(&vq->mutex);
1503                 if (put_user(events_missed, eventsp))
1504                         return -EFAULT;
1505                 return 0;
1506         case VHOST_GET_FEATURES:
1507                 features = VHOST_SCSI_FEATURES;
1508                 if (copy_to_user(featurep, &features, sizeof features))
1509                         return -EFAULT;
1510                 return 0;
1511         case VHOST_SET_FEATURES:
1512                 if (copy_from_user(&features, featurep, sizeof features))
1513                         return -EFAULT;
1514                 return vhost_scsi_set_features(vs, features);
1515         default:
1516                 mutex_lock(&vs->dev.mutex);
1517                 r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
1518                 /* TODO: flush backend after dev ioctl. */
1519                 if (r == -ENOIOCTLCMD)
1520                         r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
1521                 mutex_unlock(&vs->dev.mutex);
1522                 return r;
1523         }
1524 }
1525
1526 #ifdef CONFIG_COMPAT
1527 static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
1528                                 unsigned long arg)
1529 {
1530         return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
1531 }
1532 #endif
1533
1534 static const struct file_operations vhost_scsi_fops = {
1535         .owner          = THIS_MODULE,
1536         .release        = vhost_scsi_release,
1537         .unlocked_ioctl = vhost_scsi_ioctl,
1538 #ifdef CONFIG_COMPAT
1539         .compat_ioctl   = vhost_scsi_compat_ioctl,
1540 #endif
1541         .open           = vhost_scsi_open,
1542         .llseek         = noop_llseek,
1543 };
1544
1545 static struct miscdevice vhost_scsi_misc = {
1546         MISC_DYNAMIC_MINOR,
1547         "vhost-scsi",
1548         &vhost_scsi_fops,
1549 };
1550
1551 static int __init vhost_scsi_register(void)
1552 {
1553         return misc_register(&vhost_scsi_misc);
1554 }
1555
1556 static void vhost_scsi_deregister(void)
1557 {
1558         misc_deregister(&vhost_scsi_misc);
1559 }
1560
1561 static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
1562 {
1563         switch (tport->tport_proto_id) {
1564         case SCSI_PROTOCOL_SAS:
1565                 return "SAS";
1566         case SCSI_PROTOCOL_FCP:
1567                 return "FCP";
1568         case SCSI_PROTOCOL_ISCSI:
1569                 return "iSCSI";
1570         default:
1571                 break;
1572         }
1573
1574         return "Unknown";
1575 }
1576
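/*
 * Queue a VIRTIO_SCSI_T_TRANSPORT_RESET event for a LUN plug or unplug on
 * the event virtqueue, but only if the guest negotiated
 * VIRTIO_SCSI_F_HOTPLUG.
 */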
1577 static void
1578 vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
1579                   struct se_lun *lun, bool plug)
1580 {
1581
1582         struct vhost_scsi *vs = tpg->vhost_scsi;
1583         struct vhost_virtqueue *vq;
1584         u32 reason;
1585
1586         if (!vs)
1587                 return;
1588
1589         mutex_lock(&vs->dev.mutex);
1590
1591         if (plug)
1592                 reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
1593         else
1594                 reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
1595
1596         vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1597         mutex_lock(&vq->mutex);
1598         if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
1599                 vhost_scsi_send_evt(vs, tpg, lun,
1600                                    VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
1601         mutex_unlock(&vq->mutex);
1602         mutex_unlock(&vs->dev.mutex);
1603 }
1604
1605 static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
1606 {
1607         vhost_scsi_do_plug(tpg, lun, true);
1608 }
1609
1610 static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
1611 {
1612         vhost_scsi_do_plug(tpg, lun, false);
1613 }
1614
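/*
 * Configfs callback invoked when a LUN is linked into this TPG: bump the
 * port count and emit a hotplug event towards the guest.
 */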
1615 static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
1616                                struct se_lun *lun)
1617 {
1618         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1619                                 struct vhost_scsi_tpg, se_tpg);
1620
1621         mutex_lock(&vhost_scsi_mutex);
1622
1623         mutex_lock(&tpg->tv_tpg_mutex);
1624         tpg->tv_tpg_port_count++;
1625         mutex_unlock(&tpg->tv_tpg_mutex);
1626
1627         vhost_scsi_hotplug(tpg, lun);
1628
1629         mutex_unlock(&vhost_scsi_mutex);
1630
1631         return 0;
1632 }
1633
1634 static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
1635                                   struct se_lun *lun)
1636 {
1637         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1638                                 struct vhost_scsi_tpg, se_tpg);
1639
1640         mutex_lock(&vhost_scsi_mutex);
1641
1642         mutex_lock(&tpg->tv_tpg_mutex);
1643         tpg->tv_tpg_port_count--;
1644         mutex_unlock(&tpg->tv_tpg_mutex);
1645
1646         vhost_scsi_hotunplug(tpg, lun);
1647
1648         mutex_unlock(&vhost_scsi_mutex);
1649 }
1650
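/*
 * Free the per-command scatterlist, protection scatterlist and user-page
 * arrays that were preallocated for every tag in the session command map.
 */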
1651 static void vhost_scsi_free_cmd_map_res(struct se_session *se_sess)
1652 {
1653         struct vhost_scsi_cmd *tv_cmd;
1654         unsigned int i;
1655
1656         if (!se_sess->sess_cmd_map)
1657                 return;
1658
1659         for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
1660                 tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
1661
1662                 kfree(tv_cmd->tvc_sgl);
1663                 kfree(tv_cmd->tvc_prot_sgl);
1664                 kfree(tv_cmd->tvc_upages);
1665         }
1666 }
1667
1668 static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
1669                 struct config_item *item, const char *page, size_t count)
1670 {
1671         struct se_portal_group *se_tpg = attrib_to_tpg(item);
1672         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1673                                 struct vhost_scsi_tpg, se_tpg);
1674         unsigned long val;
1675         int ret = kstrtoul(page, 0, &val);
1676
1677         if (ret) {
1678                 pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
1679                 return ret;
1680         }
1681         if (val != 0 && val != 1 && val != 3) {
1682                 pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
1683                 return -EINVAL;
1684         }
1685         tpg->tv_fabric_prot_type = val;
1686
1687         return count;
1688 }
1689
1690 static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show(
1691                 struct config_item *item, char *page)
1692 {
1693         struct se_portal_group *se_tpg = attrib_to_tpg(item);
1694         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1695                                 struct vhost_scsi_tpg, se_tpg);
1696
1697         return sprintf(page, "%d\n", tpg->tv_fabric_prot_type);
1698 }
1699
1700 CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type);
1701
1702 static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
1703         &vhost_scsi_tpg_attrib_attr_fabric_prot_type,
1704         NULL,
1705 };
1706
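/*
 * Session allocation callback: preallocate the data scatterlists, user page
 * pointers and protection scatterlists for every command slot in the
 * session, unwinding all of them on any allocation failure.
 */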
1707 static int vhost_scsi_nexus_cb(struct se_portal_group *se_tpg,
1708                                struct se_session *se_sess, void *p)
1709 {
1710         struct vhost_scsi_cmd *tv_cmd;
1711         unsigned int i;
1712
1713         for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
1714                 tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
1715
1716                 tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
1717                                         sizeof(struct scatterlist), GFP_KERNEL);
1718                 if (!tv_cmd->tvc_sgl) {
1719                         pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
1720                         goto out;
1721                 }
1722
1723                 tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
1724                                 sizeof(struct page *), GFP_KERNEL);
1725                 if (!tv_cmd->tvc_upages) {
1726                         pr_err("Unable to allocate tv_cmd->tvc_upages\n");
1727                         goto out;
1728                 }
1729
1730                 tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
1731                                 sizeof(struct scatterlist), GFP_KERNEL);
1732                 if (!tv_cmd->tvc_prot_sgl) {
1733                         pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
1734                         goto out;
1735                 }
1736         }
1737         return 0;
1738 out:
1739         vhost_scsi_free_cmd_map_res(se_sess);
1740         return -ENOMEM;
1741 }
1742
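/*
 * Create the single I_T nexus for this TPG, allocating a target session with
 * VHOST_SCSI_DEFAULT_TAGS preallocated vhost_scsi_cmd descriptors.
 */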
1743 static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
1744                                 const char *name)
1745 {
1746         struct vhost_scsi_nexus *tv_nexus;
1747
1748         mutex_lock(&tpg->tv_tpg_mutex);
1749         if (tpg->tpg_nexus) {
1750                 mutex_unlock(&tpg->tv_tpg_mutex);
1751                 pr_debug("tpg->tpg_nexus already exists\n");
1752                 return -EEXIST;
1753         }
1754
1755         tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL);
1756         if (!tv_nexus) {
1757                 mutex_unlock(&tpg->tv_tpg_mutex);
1758                 pr_err("Unable to allocate struct vhost_scsi_nexus\n");
1759                 return -ENOMEM;
1760         }
1761         /*
1762          * Since we are running in 'demo mode' this call will generate a
1763          * struct se_node_acl for the vhost_scsi struct se_portal_group with
1764          * the SCSI Initiator port name of the passed configfs group 'name'.
1765          */
1766         tv_nexus->tvn_se_sess = target_alloc_session(&tpg->se_tpg,
1767                                         VHOST_SCSI_DEFAULT_TAGS,
1768                                         sizeof(struct vhost_scsi_cmd),
1769                                         TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
1770                                         (unsigned char *)name, tv_nexus,
1771                                         vhost_scsi_nexus_cb);
1772         if (IS_ERR(tv_nexus->tvn_se_sess)) {
1773                 mutex_unlock(&tpg->tv_tpg_mutex);
1774                 kfree(tv_nexus);
1775                 return -ENOMEM;
1776         }
1777         tpg->tpg_nexus = tv_nexus;
1778
1779         mutex_unlock(&tpg->tv_tpg_mutex);
1780         return 0;
1781 }
1782
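/*
 * Tear down the TPG's I_T nexus, refusing while any configfs ports or vhost
 * endpoints still reference the TPG, then release the preallocated command
 * resources and deregister the session.
 */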
1783 static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
1784 {
1785         struct se_session *se_sess;
1786         struct vhost_scsi_nexus *tv_nexus;
1787
1788         mutex_lock(&tpg->tv_tpg_mutex);
1789         tv_nexus = tpg->tpg_nexus;
1790         if (!tv_nexus) {
1791                 mutex_unlock(&tpg->tv_tpg_mutex);
1792                 return -ENODEV;
1793         }
1794
1795         se_sess = tv_nexus->tvn_se_sess;
1796         if (!se_sess) {
1797                 mutex_unlock(&tpg->tv_tpg_mutex);
1798                 return -ENODEV;
1799         }
1800
1801         if (tpg->tv_tpg_port_count != 0) {
1802                 mutex_unlock(&tpg->tv_tpg_mutex);
1803                 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1804                         " active TPG port count: %d\n",
1805                         tpg->tv_tpg_port_count);
1806                 return -EBUSY;
1807         }
1808
1809         if (tpg->tv_tpg_vhost_count != 0) {
1810                 mutex_unlock(&tpg->tv_tpg_mutex);
1811                 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1812                         " active TPG vhost count: %d\n",
1813                         tpg->tv_tpg_vhost_count);
1814                 return -EBUSY;
1815         }
1816
1817         pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
1818                 " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
1819                 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1820
1821         vhost_scsi_free_cmd_map_res(se_sess);
1822         /*
1823          * Release the SCSI I_T Nexus to the emulated vhost Target Port
1824          */
1825         transport_deregister_session(tv_nexus->tvn_se_sess);
1826         tpg->tpg_nexus = NULL;
1827         mutex_unlock(&tpg->tv_tpg_mutex);
1828
1829         kfree(tv_nexus);
1830         return 0;
1831 }
1832
1833 static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page)
1834 {
1835         struct se_portal_group *se_tpg = to_tpg(item);
1836         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1837                                 struct vhost_scsi_tpg, se_tpg);
1838         struct vhost_scsi_nexus *tv_nexus;
1839         ssize_t ret;
1840
1841         mutex_lock(&tpg->tv_tpg_mutex);
1842         tv_nexus = tpg->tpg_nexus;
1843         if (!tv_nexus) {
1844                 mutex_unlock(&tpg->tv_tpg_mutex);
1845                 return -ENODEV;
1846         }
1847         ret = snprintf(page, PAGE_SIZE, "%s\n",
1848                         tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1849         mutex_unlock(&tpg->tv_tpg_mutex);
1850
1851         return ret;
1852 }
1853
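/*
 * Configfs "nexus" store: "NULL" drops the active nexus; otherwise the value
 * must be a naa., fc. or iqn. initiator port name matching the tport's
 * protocol, and a new nexus is created for it.
 */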
1854 static ssize_t vhost_scsi_tpg_nexus_store(struct config_item *item,
1855                 const char *page, size_t count)
1856 {
1857         struct se_portal_group *se_tpg = to_tpg(item);
1858         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1859                                 struct vhost_scsi_tpg, se_tpg);
1860         struct vhost_scsi_tport *tport_wwn = tpg->tport;
1861         unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
1862         int ret;
1863         /*
1864          * Shut down the active I_T nexus if 'NULL' is passed.
1865          */
1866         if (!strncmp(page, "NULL", 4)) {
1867                 ret = vhost_scsi_drop_nexus(tpg);
1868                 return (!ret) ? count : ret;
1869         }
1870         /*
1871          * Otherwise make sure the passed virtual Initiator port WWN matches
1872          * the fabric protocol_id set in vhost_scsi_make_tport(), and call
1873          * vhost_scsi_make_nexus().
1874          */
1875         if (strlen(page) >= VHOST_SCSI_NAMELEN) {
1876                 pr_err("Emulated NAA SAS Address: %s, exceeds"
1877                                 " max: %d\n", page, VHOST_SCSI_NAMELEN);
1878                 return -EINVAL;
1879         }
1880         snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);
1881
1882         ptr = strstr(i_port, "naa.");
1883         if (ptr) {
1884                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
1885                         pr_err("Passed SAS Initiator Port %s does not"
1886                                 " match target port protoid: %s\n", i_port,
1887                                 vhost_scsi_dump_proto_id(tport_wwn));
1888                         return -EINVAL;
1889                 }
1890                 port_ptr = &i_port[0];
1891                 goto check_newline;
1892         }
1893         ptr = strstr(i_port, "fc.");
1894         if (ptr) {
1895                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
1896                         pr_err("Passed FCP Initiator Port %s does not"
1897                                 " match target port protoid: %s\n", i_port,
1898                                 vhost_scsi_dump_proto_id(tport_wwn));
1899                         return -EINVAL;
1900                 }
1901                 port_ptr = &i_port[3]; /* Skip over "fc." */
1902                 goto check_newline;
1903         }
1904         ptr = strstr(i_port, "iqn.");
1905         if (ptr) {
1906                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
1907                         pr_err("Passed iSCSI Initiator Port %s does not"
1908                                 " match target port protoid: %s\n", i_port,
1909                                 vhost_scsi_dump_proto_id(tport_wwn));
1910                         return -EINVAL;
1911                 }
1912                 port_ptr = &i_port[0];
1913                 goto check_newline;
1914         }
1915         pr_err("Unable to locate prefix for emulated Initiator Port:"
1916                         " %s\n", i_port);
1917         return -EINVAL;
1918         /*
1919          * Clear any trailing newline for the NAA WWN
1920          */
1921 check_newline:
1922         if (i_port[strlen(i_port)-1] == '\n')
1923                 i_port[strlen(i_port)-1] = '\0';
1924
1925         ret = vhost_scsi_make_nexus(tpg, port_ptr);
1926         if (ret < 0)
1927                 return ret;
1928
1929         return count;
1930 }
1931
1932 CONFIGFS_ATTR(vhost_scsi_tpg_, nexus);
1933
1934 static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
1935         &vhost_scsi_tpg_attr_nexus,
1936         NULL,
1937 };
1938
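/*
 * Create a TPG from a configfs "tpgt_<n>" directory name, register it with
 * the target core and add it to the global vhost_scsi_list.
 */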
1939 static struct se_portal_group *
1940 vhost_scsi_make_tpg(struct se_wwn *wwn,
1941                    struct config_group *group,
1942                    const char *name)
1943 {
1944         struct vhost_scsi_tport *tport = container_of(wwn,
1945                         struct vhost_scsi_tport, tport_wwn);
1946
1947         struct vhost_scsi_tpg *tpg;
1948         u16 tpgt;
1949         int ret;
1950
1951         if (strstr(name, "tpgt_") != name)
1952                 return ERR_PTR(-EINVAL);
1953         if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
1954                 return ERR_PTR(-EINVAL);
1955
1956         tpg = kzalloc(sizeof(struct vhost_scsi_tpg), GFP_KERNEL);
1957         if (!tpg) {
1958                 pr_err("Unable to allocate struct vhost_scsi_tpg\n");
1959                 return ERR_PTR(-ENOMEM);
1960         }
1961         mutex_init(&tpg->tv_tpg_mutex);
1962         INIT_LIST_HEAD(&tpg->tv_tpg_list);
1963         tpg->tport = tport;
1964         tpg->tport_tpgt = tpgt;
1965
1966         ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
1967         if (ret < 0) {
1968                 kfree(tpg);
1969                 return NULL;
1970         }
1971         mutex_lock(&vhost_scsi_mutex);
1972         list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
1973         mutex_unlock(&vhost_scsi_mutex);
1974
1975         return &tpg->se_tpg;
1976 }
1977
1978 static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
1979 {
1980         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1981                                 struct vhost_scsi_tpg, se_tpg);
1982
1983         mutex_lock(&vhost_scsi_mutex);
1984         list_del(&tpg->tv_tpg_list);
1985         mutex_unlock(&vhost_scsi_mutex);
1986         /*
1987          * Release the virtual I_T Nexus for this vhost TPG
1988          */
1989         vhost_scsi_drop_nexus(tpg);
1990         /*
1991          * Deregister the se_tpg from TCM..
1992          */
1993         core_tpg_deregister(se_tpg);
1994         kfree(tpg);
1995 }
1996
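/*
 * Create a target port from the configfs WWN directory name, deriving the
 * SCSI protocol identifier from the naa., fc. or iqn. prefix.
 */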
1997 static struct se_wwn *
1998 vhost_scsi_make_tport(struct target_fabric_configfs *tf,
1999                      struct config_group *group,
2000                      const char *name)
2001 {
2002         struct vhost_scsi_tport *tport;
2003         char *ptr;
2004         u64 wwpn = 0;
2005         int off = 0;
2006
2007         /* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
2008                 return ERR_PTR(-EINVAL); */
2009
2010         tport = kzalloc(sizeof(struct vhost_scsi_tport), GFP_KERNEL);
2011         if (!tport) {
2012                 pr_err("Unable to allocate struct vhost_scsi_tport\n");
2013                 return ERR_PTR(-ENOMEM);
2014         }
2015         tport->tport_wwpn = wwpn;
2016         /*
2017          * Determine the emulated Protocol Identifier and Target Port Name
2018          * based on the incoming configfs directory name.
2019          */
2020         ptr = strstr(name, "naa.");
2021         if (ptr) {
2022                 tport->tport_proto_id = SCSI_PROTOCOL_SAS;
2023                 goto check_len;
2024         }
2025         ptr = strstr(name, "fc.");
2026         if (ptr) {
2027                 tport->tport_proto_id = SCSI_PROTOCOL_FCP;
2028                 off = 3; /* Skip over "fc." */
2029                 goto check_len;
2030         }
2031         ptr = strstr(name, "iqn.");
2032         if (ptr) {
2033                 tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
2034                 goto check_len;
2035         }
2036
2037         pr_err("Unable to locate prefix for emulated Target Port:"
2038                         " %s\n", name);
2039         kfree(tport);
2040         return ERR_PTR(-EINVAL);
2041
2042 check_len:
2043         if (strlen(name) >= VHOST_SCSI_NAMELEN) {
2044                 pr_err("Emulated %s Address: %s, exceeds"
2045                         " max: %d\n", vhost_scsi_dump_proto_id(tport), name,
2046                         VHOST_SCSI_NAMELEN);
2047                 kfree(tport);
2048                 return ERR_PTR(-EINVAL);
2049         }
2050         snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);
2051
2052         pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
2053                 " %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);
2054
2055         return &tport->tport_wwn;
2056 }
2057
2058 static void vhost_scsi_drop_tport(struct se_wwn *wwn)
2059 {
2060         struct vhost_scsi_tport *tport = container_of(wwn,
2061                                 struct vhost_scsi_tport, tport_wwn);
2062
2063         pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
2064                 " %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
2065                 tport->tport_name);
2066
2067         kfree(tport);
2068 }
2069
2070 static ssize_t
2071 vhost_scsi_wwn_version_show(struct config_item *item, char *page)
2072 {
2073         return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
2074                 " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2075                 utsname()->machine);
2076 }
2077
2078 CONFIGFS_ATTR_RO(vhost_scsi_wwn_, version);
2079
2080 static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
2081         &vhost_scsi_wwn_attr_version,
2082         NULL,
2083 };
2084
2085 static const struct target_core_fabric_ops vhost_scsi_ops = {
2086         .module                         = THIS_MODULE,
2087         .name                           = "vhost",
2088         .get_fabric_name                = vhost_scsi_get_fabric_name,
2089         .tpg_get_wwn                    = vhost_scsi_get_fabric_wwn,
2090         .tpg_get_tag                    = vhost_scsi_get_tpgt,
2091         .tpg_check_demo_mode            = vhost_scsi_check_true,
2092         .tpg_check_demo_mode_cache      = vhost_scsi_check_true,
2093         .tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
2094         .tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
2095         .tpg_check_prot_fabric_only     = vhost_scsi_check_prot_fabric_only,
2096         .tpg_get_inst_index             = vhost_scsi_tpg_get_inst_index,
2097         .release_cmd                    = vhost_scsi_release_cmd,
2098         .check_stop_free                = vhost_scsi_check_stop_free,
2099         .sess_get_index                 = vhost_scsi_sess_get_index,
2100         .sess_get_initiator_sid         = NULL,
2101         .write_pending                  = vhost_scsi_write_pending,
2102         .write_pending_status           = vhost_scsi_write_pending_status,
2103         .set_default_node_attributes    = vhost_scsi_set_default_node_attrs,
2104         .get_cmd_state                  = vhost_scsi_get_cmd_state,
2105         .queue_data_in                  = vhost_scsi_queue_data_in,
2106         .queue_status                   = vhost_scsi_queue_status,
2107         .queue_tm_rsp                   = vhost_scsi_queue_tm_rsp,
2108         .aborted_task                   = vhost_scsi_aborted_task,
2109         /*
2110          * Setup callers for generic logic in target_core_fabric_configfs.c
2111          */
2112         .fabric_make_wwn                = vhost_scsi_make_tport,
2113         .fabric_drop_wwn                = vhost_scsi_drop_tport,
2114         .fabric_make_tpg                = vhost_scsi_make_tpg,
2115         .fabric_drop_tpg                = vhost_scsi_drop_tpg,
2116         .fabric_post_link               = vhost_scsi_port_link,
2117         .fabric_pre_unlink              = vhost_scsi_port_unlink,
2118
2119         .tfc_wwn_attrs                  = vhost_scsi_wwn_attrs,
2120         .tfc_tpg_base_attrs             = vhost_scsi_tpg_attrs,
2121         .tfc_tpg_attrib_attrs           = vhost_scsi_tpg_attrib_attrs,
2122 };
2123
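/*
 * Module init: create the dedicated submission workqueue, register the misc
 * character device and then the target fabric template, unwinding in reverse
 * order on failure.
 */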
2124 static int __init vhost_scsi_init(void)
2125 {
2126         int ret = -ENOMEM;
2127
2128         pr_debug("TCM_VHOST fabric module %s on %s/%s"
2129                 " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2130                 utsname()->machine);
2131
2132         /*
2133          * Use our own dedicated workqueue for submitting I/O into
2134          * target core to avoid contention within system_wq.
2135          */
2136         vhost_scsi_workqueue = alloc_workqueue("vhost_scsi", 0, 0);
2137         if (!vhost_scsi_workqueue)
2138                 goto out;
2139
2140         ret = vhost_scsi_register();
2141         if (ret < 0)
2142                 goto out_destroy_workqueue;
2143
2144         ret = target_register_template(&vhost_scsi_ops);
2145         if (ret < 0)
2146                 goto out_vhost_scsi_deregister;
2147
2148         return 0;
2149
2150 out_vhost_scsi_deregister:
2151         vhost_scsi_deregister();
2152 out_destroy_workqueue:
2153         destroy_workqueue(vhost_scsi_workqueue);
2154 out:
2155         return ret;
2156 }
2157
2158 static void vhost_scsi_exit(void)
2159 {
2160         target_unregister_template(&vhost_scsi_ops);
2161         vhost_scsi_deregister();
2162         destroy_workqueue(vhost_scsi_workqueue);
2163 }
2164
2165 MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2166 MODULE_ALIAS("tcm_vhost");
2167 MODULE_LICENSE("GPL");
2168 module_init(vhost_scsi_init);
2169 module_exit(vhost_scsi_exit);