1 /*******************************************************************************
2  * Filename:  target_core_transport.c
3  *
4  * This file contains the Generic Target Engine Core.
5  *
6  * (c) Copyright 2002-2013 Datera, Inc.
7  *
8  * Nicholas A. Bellinger <nab@kernel.org>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; either version 2 of the License, or
13  * (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program; if not, write to the Free Software
22  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23  *
24  ******************************************************************************/
25
26 #include <linux/net.h>
27 #include <linux/delay.h>
28 #include <linux/string.h>
29 #include <linux/timer.h>
30 #include <linux/slab.h>
31 #include <linux/spinlock.h>
32 #include <linux/kthread.h>
33 #include <linux/in.h>
34 #include <linux/cdrom.h>
35 #include <linux/module.h>
36 #include <linux/ratelimit.h>
37 #include <linux/vmalloc.h>
38 #include <asm/unaligned.h>
39 #include <net/sock.h>
40 #include <net/tcp.h>
41 #include <scsi/scsi_proto.h>
42 #include <scsi/scsi_common.h>
43
44 #include <target/target_core_base.h>
45 #include <target/target_core_backend.h>
46 #include <target/target_core_fabric.h>
47
48 #include "target_core_internal.h"
49 #include "target_core_alua.h"
50 #include "target_core_pr.h"
51 #include "target_core_ua.h"
52
53 #define CREATE_TRACE_POINTS
54 #include <trace/events/target.h>
55
56 static struct workqueue_struct *target_completion_wq;
57 static struct kmem_cache *se_sess_cache;
58 struct kmem_cache *se_ua_cache;
59 struct kmem_cache *t10_pr_reg_cache;
60 struct kmem_cache *t10_alua_lu_gp_cache;
61 struct kmem_cache *t10_alua_lu_gp_mem_cache;
62 struct kmem_cache *t10_alua_tg_pt_gp_cache;
63 struct kmem_cache *t10_alua_lba_map_cache;
64 struct kmem_cache *t10_alua_lba_map_mem_cache;
65
66 static void transport_complete_task_attr(struct se_cmd *cmd);
67 static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
68 static void transport_handle_queue_full(struct se_cmd *cmd,
69                 struct se_device *dev, int err, bool write_pending);
70 static void target_complete_ok_work(struct work_struct *work);
71
72 int init_se_kmem_caches(void)
73 {
74         se_sess_cache = kmem_cache_create("se_sess_cache",
75                         sizeof(struct se_session), __alignof__(struct se_session),
76                         0, NULL);
77         if (!se_sess_cache) {
78                 pr_err("kmem_cache_create() for struct se_session"
79                                 " failed\n");
80                 goto out;
81         }
82         se_ua_cache = kmem_cache_create("se_ua_cache",
83                         sizeof(struct se_ua), __alignof__(struct se_ua),
84                         0, NULL);
85         if (!se_ua_cache) {
86                 pr_err("kmem_cache_create() for struct se_ua failed\n");
87                 goto out_free_sess_cache;
88         }
89         t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
90                         sizeof(struct t10_pr_registration),
91                         __alignof__(struct t10_pr_registration), 0, NULL);
92         if (!t10_pr_reg_cache) {
93                 pr_err("kmem_cache_create() for struct t10_pr_registration"
94                                 " failed\n");
95                 goto out_free_ua_cache;
96         }
97         t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
98                         sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
99                         0, NULL);
100         if (!t10_alua_lu_gp_cache) {
101                 pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
102                                 " failed\n");
103                 goto out_free_pr_reg_cache;
104         }
105         t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
106                         sizeof(struct t10_alua_lu_gp_member),
107                         __alignof__(struct t10_alua_lu_gp_member), 0, NULL);
108         if (!t10_alua_lu_gp_mem_cache) {
109                 pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
110                                 "cache failed\n");
111                 goto out_free_lu_gp_cache;
112         }
113         t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
114                         sizeof(struct t10_alua_tg_pt_gp),
115                         __alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
116         if (!t10_alua_tg_pt_gp_cache) {
117                 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
118                                 "cache failed\n");
119                 goto out_free_lu_gp_mem_cache;
120         }
121         t10_alua_lba_map_cache = kmem_cache_create(
122                         "t10_alua_lba_map_cache",
123                         sizeof(struct t10_alua_lba_map),
124                         __alignof__(struct t10_alua_lba_map), 0, NULL);
125         if (!t10_alua_lba_map_cache) {
126                 pr_err("kmem_cache_create() for t10_alua_lba_map_"
127                                 "cache failed\n");
128                 goto out_free_tg_pt_gp_cache;
129         }
130         t10_alua_lba_map_mem_cache = kmem_cache_create(
131                         "t10_alua_lba_map_mem_cache",
132                         sizeof(struct t10_alua_lba_map_member),
133                         __alignof__(struct t10_alua_lba_map_member), 0, NULL);
134         if (!t10_alua_lba_map_mem_cache) {
135                 pr_err("kmem_cache_create() for t10_alua_lba_map_mem_"
136                                 "cache failed\n");
137                 goto out_free_lba_map_cache;
138         }
139
140         target_completion_wq = alloc_workqueue("target_completion",
141                                                WQ_MEM_RECLAIM, 0);
142         if (!target_completion_wq)
143                 goto out_free_lba_map_mem_cache;
144
145         return 0;
146
147 out_free_lba_map_mem_cache:
148         kmem_cache_destroy(t10_alua_lba_map_mem_cache);
149 out_free_lba_map_cache:
150         kmem_cache_destroy(t10_alua_lba_map_cache);
151 out_free_tg_pt_gp_cache:
152         kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
153 out_free_lu_gp_mem_cache:
154         kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
155 out_free_lu_gp_cache:
156         kmem_cache_destroy(t10_alua_lu_gp_cache);
157 out_free_pr_reg_cache:
158         kmem_cache_destroy(t10_pr_reg_cache);
159 out_free_ua_cache:
160         kmem_cache_destroy(se_ua_cache);
161 out_free_sess_cache:
162         kmem_cache_destroy(se_sess_cache);
163 out:
164         return -ENOMEM;
165 }
166
167 void release_se_kmem_caches(void)
168 {
169         destroy_workqueue(target_completion_wq);
170         kmem_cache_destroy(se_sess_cache);
171         kmem_cache_destroy(se_ua_cache);
172         kmem_cache_destroy(t10_pr_reg_cache);
173         kmem_cache_destroy(t10_alua_lu_gp_cache);
174         kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
175         kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
176         kmem_cache_destroy(t10_alua_lba_map_cache);
177         kmem_cache_destroy(t10_alua_lba_map_mem_cache);
178 }
179
180 /* This code ensures unique mib indexes are handed out. */
181 static DEFINE_SPINLOCK(scsi_mib_index_lock);
182 static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
183
184 /*
185  * Allocate a new row index for the entry type specified
186  */
187 u32 scsi_get_new_index(scsi_index_t type)
188 {
189         u32 new_index;
190
191         BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));
192
193         spin_lock(&scsi_mib_index_lock);
194         new_index = ++scsi_mib_index[type];
195         spin_unlock(&scsi_mib_index_lock);
196
197         return new_index;
198 }
199
200 void transport_subsystem_check_init(void)
201 {
202         int ret;
203         static int sub_api_initialized;
204
205         if (sub_api_initialized)
206                 return;
207
208         ret = IS_ENABLED(CONFIG_TCM_IBLOCK) && request_module("target_core_iblock");
209         if (ret != 0)
210                 pr_err("Unable to load target_core_iblock\n");
211
212         ret = IS_ENABLED(CONFIG_TCM_FILEIO) && request_module("target_core_file");
213         if (ret != 0)
214                 pr_err("Unable to load target_core_file\n");
215
216         ret = IS_ENABLED(CONFIG_TCM_PSCSI) && request_module("target_core_pscsi");
217         if (ret != 0)
218                 pr_err("Unable to load target_core_pscsi\n");
219
220         ret = IS_ENABLED(CONFIG_TCM_USER2) && request_module("target_core_user");
221         if (ret != 0)
222                 pr_err("Unable to load target_core_user\n");
223
224         sub_api_initialized = 1;
225 }
226
227 /**
228  * transport_init_session - initialize a session object
229  * @se_sess: Session object pointer.
230  *
231  * The caller must have zero-initialized @se_sess before calling this function.
232  */
233 void transport_init_session(struct se_session *se_sess)
234 {
235         INIT_LIST_HEAD(&se_sess->sess_list);
236         INIT_LIST_HEAD(&se_sess->sess_acl_list);
237         INIT_LIST_HEAD(&se_sess->sess_cmd_list);
238         spin_lock_init(&se_sess->sess_cmd_lock);
239         init_waitqueue_head(&se_sess->cmd_list_wq);
240 }
241 EXPORT_SYMBOL(transport_init_session);
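
/*
 * Example (editorial sketch, not part of the original source): a fabric
 * driver that embeds a struct se_session inside its own per-connection
 * object must zero that memory before calling transport_init_session().
 * The "my_fabric_conn" type below is hypothetical.
 *
 *	struct my_fabric_conn {
 *		struct se_session sess;
 *	};
 *
 *	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
 *	if (!conn)
 *		return -ENOMEM;
 *	transport_init_session(&conn->sess);
 */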
242
243 /**
244  * transport_alloc_session - allocate a session object and initialize it
245  * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
246  */
247 struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
248 {
249         struct se_session *se_sess;
250
251         se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
252         if (!se_sess) {
253                 pr_err("Unable to allocate struct se_session from"
254                                 " se_sess_cache\n");
255                 return ERR_PTR(-ENOMEM);
256         }
257         transport_init_session(se_sess);
258         se_sess->sup_prot_ops = sup_prot_ops;
259
260         return se_sess;
261 }
262 EXPORT_SYMBOL(transport_alloc_session);
263
264 /**
265  * transport_alloc_session_tags - allocate target driver private data
266  * @se_sess:  Session pointer.
267  * @tag_num:  Maximum number of in-flight commands between initiator and target.
268  * @tag_size: Size in bytes of the private data a target driver associates with
269  *            each command.
270  */
271 int transport_alloc_session_tags(struct se_session *se_sess,
272                                  unsigned int tag_num, unsigned int tag_size)
273 {
274         int rc;
275
276         se_sess->sess_cmd_map = kcalloc(tag_size, tag_num,
277                                         GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL);
278         if (!se_sess->sess_cmd_map) {
279                 se_sess->sess_cmd_map = vzalloc(array_size(tag_size, tag_num));
280                 if (!se_sess->sess_cmd_map) {
281                         pr_err("Unable to allocate se_sess->sess_cmd_map\n");
282                         return -ENOMEM;
283                 }
284         }
285
286         rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1,
287                         false, GFP_KERNEL, NUMA_NO_NODE);
288         if (rc < 0) {
289                 pr_err("Unable to init se_sess->sess_tag_pool,"
290                         " tag_num: %u\n", tag_num);
291                 kvfree(se_sess->sess_cmd_map);
292                 se_sess->sess_cmd_map = NULL;
293                 return -ENOMEM;
294         }
295
296         return 0;
297 }
298 EXPORT_SYMBOL(transport_alloc_session_tags);
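
/*
 * Example (editorial sketch): once transport_alloc_session_tags() has set up
 * se_sess->sess_tag_pool and se_sess->sess_cmd_map, a fabric driver can
 * reserve a per-command slot and release it again roughly as shown below.
 * The "my_cmd" type is hypothetical; sbitmap_queue_get()/_clear() are the
 * generic sbitmap helpers backing the tag pool.
 *
 *	struct my_cmd *cmd;
 *	unsigned int cpu;
 *	int tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
 *
 *	if (tag < 0)
 *		return NULL;
 *	cmd = &((struct my_cmd *)se_sess->sess_cmd_map)[tag];
 *	...
 *	sbitmap_queue_clear(&se_sess->sess_tag_pool, tag, cpu);
 */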
299
300 /**
301  * transport_init_session_tags - allocate a session and target driver private data
302  * @tag_num:  Maximum number of in-flight commands between initiator and target.
303  * @tag_size: Size in bytes of the private data a target driver associates with
304  *            each command.
305  * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
306  */
307 static struct se_session *
308 transport_init_session_tags(unsigned int tag_num, unsigned int tag_size,
309                             enum target_prot_op sup_prot_ops)
310 {
311         struct se_session *se_sess;
312         int rc;
313
314         if (tag_num != 0 && !tag_size) {
315                 pr_err("init_session_tags called with percpu-ida tag_num:"
316                        " %u, but zero tag_size\n", tag_num);
317                 return ERR_PTR(-EINVAL);
318         }
319         if (!tag_num && tag_size) {
320                 pr_err("init_session_tags called with percpu-ida tag_size:"
321                        " %u, but zero tag_num\n", tag_size);
322                 return ERR_PTR(-EINVAL);
323         }
324
325         se_sess = transport_alloc_session(sup_prot_ops);
326         if (IS_ERR(se_sess))
327                 return se_sess;
328
329         rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
330         if (rc < 0) {
331                 transport_free_session(se_sess);
332                 return ERR_PTR(-ENOMEM);
333         }
334
335         return se_sess;
336 }
337
338 /*
339  * Called with spin_lock_irqsave(&struct se_portal_group->session_lock) held.
340  */
341 void __transport_register_session(
342         struct se_portal_group *se_tpg,
343         struct se_node_acl *se_nacl,
344         struct se_session *se_sess,
345         void *fabric_sess_ptr)
346 {
347         const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
348         unsigned char buf[PR_REG_ISID_LEN];
349         unsigned long flags;
350
351         se_sess->se_tpg = se_tpg;
352         se_sess->fabric_sess_ptr = fabric_sess_ptr;
353         /*
354          * Used by struct se_node_acl's under ConfigFS to locate active struct se_session.
355          *
356          * Only set for struct se_session's that will actually be moving I/O,
357          * e.g. *NOT* discovery sessions.
358          */
359         if (se_nacl) {
360                 /*
361                  *
362                  * Determine if fabric allows for T10-PI feature bits exposed to
363                  * initiators for device backends with !dev->dev_attrib.pi_prot_type.
364                  *
365                  * If so, then always save prot_type on a per se_node_acl node
366                  * basis and re-instate the previous sess_prot_type to avoid
367                  * disabling PI from below any previously initiator side
368                  * registered LUNs.
369                  */
370                 if (se_nacl->saved_prot_type)
371                         se_sess->sess_prot_type = se_nacl->saved_prot_type;
372                 else if (tfo->tpg_check_prot_fabric_only)
373                         se_sess->sess_prot_type = se_nacl->saved_prot_type =
374                                         tfo->tpg_check_prot_fabric_only(se_tpg);
375                 /*
376                  * If the fabric module supports an ISID based TransportID,
377                  * save this value in binary from the fabric I_T Nexus now.
378                  */
379                 if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
380                         memset(&buf[0], 0, PR_REG_ISID_LEN);
381                         se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
382                                         &buf[0], PR_REG_ISID_LEN);
383                         se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
384                 }
385
386                 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
387                 /*
388                  * The se_nacl->nacl_sess pointer will be set to the
389                  * last active I_T Nexus for each struct se_node_acl.
390                  */
391                 se_nacl->nacl_sess = se_sess;
392
393                 list_add_tail(&se_sess->sess_acl_list,
394                               &se_nacl->acl_sess_list);
395                 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
396         }
397         list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
398
399         pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
400                 se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
401 }
402 EXPORT_SYMBOL(__transport_register_session);
403
404 void transport_register_session(
405         struct se_portal_group *se_tpg,
406         struct se_node_acl *se_nacl,
407         struct se_session *se_sess,
408         void *fabric_sess_ptr)
409 {
410         unsigned long flags;
411
412         spin_lock_irqsave(&se_tpg->session_lock, flags);
413         __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
414         spin_unlock_irqrestore(&se_tpg->session_lock, flags);
415 }
416 EXPORT_SYMBOL(transport_register_session);
417
418 struct se_session *
419 target_setup_session(struct se_portal_group *tpg,
420                      unsigned int tag_num, unsigned int tag_size,
421                      enum target_prot_op prot_op,
422                      const char *initiatorname, void *private,
423                      int (*callback)(struct se_portal_group *,
424                                      struct se_session *, void *))
425 {
426         struct se_session *sess;
427
428         /*
429          * If the fabric driver is using pre-allocation of I/O descriptor
430          * tags, go ahead and perform that setup now.
431          */
432         if (tag_num != 0)
433                 sess = transport_init_session_tags(tag_num, tag_size, prot_op);
434         else
435                 sess = transport_alloc_session(prot_op);
436
437         if (IS_ERR(sess))
438                 return sess;
439
440         sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
441                                         (unsigned char *)initiatorname);
442         if (!sess->se_node_acl) {
443                 transport_free_session(sess);
444                 return ERR_PTR(-EACCES);
445         }
446         /*
447          * Go ahead and perform any remaining fabric setup that is
448          * required before transport_register_session().
449          */
450         if (callback != NULL) {
451                 int rc = callback(tpg, sess, private);
452                 if (rc) {
453                         transport_free_session(sess);
454                         return ERR_PTR(rc);
455                 }
456         }
457
458         transport_register_session(tpg, sess->se_node_acl, sess, private);
459         return sess;
460 }
461 EXPORT_SYMBOL(target_setup_session);
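
/*
 * Example (editorial sketch): a fabric driver creating an I_T nexus on
 * initiator login might size the tag pool for 128 outstanding commands and
 * let target_setup_session() handle ACL lookup and registration.  The se_tpg
 * pointer, "struct my_cmd", "login" private data and "my_login_cb" are all
 * hypothetical; the nexus is torn down again with target_remove_session().
 *
 *	sess = target_setup_session(se_tpg, 128, sizeof(struct my_cmd),
 *				    TARGET_PROT_NORMAL, initiatorname,
 *				    login, my_login_cb);
 *	if (IS_ERR(sess))
 *		return PTR_ERR(sess);
 *	...
 *	target_remove_session(sess);
 */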
462
463 ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
464 {
465         struct se_session *se_sess;
466         ssize_t len = 0;
467
468         spin_lock_bh(&se_tpg->session_lock);
469         list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
470                 if (!se_sess->se_node_acl)
471                         continue;
472                 if (!se_sess->se_node_acl->dynamic_node_acl)
473                         continue;
474                 if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE)
475                         break;
476
477                 len += snprintf(page + len, PAGE_SIZE - len, "%s\n",
478                                 se_sess->se_node_acl->initiatorname);
479                 len += 1; /* Include NULL terminator */
480         }
481         spin_unlock_bh(&se_tpg->session_lock);
482
483         return len;
484 }
485 EXPORT_SYMBOL(target_show_dynamic_sessions);
486
487 static void target_complete_nacl(struct kref *kref)
488 {
489         struct se_node_acl *nacl = container_of(kref,
490                                 struct se_node_acl, acl_kref);
491         struct se_portal_group *se_tpg = nacl->se_tpg;
492
493         if (!nacl->dynamic_stop) {
494                 complete(&nacl->acl_free_comp);
495                 return;
496         }
497
498         mutex_lock(&se_tpg->acl_node_mutex);
499         list_del_init(&nacl->acl_list);
500         mutex_unlock(&se_tpg->acl_node_mutex);
501
502         core_tpg_wait_for_nacl_pr_ref(nacl);
503         core_free_device_list_for_node(nacl, se_tpg);
504         kfree(nacl);
505 }
506
507 void target_put_nacl(struct se_node_acl *nacl)
508 {
509         kref_put(&nacl->acl_kref, target_complete_nacl);
510 }
511 EXPORT_SYMBOL(target_put_nacl);
512
513 void transport_deregister_session_configfs(struct se_session *se_sess)
514 {
515         struct se_node_acl *se_nacl;
516         unsigned long flags;
517         /*
518          * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
519          */
520         se_nacl = se_sess->se_node_acl;
521         if (se_nacl) {
522                 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
523                 if (!list_empty(&se_sess->sess_acl_list))
524                         list_del_init(&se_sess->sess_acl_list);
525                 /*
526                  * If the session list is empty, then clear the pointer.
527                  * Otherwise, set the struct se_session pointer from the tail
528                  * element of the per struct se_node_acl active session list.
529                  */
530                 if (list_empty(&se_nacl->acl_sess_list))
531                         se_nacl->nacl_sess = NULL;
532                 else {
533                         se_nacl->nacl_sess = container_of(
534                                         se_nacl->acl_sess_list.prev,
535                                         struct se_session, sess_acl_list);
536                 }
537                 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
538         }
539 }
540 EXPORT_SYMBOL(transport_deregister_session_configfs);
541
542 void transport_free_session(struct se_session *se_sess)
543 {
544         struct se_node_acl *se_nacl = se_sess->se_node_acl;
545
546         /*
547          * Drop the se_node_acl->nacl_kref obtained from within
548          * core_tpg_get_initiator_node_acl().
549          */
550         if (se_nacl) {
551                 struct se_portal_group *se_tpg = se_nacl->se_tpg;
552                 const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo;
553                 unsigned long flags;
554
555                 se_sess->se_node_acl = NULL;
556
557                 /*
558                  * Also determine if we need to drop the extra ->cmd_kref if
559                  * it had been previously dynamically generated, and
560                  * the endpoint is not caching dynamic ACLs.
561                  */
562                 mutex_lock(&se_tpg->acl_node_mutex);
563                 if (se_nacl->dynamic_node_acl &&
564                     !se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
565                         spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
566                         if (list_empty(&se_nacl->acl_sess_list))
567                                 se_nacl->dynamic_stop = true;
568                         spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
569
570                         if (se_nacl->dynamic_stop)
571                                 list_del_init(&se_nacl->acl_list);
572                 }
573                 mutex_unlock(&se_tpg->acl_node_mutex);
574
575                 if (se_nacl->dynamic_stop)
576                         target_put_nacl(se_nacl);
577
578                 target_put_nacl(se_nacl);
579         }
580         if (se_sess->sess_cmd_map) {
581                 sbitmap_queue_free(&se_sess->sess_tag_pool);
582                 kvfree(se_sess->sess_cmd_map);
583         }
584         kmem_cache_free(se_sess_cache, se_sess);
585 }
586 EXPORT_SYMBOL(transport_free_session);
587
588 void transport_deregister_session(struct se_session *se_sess)
589 {
590         struct se_portal_group *se_tpg = se_sess->se_tpg;
591         unsigned long flags;
592
593         if (!se_tpg) {
594                 transport_free_session(se_sess);
595                 return;
596         }
597
598         spin_lock_irqsave(&se_tpg->session_lock, flags);
599         list_del(&se_sess->sess_list);
600         se_sess->se_tpg = NULL;
601         se_sess->fabric_sess_ptr = NULL;
602         spin_unlock_irqrestore(&se_tpg->session_lock, flags);
603
604         pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
605                 se_tpg->se_tpg_tfo->get_fabric_name());
606         /*
607          * If the last kref for an explicit NodeACL is dropping now, wake the
608          * sleeping ->acl_free_comp caller so that the configfs se_node_acl->acl_group
609          * removal context can proceed from within transport_free_session().
610          *
611          * For dynamic ACL, target_put_nacl() uses target_complete_nacl()
612          * to release all remaining generate_node_acl=1 created ACL resources.
613          */
614
615         transport_free_session(se_sess);
616 }
617 EXPORT_SYMBOL(transport_deregister_session);
618
619 void target_remove_session(struct se_session *se_sess)
620 {
621         transport_deregister_session_configfs(se_sess);
622         transport_deregister_session(se_sess);
623 }
624 EXPORT_SYMBOL(target_remove_session);
625
626 static void target_remove_from_state_list(struct se_cmd *cmd)
627 {
628         struct se_device *dev = cmd->se_dev;
629         unsigned long flags;
630
631         if (!dev)
632                 return;
633
634         spin_lock_irqsave(&dev->execute_task_lock, flags);
635         if (cmd->state_active) {
636                 list_del(&cmd->state_list);
637                 cmd->state_active = false;
638         }
639         spin_unlock_irqrestore(&dev->execute_task_lock, flags);
640 }
641
642 /*
643  * This function is called by the target core after the target core has
644  * finished processing a SCSI command or SCSI TMF. Both the regular command
645  * processing code and the code for aborting commands can call this
646  * function. CMD_T_STOP is set if and only if another thread is waiting
647  * inside transport_wait_for_tasks() for t_transport_stop_comp.
648  */
649 static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
650 {
651         unsigned long flags;
652
653         target_remove_from_state_list(cmd);
654
655         /*
656          * Clear struct se_cmd->se_lun before the handoff to FE.
657          */
658         cmd->se_lun = NULL;
659
660         spin_lock_irqsave(&cmd->t_state_lock, flags);
661         /*
662          * Determine if frontend context caller is requesting the stopping of
663          * this command for frontend exceptions.
664          */
665         if (cmd->transport_state & CMD_T_STOP) {
666                 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
667                         __func__, __LINE__, cmd->tag);
668
669                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
670
671                 complete_all(&cmd->t_transport_stop_comp);
672                 return 1;
673         }
674         cmd->transport_state &= ~CMD_T_ACTIVE;
675         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
676
677         /*
678          * Some fabric modules like tcm_loop can release their internally
679          * allocated I/O reference and struct se_cmd now.
680          *
681          * Fabric modules are expected to return '1' here if the se_cmd being
682          * passed is released at this point, or zero if not being released.
683          */
684         return cmd->se_tfo->check_stop_free(cmd);
685 }
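
/*
 * Example (editorial sketch): per the contract described above, a fabric's
 * ->check_stop_free() callback commonly just drops the reference it holds on
 * the descriptor; target_put_sess_cmd() returns 1 when that was the final
 * put and the se_cmd has been released.
 *
 *	static int my_fabric_check_stop_free(struct se_cmd *se_cmd)
 *	{
 *		return target_put_sess_cmd(se_cmd);
 *	}
 */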
686
687 static void transport_lun_remove_cmd(struct se_cmd *cmd)
688 {
689         struct se_lun *lun = cmd->se_lun;
690
691         if (!lun)
692                 return;
693
694         if (cmpxchg(&cmd->lun_ref_active, true, false))
695                 percpu_ref_put(&lun->lun_ref);
696 }
697
698 int transport_cmd_finish_abort(struct se_cmd *cmd)
699 {
700         bool send_tas = cmd->transport_state & CMD_T_TAS;
701         bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
702         int ret = 0;
703
704         if (send_tas)
705                 transport_send_task_abort(cmd);
706
707         if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
708                 transport_lun_remove_cmd(cmd);
709         /*
710          * Allow the fabric driver to unmap any resources before
711          * releasing the descriptor via TFO->release_cmd()
712          */
713         if (!send_tas)
714                 cmd->se_tfo->aborted_task(cmd);
715
716         if (transport_cmd_check_stop_to_fabric(cmd))
717                 return 1;
718         if (!send_tas && ack_kref)
719                 ret = target_put_sess_cmd(cmd);
720
721         return ret;
722 }
723
724 static void target_complete_failure_work(struct work_struct *work)
725 {
726         struct se_cmd *cmd = container_of(work, struct se_cmd, work);
727
728         transport_generic_request_failure(cmd,
729                         TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
730 }
731
732 /*
733  * Used when asking transport to copy Sense Data from the underlying
734  * Linux/SCSI struct scsi_cmnd
735  */
736 static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
737 {
738         struct se_device *dev = cmd->se_dev;
739
740         WARN_ON(!cmd->se_lun);
741
742         if (!dev)
743                 return NULL;
744
745         if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
746                 return NULL;
747
748         cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
749
750         pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n",
751                 dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
752         return cmd->sense_buffer;
753 }
754
755 void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense)
756 {
757         unsigned char *cmd_sense_buf;
758         unsigned long flags;
759
760         spin_lock_irqsave(&cmd->t_state_lock, flags);
761         cmd_sense_buf = transport_get_sense_buffer(cmd);
762         if (!cmd_sense_buf) {
763                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
764                 return;
765         }
766
767         cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
768         memcpy(cmd_sense_buf, sense, cmd->scsi_sense_length);
769         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
770 }
771 EXPORT_SYMBOL(transport_copy_sense_to_cmd);
772
773 void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
774 {
775         struct se_device *dev = cmd->se_dev;
776         int success;
777         unsigned long flags;
778
779         cmd->scsi_status = scsi_status;
780
781         spin_lock_irqsave(&cmd->t_state_lock, flags);
782         switch (cmd->scsi_status) {
783         case SAM_STAT_CHECK_CONDITION:
784                 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
785                         success = 1;
786                 else
787                         success = 0;
788                 break;
789         default:
790                 success = 1;
791                 break;
792         }
793
794         /*
795          * Check for case where an explicit ABORT_TASK has been received
796          * and transport_wait_for_tasks() will be waiting for completion..
797          */
798         if (cmd->transport_state & CMD_T_ABORTED ||
799             cmd->transport_state & CMD_T_STOP) {
800                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
801                 /*
802                  * If COMPARE_AND_WRITE was stopped by __transport_wait_for_tasks(),
803                  * release se_device->caw_sem obtained by sbc_compare_and_write()
804                  * since target_complete_ok_work() or target_complete_failure_work()
805                  * won't be called to invoke the normal CAW completion callbacks.
806                  */
807                 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
808                         up(&dev->caw_sem);
809                 }
810                 complete_all(&cmd->t_transport_stop_comp);
811                 return;
812         } else if (!success) {
813                 INIT_WORK(&cmd->work, target_complete_failure_work);
814         } else {
815                 INIT_WORK(&cmd->work, target_complete_ok_work);
816         }
817
818         cmd->t_state = TRANSPORT_COMPLETE;
819         cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
820         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
821
822         if (cmd->se_cmd_flags & SCF_USE_CPUID)
823                 queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
824         else
825                 queue_work(target_completion_wq, &cmd->work);
826 }
827 EXPORT_SYMBOL(target_complete_cmd);
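
/*
 * Example (editorial sketch): a backend driver completes I/O by optionally
 * copying sense data first and then calling target_complete_cmd() from its
 * completion path.  The "my_backend_io_done" function and its arguments are
 * hypothetical.
 *
 *	static void my_backend_io_done(struct se_cmd *cmd, int err,
 *				       unsigned char *sense)
 *	{
 *		if (err) {
 *			transport_copy_sense_to_cmd(cmd, sense);
 *			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
 *		} else {
 *			target_complete_cmd(cmd, SAM_STAT_GOOD);
 *		}
 *	}
 */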
828
829 void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
830 {
831         if ((scsi_status == SAM_STAT_GOOD ||
832              cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
833             length < cmd->data_length) {
834                 if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
835                         cmd->residual_count += cmd->data_length - length;
836                 } else {
837                         cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
838                         cmd->residual_count = cmd->data_length - length;
839                 }
840
841                 cmd->data_length = length;
842         }
843
844         target_complete_cmd(cmd, scsi_status);
845 }
846 EXPORT_SYMBOL(target_complete_cmd_with_length);
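
/*
 * Worked example (editorial note): if the initiator asked for 4096 bytes but
 * the backend only produced 512, target_complete_cmd_with_length(cmd,
 * SAM_STAT_GOOD, 512) sets SCF_UNDERFLOW_BIT, records
 * residual_count = 4096 - 512 = 3584, trims cmd->data_length to 512 and then
 * completes the command as usual.
 */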
847
848 static void target_add_to_state_list(struct se_cmd *cmd)
849 {
850         struct se_device *dev = cmd->se_dev;
851         unsigned long flags;
852
853         spin_lock_irqsave(&dev->execute_task_lock, flags);
854         if (!cmd->state_active) {
855                 list_add_tail(&cmd->state_list, &dev->state_list);
856                 cmd->state_active = true;
857         }
858         spin_unlock_irqrestore(&dev->execute_task_lock, flags);
859 }
860
861 /*
862  * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
863  */
864 static void transport_write_pending_qf(struct se_cmd *cmd);
865 static void transport_complete_qf(struct se_cmd *cmd);
866
867 void target_qf_do_work(struct work_struct *work)
868 {
869         struct se_device *dev = container_of(work, struct se_device,
870                                         qf_work_queue);
871         LIST_HEAD(qf_cmd_list);
872         struct se_cmd *cmd, *cmd_tmp;
873
874         spin_lock_irq(&dev->qf_cmd_lock);
875         list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
876         spin_unlock_irq(&dev->qf_cmd_lock);
877
878         list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
879                 list_del(&cmd->se_qf_node);
880                 atomic_dec_mb(&dev->dev_qf_count);
881
882                 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
883                         " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
884                         (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
885                         (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
886                         : "UNKNOWN");
887
888                 if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
889                         transport_write_pending_qf(cmd);
890                 else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK ||
891                          cmd->t_state == TRANSPORT_COMPLETE_QF_ERR)
892                         transport_complete_qf(cmd);
893         }
894 }
895
896 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
897 {
898         switch (cmd->data_direction) {
899         case DMA_NONE:
900                 return "NONE";
901         case DMA_FROM_DEVICE:
902                 return "READ";
903         case DMA_TO_DEVICE:
904                 return "WRITE";
905         case DMA_BIDIRECTIONAL:
906                 return "BIDI";
907         default:
908                 break;
909         }
910
911         return "UNKNOWN";
912 }
913
914 void transport_dump_dev_state(
915         struct se_device *dev,
916         char *b,
917         int *bl)
918 {
919         *bl += sprintf(b + *bl, "Status: ");
920         if (dev->export_count)
921                 *bl += sprintf(b + *bl, "ACTIVATED");
922         else
923                 *bl += sprintf(b + *bl, "DEACTIVATED");
924
925         *bl += sprintf(b + *bl, "  Max Queue Depth: %d", dev->queue_depth);
926         *bl += sprintf(b + *bl, "  SectorSize: %u  HwMaxSectors: %u\n",
927                 dev->dev_attrib.block_size,
928                 dev->dev_attrib.hw_max_sectors);
929         *bl += sprintf(b + *bl, "        ");
930 }
931
932 void transport_dump_vpd_proto_id(
933         struct t10_vpd *vpd,
934         unsigned char *p_buf,
935         int p_buf_len)
936 {
937         unsigned char buf[VPD_TMP_BUF_SIZE];
938         int len;
939
940         memset(buf, 0, VPD_TMP_BUF_SIZE);
941         len = sprintf(buf, "T10 VPD Protocol Identifier: ");
942
943         switch (vpd->protocol_identifier) {
944         case 0x00:
945                 sprintf(buf+len, "Fibre Channel\n");
946                 break;
947         case 0x10:
948                 sprintf(buf+len, "Parallel SCSI\n");
949                 break;
950         case 0x20:
951                 sprintf(buf+len, "SSA\n");
952                 break;
953         case 0x30:
954                 sprintf(buf+len, "IEEE 1394\n");
955                 break;
956         case 0x40:
957                 sprintf(buf+len, "SCSI Remote Direct Memory Access"
958                                 " Protocol\n");
959                 break;
960         case 0x50:
961                 sprintf(buf+len, "Internet SCSI (iSCSI)\n");
962                 break;
963         case 0x60:
964                 sprintf(buf+len, "SAS Serial SCSI Protocol\n");
965                 break;
966         case 0x70:
967                 sprintf(buf+len, "Automation/Drive Interface Transport"
968                                 " Protocol\n");
969                 break;
970         case 0x80:
971                 sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
972                 break;
973         default:
974                 sprintf(buf+len, "Unknown 0x%02x\n",
975                                 vpd->protocol_identifier);
976                 break;
977         }
978
979         if (p_buf)
980                 strncpy(p_buf, buf, p_buf_len);
981         else
982                 pr_debug("%s", buf);
983 }
984
985 void
986 transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
987 {
988         /*
989          * Check if the Protocol Identifier Valid (PIV) bit is set..
990          *
991          * from spc3r23.pdf section 7.5.1
992          */
993          if (page_83[1] & 0x80) {
994                 vpd->protocol_identifier = (page_83[0] & 0xf0);
995                 vpd->protocol_identifier_set = 1;
996                 transport_dump_vpd_proto_id(vpd, NULL, 0);
997         }
998 }
999 EXPORT_SYMBOL(transport_set_vpd_proto_id);
1000
1001 int transport_dump_vpd_assoc(
1002         struct t10_vpd *vpd,
1003         unsigned char *p_buf,
1004         int p_buf_len)
1005 {
1006         unsigned char buf[VPD_TMP_BUF_SIZE];
1007         int ret = 0;
1008         int len;
1009
1010         memset(buf, 0, VPD_TMP_BUF_SIZE);
1011         len = sprintf(buf, "T10 VPD Identifier Association: ");
1012
1013         switch (vpd->association) {
1014         case 0x00:
1015                 sprintf(buf+len, "addressed logical unit\n");
1016                 break;
1017         case 0x10:
1018                 sprintf(buf+len, "target port\n");
1019                 break;
1020         case 0x20:
1021                 sprintf(buf+len, "SCSI target device\n");
1022                 break;
1023         default:
1024                 sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
1025                 ret = -EINVAL;
1026                 break;
1027         }
1028
1029         if (p_buf)
1030                 strncpy(p_buf, buf, p_buf_len);
1031         else
1032                 pr_debug("%s", buf);
1033
1034         return ret;
1035 }
1036
1037 int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
1038 {
1039         /*
1040          * The VPD identification association..
1041          *
1042          * from spc3r23.pdf Section 7.6.3.1 Table 297
1043          */
1044         vpd->association = (page_83[1] & 0x30);
1045         return transport_dump_vpd_assoc(vpd, NULL, 0);
1046 }
1047 EXPORT_SYMBOL(transport_set_vpd_assoc);
1048
1049 int transport_dump_vpd_ident_type(
1050         struct t10_vpd *vpd,
1051         unsigned char *p_buf,
1052         int p_buf_len)
1053 {
1054         unsigned char buf[VPD_TMP_BUF_SIZE];
1055         int ret = 0;
1056         int len;
1057
1058         memset(buf, 0, VPD_TMP_BUF_SIZE);
1059         len = sprintf(buf, "T10 VPD Identifier Type: ");
1060
1061         switch (vpd->device_identifier_type) {
1062         case 0x00:
1063                 sprintf(buf+len, "Vendor specific\n");
1064                 break;
1065         case 0x01:
1066                 sprintf(buf+len, "T10 Vendor ID based\n");
1067                 break;
1068         case 0x02:
1069                 sprintf(buf+len, "EUI-64 based\n");
1070                 break;
1071         case 0x03:
1072                 sprintf(buf+len, "NAA\n");
1073                 break;
1074         case 0x04:
1075                 sprintf(buf+len, "Relative target port identifier\n");
1076                 break;
1077         case 0x08:
1078                 sprintf(buf+len, "SCSI name string\n");
1079                 break;
1080         default:
1081                 sprintf(buf+len, "Unsupported: 0x%02x\n",
1082                                 vpd->device_identifier_type);
1083                 ret = -EINVAL;
1084                 break;
1085         }
1086
1087         if (p_buf) {
1088                 if (p_buf_len < strlen(buf)+1)
1089                         return -EINVAL;
1090                 strncpy(p_buf, buf, p_buf_len);
1091         } else {
1092                 pr_debug("%s", buf);
1093         }
1094
1095         return ret;
1096 }
1097
1098 int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
1099 {
1100         /*
1101          * The VPD identifier type..
1102          *
1103          * from spc3r23.pdf Section 7.6.3.1 Table 298
1104          */
1105         vpd->device_identifier_type = (page_83[1] & 0x0f);
1106         return transport_dump_vpd_ident_type(vpd, NULL, 0);
1107 }
1108 EXPORT_SYMBOL(transport_set_vpd_ident_type);
1109
1110 int transport_dump_vpd_ident(
1111         struct t10_vpd *vpd,
1112         unsigned char *p_buf,
1113         int p_buf_len)
1114 {
1115         unsigned char buf[VPD_TMP_BUF_SIZE];
1116         int ret = 0;
1117
1118         memset(buf, 0, VPD_TMP_BUF_SIZE);
1119
1120         switch (vpd->device_identifier_code_set) {
1121         case 0x01: /* Binary */
1122                 snprintf(buf, sizeof(buf),
1123                         "T10 VPD Binary Device Identifier: %s\n",
1124                         &vpd->device_identifier[0]);
1125                 break;
1126         case 0x02: /* ASCII */
1127                 snprintf(buf, sizeof(buf),
1128                         "T10 VPD ASCII Device Identifier: %s\n",
1129                         &vpd->device_identifier[0]);
1130                 break;
1131         case 0x03: /* UTF-8 */
1132                 snprintf(buf, sizeof(buf),
1133                         "T10 VPD UTF-8 Device Identifier: %s\n",
1134                         &vpd->device_identifier[0]);
1135                 break;
1136         default:
1137                 sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
1138                         " 0x%02x", vpd->device_identifier_code_set);
1139                 ret = -EINVAL;
1140                 break;
1141         }
1142
1143         if (p_buf)
1144                 strncpy(p_buf, buf, p_buf_len);
1145         else
1146                 pr_debug("%s", buf);
1147
1148         return ret;
1149 }
1150
1151 int
1152 transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
1153 {
1154         static const char hex_str[] = "0123456789abcdef";
1155         int j = 0, i = 4; /* offset to start of the identifier */
1156
1157         /*
1158          * The VPD Code Set (encoding)
1159          *
1160          * from spc3r23.pdf Section 7.6.3.1 Table 296
1161          */
1162         vpd->device_identifier_code_set = (page_83[0] & 0x0f);
1163         switch (vpd->device_identifier_code_set) {
1164         case 0x01: /* Binary */
1165                 vpd->device_identifier[j++] =
1166                                 hex_str[vpd->device_identifier_type];
1167                 while (i < (4 + page_83[3])) {
1168                         vpd->device_identifier[j++] =
1169                                 hex_str[(page_83[i] & 0xf0) >> 4];
1170                         vpd->device_identifier[j++] =
1171                                 hex_str[page_83[i] & 0x0f];
1172                         i++;
1173                 }
1174                 break;
1175         case 0x02: /* ASCII */
1176         case 0x03: /* UTF-8 */
1177                 while (i < (4 + page_83[3]))
1178                         vpd->device_identifier[j++] = page_83[i++];
1179                 break;
1180         default:
1181                 break;
1182         }
1183
1184         return transport_dump_vpd_ident(vpd, NULL, 0);
1185 }
1186 EXPORT_SYMBOL(transport_set_vpd_ident);
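
/*
 * Example (editorial sketch): the transport_set_vpd_*() helpers above each
 * decode one field of an INQUIRY EVPD 0x83 designation descriptor.  Given
 * "page_83" pointing at the start of one designator, a caller populates a
 * struct t10_vpd like this:
 *
 *	transport_set_vpd_proto_id(vpd, page_83);
 *	transport_set_vpd_assoc(vpd, page_83);
 *	transport_set_vpd_ident_type(vpd, page_83);
 *	transport_set_vpd_ident(vpd, page_83);
 *
 * For instance, a designator whose second byte is 0xa3 has the PIV bit set
 * (0x80), association 0x20 (SCSI target device) and identifier type 0x03
 * (NAA).
 */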
1187
1188 static sense_reason_t
1189 target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
1190                                unsigned int size)
1191 {
1192         u32 mtl;
1193
1194         if (!cmd->se_tfo->max_data_sg_nents)
1195                 return TCM_NO_SENSE;
1196         /*
1197          * Check if se_cmd->data_length exceeds the fabric-enforced maximum
1198          * transfer length, i.e. max_data_sg_nents worth of single PAGE_SIZE
1199          * entry scatter-lists.  If so, set SCF_UNDERFLOW_BIT + residual_count
1200          * and reduce the original cmd->data_length to that maximum length.
1201          */
1202         mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE);
1203         if (cmd->data_length > mtl) {
1204                 /*
1205                  * If an existing CDB overflow is present, calculate new residual
1206                  * based on CDB size minus fabric maximum transfer length.
1207                  *
1208                  * If an existing CDB underflow is present, calculate new residual
1209                  * based on original cmd->data_length minus fabric maximum transfer
1210                  * length.
1211                  *
1212                  * Otherwise, set the underflow residual based on cmd->data_length
1213                  * minus fabric maximum transfer length.
1214                  */
1215                 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
1216                         cmd->residual_count = (size - mtl);
1217                 } else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
1218                         u32 orig_dl = size + cmd->residual_count;
1219                         cmd->residual_count = (orig_dl - mtl);
1220                 } else {
1221                         cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
1222                         cmd->residual_count = (cmd->data_length - mtl);
1223                 }
1224                 cmd->data_length = mtl;
1225                 /*
1226                  * Reset sbc_check_prot() calculated protection payload
1227                  * length based upon the new smaller MTL.
1228                  */
1229                 if (cmd->prot_length) {
1230                         u32 sectors = (mtl / dev->dev_attrib.block_size);
1231                         cmd->prot_length = dev->prot_length * sectors;
1232                 }
1233         }
1234         return TCM_NO_SENSE;
1235 }
1236
1237 sense_reason_t
1238 target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
1239 {
1240         struct se_device *dev = cmd->se_dev;
1241
1242         if (cmd->unknown_data_length) {
1243                 cmd->data_length = size;
1244         } else if (size != cmd->data_length) {
1245                 pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:"
1246                         " %u does not match SCSI CDB Length: %u for SAM Opcode:"
1247                         " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
1248                                 cmd->data_length, size, cmd->t_task_cdb[0]);
1249
1250                 if (cmd->data_direction == DMA_TO_DEVICE) {
1251                         if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
1252                                 pr_err_ratelimited("Rejecting underflow/overflow"
1253                                                    " for WRITE data CDB\n");
1254                                 return TCM_INVALID_CDB_FIELD;
1255                         }
1256                         /*
1257                          * Some fabric drivers like iscsi-target still expect to
1258                          * always reject overflow writes.  Reject this case until
1259                          * full fabric driver level support for overflow writes
1260                          * is introduced tree-wide.
1261                          */
1262                         if (size > cmd->data_length) {
1263                                 pr_err_ratelimited("Rejecting overflow for"
1264                                                    " WRITE control CDB\n");
1265                                 return TCM_INVALID_CDB_FIELD;
1266                         }
1267                 }
1268                 /*
1269                  * Reject READ_* or WRITE_* with overflow/underflow for
1270                  * type SCF_SCSI_DATA_CDB.
1271                  */
1272                 if (dev->dev_attrib.block_size != 512)  {
1273                         pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
1274                                 " CDB on non 512-byte sector setup subsystem"
1275                                 " plugin: %s\n", dev->transport->name);
1276                         /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
1277                         return TCM_INVALID_CDB_FIELD;
1278                 }
1279                 /*
1280                  * For the overflow case keep the existing fabric provided
1281                  * ->data_length.  Otherwise for the underflow case, reset
1282                  * ->data_length to the smaller SCSI expected data transfer
1283                  * length.
1284                  */
1285                 if (size > cmd->data_length) {
1286                         cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
1287                         cmd->residual_count = (size - cmd->data_length);
1288                 } else {
1289                         cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
1290                         cmd->residual_count = (cmd->data_length - size);
1291                         cmd->data_length = size;
1292                 }
1293         }
1294
1295         return target_check_max_data_sg_nents(cmd, dev, size);
1296
1297 }
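
/*
 * Worked example (editorial note): suppose the fabric reported an expected
 * transfer length of 4096 bytes (cmd->data_length) while the CDB describes a
 * READ(10) of 4 blocks on a 512-byte device, i.e. size = 2048.  The
 * underflow path above sets SCF_UNDERFLOW_BIT, residual_count = 4096 - 2048
 * = 2048 and shrinks cmd->data_length to 2048.  Had the CDB described 16
 * blocks (size = 8192) instead, the overflow path would keep data_length at
 * 4096 and record residual_count = 8192 - 4096 = 4096.
 */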
1298
1299 /*
1300  * Used by fabric modules containing a local struct se_cmd within their
1301  * fabric dependent per I/O descriptor.
1302  *
1303  * Preserves the value of @cmd->tag.
1304  */
1305 void transport_init_se_cmd(
1306         struct se_cmd *cmd,
1307         const struct target_core_fabric_ops *tfo,
1308         struct se_session *se_sess,
1309         u32 data_length,
1310         int data_direction,
1311         int task_attr,
1312         unsigned char *sense_buffer)
1313 {
1314         INIT_LIST_HEAD(&cmd->se_delayed_node);
1315         INIT_LIST_HEAD(&cmd->se_qf_node);
1316         INIT_LIST_HEAD(&cmd->se_cmd_list);
1317         INIT_LIST_HEAD(&cmd->state_list);
1318         init_completion(&cmd->t_transport_stop_comp);
1319         cmd->compl = NULL;
1320         spin_lock_init(&cmd->t_state_lock);
1321         INIT_WORK(&cmd->work, NULL);
1322         kref_init(&cmd->cmd_kref);
1323
1324         cmd->se_tfo = tfo;
1325         cmd->se_sess = se_sess;
1326         cmd->data_length = data_length;
1327         cmd->data_direction = data_direction;
1328         cmd->sam_task_attr = task_attr;
1329         cmd->sense_buffer = sense_buffer;
1330
1331         cmd->state_active = false;
1332 }
1333 EXPORT_SYMBOL(transport_init_se_cmd);
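
/*
 * Example (editorial sketch): a fabric driver that embeds a struct se_cmd in
 * its own per-I/O descriptor initializes it roughly like this before
 * submitting the CDB.  The "my_cmd" structure, "my_fabric_ops" and the field
 * names are hypothetical.
 *
 *	transport_init_se_cmd(&my_cmd->se_cmd, &my_fabric_ops,
 *			      my_cmd->se_sess, data_length, DMA_FROM_DEVICE,
 *			      TCM_SIMPLE_TAG, my_cmd->sense_buf);
 *	my_cmd->se_cmd.tag = my_cmd->tag;
 */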
1334
1335 static sense_reason_t
1336 transport_check_alloc_task_attr(struct se_cmd *cmd)
1337 {
1338         struct se_device *dev = cmd->se_dev;
1339
1340         /*
1341          * Check if SAM Task Attribute emulation is enabled for this
1342          * struct se_device storage object
1343          */
1344         if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
1345                 return 0;
1346
1347         if (cmd->sam_task_attr == TCM_ACA_TAG) {
1348                 pr_debug("SAM Task Attribute ACA"
1349                         " emulation is not supported\n");
1350                 return TCM_INVALID_CDB_FIELD;
1351         }
1352
1353         return 0;
1354 }
1355
1356 sense_reason_t
1357 target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
1358 {
1359         struct se_device *dev = cmd->se_dev;
1360         sense_reason_t ret;
1361
1362         /*
1363          * Ensure that the received CDB is less than the max (252 + 8) bytes
1364          * for VARIABLE_LENGTH_CMD
1365          */
1366         if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
1367                 pr_err("Received SCSI CDB with command_size: %d that"
1368                         " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1369                         scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
1370                 return TCM_INVALID_CDB_FIELD;
1371         }
1372         /*
1373          * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
1374          * allocate the additional extended CDB buffer now.  Otherwise
1375          * point t_task_cdb at the built-in __t_task_cdb buffer.
1376          */
1377         if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
1378                 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
1379                                                 GFP_KERNEL);
1380                 if (!cmd->t_task_cdb) {
1381                         pr_err("Unable to allocate cmd->t_task_cdb"
1382                                 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
1383                                 scsi_command_size(cdb),
1384                                 (unsigned long)sizeof(cmd->__t_task_cdb));
1385                         return TCM_OUT_OF_RESOURCES;
1386                 }
1387         } else
1388                 cmd->t_task_cdb = &cmd->__t_task_cdb[0];
1389         /*
1390          * Copy the original CDB into cmd->t_task_cdb.
1391          */
1392         memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
1393
1394         trace_target_sequencer_start(cmd);
1395
1396         ret = dev->transport->parse_cdb(cmd);
1397         if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
1398                 pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
1399                                     cmd->se_tfo->get_fabric_name(),
1400                                     cmd->se_sess->se_node_acl->initiatorname,
1401                                     cmd->t_task_cdb[0]);
1402         if (ret)
1403                 return ret;
1404
1405         ret = transport_check_alloc_task_attr(cmd);
1406         if (ret)
1407                 return ret;
1408
1409         cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
1410         atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus);
1411         return 0;
1412 }
1413 EXPORT_SYMBOL(target_setup_cmd_from_cdb);
1414
1415 /*
1416  * Used by fabric module frontends to queue tasks directly.
1417  * May only be used from process context.
1418  */
1419 int transport_handle_cdb_direct(
1420         struct se_cmd *cmd)
1421 {
1422         sense_reason_t ret;
1423
1424         if (!cmd->se_lun) {
1425                 dump_stack();
1426                 pr_err("cmd->se_lun is NULL\n");
1427                 return -EINVAL;
1428         }
1429         if (in_interrupt()) {
1430                 dump_stack();
1431                 pr_err("transport_handle_cdb_direct cannot be called"
1432                                 " from interrupt context\n");
1433                 return -EINVAL;
1434         }
1435         /*
1436          * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
1437          * outstanding descriptors are handled correctly during shutdown via
1438          * transport_wait_for_tasks()
1439          *
1440          * Also, we don't take cmd->t_state_lock here as we only expect
1441          * this to be called for initial descriptor submission.
1442          */
1443         cmd->t_state = TRANSPORT_NEW_CMD;
1444         cmd->transport_state |= CMD_T_ACTIVE;
1445
1446         /*
1447          * transport_generic_new_cmd() already handles QUEUE_FULL, so
1448          * follow the TRANSPORT_NEW_CMD processing-thread context usage
1449          * and call transport_generic_request_failure() if necessary.
1450          */
1451         ret = transport_generic_new_cmd(cmd);
1452         if (ret)
1453                 transport_generic_request_failure(cmd, ret);
1454         return 0;
1455 }
1456 EXPORT_SYMBOL(transport_handle_cdb_direct);
1457
1458 sense_reason_t
1459 transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
1460                 u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
1461 {
1462         if (!sgl || !sgl_count)
1463                 return 0;
1464
1465         /*
1466          * Reject SCSI data overflow with map_mem_to_cmd() as incoming
1467          * scatterlists already have been set to follow what the fabric
1468          * passes for the original expected data transfer length.
1469          */
1470         if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
1471                 pr_warn("Rejecting SCSI DATA overflow for fabric using"
1472                         " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
1473                 return TCM_INVALID_CDB_FIELD;
1474         }
1475
1476         cmd->t_data_sg = sgl;
1477         cmd->t_data_nents = sgl_count;
1478         cmd->t_bidi_data_sg = sgl_bidi;
1479         cmd->t_bidi_data_nents = sgl_bidi_count;
1480
1481         cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
1482         return 0;
1483 }
1484
1485 /**
1486  * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
1487  *                       se_cmd + use pre-allocated SGL memory.
1488  *
1489  * @se_cmd: command descriptor to submit
1490  * @se_sess: associated se_sess for endpoint
1491  * @cdb: pointer to SCSI CDB
1492  * @sense: pointer to SCSI sense buffer
1493  * @unpacked_lun: unpacked LUN to reference for struct se_lun
1494  * @data_length: fabric expected data transfer length
1495  * @task_attr: SAM task attribute
1496  * @data_dir: DMA data direction
1497  * @flags: flags for command submission from target_sc_flags_tables
1498  * @sgl: struct scatterlist memory for unidirectional mapping
1499  * @sgl_count: scatterlist count for unidirectional mapping
1500  * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
1501  * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
1502  * @sgl_prot: struct scatterlist memory protection information
1503  * @sgl_prot_count: scatterlist count for protection information
1504  *
1505  * Task tags are supported if the caller has set @se_cmd->tag.
1506  *
1507  * Returns non zero to signal active I/O shutdown failure.  All other
1508  * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
1509  * but still return zero here.
1510  *
1511  * This may only be called from process context, and also currently
1512  * assumes internal allocation of fabric payload buffer by target-core.
1513  */
1514 int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
1515                 unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
1516                 u32 data_length, int task_attr, int data_dir, int flags,
1517                 struct scatterlist *sgl, u32 sgl_count,
1518                 struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
1519                 struct scatterlist *sgl_prot, u32 sgl_prot_count)
1520 {
1521         struct se_portal_group *se_tpg;
1522         sense_reason_t rc;
1523         int ret;
1524
1525         se_tpg = se_sess->se_tpg;
1526         BUG_ON(!se_tpg);
1527         BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
1528         BUG_ON(in_interrupt());
1529         /*
1530          * Initialize se_cmd for target operation.  From this point
1531          * exceptions are handled by sending exception status via
1532          * target_core_fabric_ops->queue_status() callback
1533          */
1534         transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1535                                 data_length, data_dir, task_attr, sense);
1536
1537         if (flags & TARGET_SCF_USE_CPUID)
1538                 se_cmd->se_cmd_flags |= SCF_USE_CPUID;
1539         else
1540                 se_cmd->cpuid = WORK_CPU_UNBOUND;
1541
1542         if (flags & TARGET_SCF_UNKNOWN_SIZE)
1543                 se_cmd->unknown_data_length = 1;
1544         /*
1545          * Obtain struct se_cmd->cmd_kref reference and add new cmd to
1546          * se_sess->sess_cmd_list.  A second kref_get here is necessary
1547          * for fabrics using TARGET_SCF_ACK_KREF that expect a second
1548          * kref_put() to happen during fabric packet acknowledgement.
1549          */
1550         ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
1551         if (ret)
1552                 return ret;
1553         /*
1554          * Signal bidirectional data payloads to target-core
1555          */
1556         if (flags & TARGET_SCF_BIDI_OP)
1557                 se_cmd->se_cmd_flags |= SCF_BIDI;
1558         /*
1559          * Locate se_lun pointer and attach it to struct se_cmd
1560          */
1561         rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
1562         if (rc) {
1563                 transport_send_check_condition_and_sense(se_cmd, rc, 0);
1564                 target_put_sess_cmd(se_cmd);
1565                 return 0;
1566         }
1567
1568         rc = target_setup_cmd_from_cdb(se_cmd, cdb);
1569         if (rc != 0) {
1570                 transport_generic_request_failure(se_cmd, rc);
1571                 return 0;
1572         }
1573
1574         /*
1575          * Save pointers for SGLs containing protection information,
1576          * if present.
1577          */
1578         if (sgl_prot_count) {
1579                 se_cmd->t_prot_sg = sgl_prot;
1580                 se_cmd->t_prot_nents = sgl_prot_count;
1581                 se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC;
1582         }
1583
1584         /*
1585          * When a non zero sgl_count has been passed perform SGL passthrough
1586          * mapping for pre-allocated fabric memory instead of having target
1587          * core perform an internal SGL allocation..
1588          */
1589         if (sgl_count != 0) {
1590                 BUG_ON(!sgl);
1591
1592                 /*
1593                  * A work-around for tcm_loop, as some userspace code going
1594                  * through scsi-generic does not memset its associated read
1595                  * buffers, so go ahead and do that here for non-data CDBs.  Also
1596                  * note that this is currently guaranteed to be a single SGL
1597                  * for this case by target core in target_setup_cmd_from_cdb()
1598                  * -> transport_generic_cmd_sequencer().
1599                  */
1600                 if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
1601                      se_cmd->data_direction == DMA_FROM_DEVICE) {
1602                         unsigned char *buf = NULL;
1603
1604                         if (sgl)
1605                                 buf = kmap(sg_page(sgl)) + sgl->offset;
1606
1607                         if (buf) {
1608                                 memset(buf, 0, sgl->length);
1609                                 kunmap(sg_page(sgl));
1610                         }
1611                 }
1612
1613                 rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
1614                                 sgl_bidi, sgl_bidi_count);
1615                 if (rc != 0) {
1616                         transport_generic_request_failure(se_cmd, rc);
1617                         return 0;
1618                 }
1619         }
1620
1621         /*
1622          * Check if we need to delay processing because of ALUA
1623          * Active/NonOptimized primary access state..
1624          */
1625         core_alua_check_nonop_delay(se_cmd);
1626
1627         transport_handle_cdb_direct(se_cmd);
1628         return 0;
1629 }
1630 EXPORT_SYMBOL(target_submit_cmd_map_sgls);
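
/*
 * Illustrative sketch only (not compiled): how a hypothetical fabric driver
 * that pre-allocates its own scatterlists might call
 * target_submit_cmd_map_sgls().  All "example_" names below are assumptions
 * made for this sketch and are not part of the target-core API.
 */
#if 0
static void example_fabric_submit_sgl(struct example_fabric_cmd *fcmd)
{
	struct se_cmd *se_cmd = &fcmd->se_cmd;	/* fcmd is hypothetical */

	/* Task tags are supported if the caller sets se_cmd->tag (see above) */
	se_cmd->tag = fcmd->wire_tag;

	if (target_submit_cmd_map_sgls(se_cmd, fcmd->se_sess, fcmd->cdb,
				       fcmd->sense_buf, fcmd->unpacked_lun,
				       fcmd->data_len, TCM_SIMPLE_TAG,
				       fcmd->dma_dir, TARGET_SCF_ACK_KREF,
				       fcmd->sgl, fcmd->sgl_count,
				       NULL, 0, NULL, 0) < 0)
		example_fabric_release(fcmd);	/* active I/O shutdown */
}
#endif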
1631
1632 /**
1633  * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
1634  *
1635  * @se_cmd: command descriptor to submit
1636  * @se_sess: associated se_sess for endpoint
1637  * @cdb: pointer to SCSI CDB
1638  * @sense: pointer to SCSI sense buffer
1639  * @unpacked_lun: unpacked LUN to reference for struct se_lun
1640  * @data_length: fabric expected data transfer length
1641  * @task_attr: SAM task attribute
1642  * @data_dir: DMA data direction
1643  * @flags: flags for command submission from target_sc_flags_tables
1644  *
1645  * Task tags are supported if the caller has set @se_cmd->tag.
1646  *
1647  * Returns non zero to signal active I/O shutdown failure.  All other
1648  * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
1649  * but still return zero here.
1650  *
1651  * This may only be called from process context, and also currently
1652  * assumes internal allocation of fabric payload buffer by target-core.
1653  *
1654  * It also assumes internal target-core SGL memory allocation.
1655  */
1656 int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
1657                 unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
1658                 u32 data_length, int task_attr, int data_dir, int flags)
1659 {
1660         return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
1661                         unpacked_lun, data_length, task_attr, data_dir,
1662                         flags, NULL, 0, NULL, 0, NULL, 0);
1663 }
1664 EXPORT_SYMBOL(target_submit_cmd);
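
/*
 * Illustrative sketch only (not compiled): the simpler submission path for a
 * hypothetical fabric that lets target-core allocate the data SGLs
 * internally.  The "example_"/"fcmd" names are assumptions for this sketch.
 */
#if 0
	se_cmd->tag = fcmd->wire_tag;
	if (target_submit_cmd(se_cmd, fcmd->se_sess, fcmd->cdb,
			      fcmd->sense_buf, fcmd->unpacked_lun,
			      fcmd->data_len, TCM_SIMPLE_TAG,
			      DMA_FROM_DEVICE, TARGET_SCF_ACK_KREF) < 0)
		example_fabric_release(fcmd);	/* active I/O shutdown */
#endif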
1665
1666 static void target_complete_tmr_failure(struct work_struct *work)
1667 {
1668         struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);
1669
1670         se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
1671         se_cmd->se_tfo->queue_tm_rsp(se_cmd);
1672
1673         transport_lun_remove_cmd(se_cmd);
1674         transport_cmd_check_stop_to_fabric(se_cmd);
1675 }
1676
1677 static bool target_lookup_lun_from_tag(struct se_session *se_sess, u64 tag,
1678                                        u64 *unpacked_lun)
1679 {
1680         struct se_cmd *se_cmd;
1681         unsigned long flags;
1682         bool ret = false;
1683
1684         spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
1685         list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
1686                 if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
1687                         continue;
1688
1689                 if (se_cmd->tag == tag) {
1690                         *unpacked_lun = se_cmd->orig_fe_lun;
1691                         ret = true;
1692                         break;
1693                 }
1694         }
1695         spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
1696
1697         return ret;
1698 }
1699
1700 /**
1701  * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
1702  *                     for TMR CDBs
1703  *
1704  * @se_cmd: command descriptor to submit
1705  * @se_sess: associated se_sess for endpoint
1706  * @sense: pointer to SCSI sense buffer
1707  * @unpacked_lun: unpacked LUN to reference for struct se_lun
1708  * @fabric_tmr_ptr: fabric context for TMR req
1709  * @tm_type: Type of TM request
1710  * @gfp: gfp type for caller
1711  * @tag: referenced task tag for TMR_ABORT_TASK
1712  * @flags: submit cmd flags
1713  *
1714  * Callable from all contexts.
1715  **/
1716
1717 int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
1718                 unsigned char *sense, u64 unpacked_lun,
1719                 void *fabric_tmr_ptr, unsigned char tm_type,
1720                 gfp_t gfp, u64 tag, int flags)
1721 {
1722         struct se_portal_group *se_tpg;
1723         int ret;
1724
1725         se_tpg = se_sess->se_tpg;
1726         BUG_ON(!se_tpg);
1727
1728         transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1729                               0, DMA_NONE, TCM_SIMPLE_TAG, sense);
1730         /*
1731          * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
1732          * allocation failure.
1733          */
1734         ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
1735         if (ret < 0)
1736                 return -ENOMEM;
1737
1738         if (tm_type == TMR_ABORT_TASK)
1739                 se_cmd->se_tmr_req->ref_task_tag = tag;
1740
1741         /* See target_submit_cmd for commentary */
1742         ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
1743         if (ret) {
1744                 core_tmr_release_req(se_cmd->se_tmr_req);
1745                 return ret;
1746         }
1747         /*
1748          * If this is ABORT_TASK with no explicit fabric provided LUN,
1749          * go ahead and search active session tags for a match to figure
1750          * out unpacked_lun for the original se_cmd.
1751          */
1752         if (tm_type == TMR_ABORT_TASK && (flags & TARGET_SCF_LOOKUP_LUN_FROM_TAG)) {
1753                 if (!target_lookup_lun_from_tag(se_sess, tag, &unpacked_lun))
1754                         goto failure;
1755         }
1756
1757         ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
1758         if (ret)
1759                 goto failure;
1760
1761         transport_generic_handle_tmr(se_cmd);
1762         return 0;
1763
1764         /*
1765          * For callback during failure handling, push this work off
1766          * to process context with TMR_LUN_DOES_NOT_EXIST status.
1767          */
1768 failure:
1769         INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
1770         schedule_work(&se_cmd->work);
1771         return 0;
1772 }
1773 EXPORT_SYMBOL(target_submit_tmr);
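
/*
 * Illustrative sketch only (not compiled): a hypothetical fabric submitting
 * an ABORT_TASK TMR and letting target-core resolve the LUN from the
 * referenced task tag.  The "example_" names are assumptions for this sketch.
 */
#if 0
static void example_fabric_abort_task(struct example_fabric_tmr *ftmr)
{
	if (target_submit_tmr(&ftmr->se_cmd, ftmr->se_sess, ftmr->sense_buf,
			      0, ftmr, TMR_ABORT_TASK, GFP_KERNEL,
			      ftmr->ref_task_tag,
			      TARGET_SCF_ACK_KREF |
			      TARGET_SCF_LOOKUP_LUN_FROM_TAG) < 0)
		example_fabric_release_tmr(ftmr);
}
#endif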
1774
1775 /*
1776  * Handle SAM-esque emulation for generic transport request failures.
1777  */
1778 void transport_generic_request_failure(struct se_cmd *cmd,
1779                 sense_reason_t sense_reason)
1780 {
1781         int ret = 0;
1782
1783         pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
1784                  sense_reason);
1785         target_show_cmd("-----[ ", cmd);
1786
1787         /*
1788          * For SAM Task Attribute emulation for failed struct se_cmd
1789          */
1790         transport_complete_task_attr(cmd);
1791
1792         if (cmd->transport_complete_callback)
1793                 cmd->transport_complete_callback(cmd, false, NULL);
1794
1795         if (transport_check_aborted_status(cmd, 1))
1796                 return;
1797
1798         switch (sense_reason) {
1799         case TCM_NON_EXISTENT_LUN:
1800         case TCM_UNSUPPORTED_SCSI_OPCODE:
1801         case TCM_INVALID_CDB_FIELD:
1802         case TCM_INVALID_PARAMETER_LIST:
1803         case TCM_PARAMETER_LIST_LENGTH_ERROR:
1804         case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
1805         case TCM_UNKNOWN_MODE_PAGE:
1806         case TCM_WRITE_PROTECTED:
1807         case TCM_ADDRESS_OUT_OF_RANGE:
1808         case TCM_CHECK_CONDITION_ABORT_CMD:
1809         case TCM_CHECK_CONDITION_UNIT_ATTENTION:
1810         case TCM_CHECK_CONDITION_NOT_READY:
1811         case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
1812         case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
1813         case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
1814         case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
1815         case TCM_TOO_MANY_TARGET_DESCS:
1816         case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE:
1817         case TCM_TOO_MANY_SEGMENT_DESCS:
1818         case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
1819                 break;
1820         case TCM_OUT_OF_RESOURCES:
1821                 cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
1822                 goto queue_status;
1823         case TCM_LUN_BUSY:
1824                 cmd->scsi_status = SAM_STAT_BUSY;
1825                 goto queue_status;
1826         case TCM_RESERVATION_CONFLICT:
1827                 /*
1828                  * No SENSE Data payload for this case, set SCSI Status
1829                  * and queue the response to $FABRIC_MOD.
1830                  *
1831                  * Uses linux/include/scsi/scsi.h SAM status codes defs
1832                  */
1833                 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1834                 /*
1835                  * For UA Interlock Code 11b, a RESERVATION CONFLICT will
1836                  * establish a UNIT ATTENTION with PREVIOUS RESERVATION
1837                  * CONFLICT STATUS.
1838                  *
1839                  * See spc4r17, section 7.4.6 Control Mode Page, Table 349
1840                  */
1841                 if (cmd->se_sess &&
1842                     cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) {
1843                         target_ua_allocate_lun(cmd->se_sess->se_node_acl,
1844                                                cmd->orig_fe_lun, 0x2C,
1845                                         ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
1846                 }
1847
1848                 goto queue_status;
1849         default:
1850                 pr_err("Unknown transport error for CDB 0x%02x: %d\n",
1851                         cmd->t_task_cdb[0], sense_reason);
1852                 sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1853                 break;
1854         }
1855
1856         ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
1857         if (ret)
1858                 goto queue_full;
1859
1860 check_stop:
1861         transport_lun_remove_cmd(cmd);
1862         transport_cmd_check_stop_to_fabric(cmd);
1863         return;
1864
1865 queue_status:
1866         trace_target_cmd_complete(cmd);
1867         ret = cmd->se_tfo->queue_status(cmd);
1868         if (!ret)
1869                 goto check_stop;
1870 queue_full:
1871         transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
1872 }
1873 EXPORT_SYMBOL(transport_generic_request_failure);
1874
1875 void __target_execute_cmd(struct se_cmd *cmd, bool do_checks)
1876 {
1877         sense_reason_t ret;
1878
1879         if (!cmd->execute_cmd) {
1880                 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1881                 goto err;
1882         }
1883         if (do_checks) {
1884                 /*
1885                  * Check for an existing UNIT ATTENTION condition after
1886                  * target_handle_task_attr() has done SAM task attr
1887                  * checking, and possibly have already defered execution
1888                  * checking, and may have already deferred execution
1889                  */
1890                 ret = target_scsi3_ua_check(cmd);
1891                 if (ret)
1892                         goto err;
1893
1894                 ret = target_alua_state_check(cmd);
1895                 if (ret)
1896                         goto err;
1897
1898                 ret = target_check_reservation(cmd);
1899                 if (ret) {
1900                         cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1901                         goto err;
1902                 }
1903         }
1904
1905         ret = cmd->execute_cmd(cmd);
1906         if (!ret)
1907                 return;
1908 err:
1909         spin_lock_irq(&cmd->t_state_lock);
1910         cmd->transport_state &= ~CMD_T_SENT;
1911         spin_unlock_irq(&cmd->t_state_lock);
1912
1913         transport_generic_request_failure(cmd, ret);
1914 }
1915
1916 static int target_write_prot_action(struct se_cmd *cmd)
1917 {
1918         u32 sectors;
1919         /*
1920          * Perform WRITE_INSERT of PI using software emulation when backend
1921          * device has PI enabled, if the transport has not already generated
1922          * PI using hardware WRITE_INSERT offload.
1923          */
1924         switch (cmd->prot_op) {
1925         case TARGET_PROT_DOUT_INSERT:
1926                 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
1927                         sbc_dif_generate(cmd);
1928                 break;
1929         case TARGET_PROT_DOUT_STRIP:
1930                 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP)
1931                         break;
1932
1933                 sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size);
1934                 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
1935                                              sectors, 0, cmd->t_prot_sg, 0);
1936                 if (unlikely(cmd->pi_err)) {
1937                         spin_lock_irq(&cmd->t_state_lock);
1938                         cmd->transport_state &= ~CMD_T_SENT;
1939                         spin_unlock_irq(&cmd->t_state_lock);
1940                         transport_generic_request_failure(cmd, cmd->pi_err);
1941                         return -1;
1942                 }
1943                 break;
1944         default:
1945                 break;
1946         }
1947
1948         return 0;
1949 }
1950
1951 static bool target_handle_task_attr(struct se_cmd *cmd)
1952 {
1953         struct se_device *dev = cmd->se_dev;
1954
1955         if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
1956                 return false;
1957
1958         cmd->se_cmd_flags |= SCF_TASK_ATTR_SET;
1959
1960         /*
1961          * Check for HEAD_OF_QUEUE: such commands are allowed to execute
1962          * immediately instead of being added to the delayed command list.
1963          */
1964         switch (cmd->sam_task_attr) {
1965         case TCM_HEAD_TAG:
1966                 pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
1967                          cmd->t_task_cdb[0]);
1968                 return false;
1969         case TCM_ORDERED_TAG:
1970                 atomic_inc_mb(&dev->dev_ordered_sync);
1971
1972                 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
1973                          cmd->t_task_cdb[0]);
1974
1975                 /*
1976                  * Execute an ORDERED command if no other older commands
1977                  * exist that need to be completed first.
1978                  */
1979                 if (!atomic_read(&dev->simple_cmds))
1980                         return false;
1981                 break;
1982         default:
1983                 /*
1984                  * For SIMPLE and UNTAGGED Task Attribute commands
1985                  */
1986                 atomic_inc_mb(&dev->simple_cmds);
1987                 break;
1988         }
1989
1990         if (atomic_read(&dev->dev_ordered_sync) == 0)
1991                 return false;
1992
1993         spin_lock(&dev->delayed_cmd_lock);
1994         list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
1995         spin_unlock(&dev->delayed_cmd_lock);
1996
1997         pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD list\n",
1998                 cmd->t_task_cdb[0], cmd->sam_task_attr);
1999         return true;
2000 }
2001
2002 static int __transport_check_aborted_status(struct se_cmd *, int);
2003
2004 void target_execute_cmd(struct se_cmd *cmd)
2005 {
2006         /*
2007          * Determine if frontend context caller is requesting the stopping of
2008          * this command for frontend exceptions.
2009          *
2010          * If the received CDB has already been aborted stop processing it here.
2011          */
2012         spin_lock_irq(&cmd->t_state_lock);
2013         if (__transport_check_aborted_status(cmd, 1)) {
2014                 spin_unlock_irq(&cmd->t_state_lock);
2015                 return;
2016         }
2017         if (cmd->transport_state & CMD_T_STOP) {
2018                 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
2019                         __func__, __LINE__, cmd->tag);
2020
2021                 spin_unlock_irq(&cmd->t_state_lock);
2022                 complete_all(&cmd->t_transport_stop_comp);
2023                 return;
2024         }
2025
2026         cmd->t_state = TRANSPORT_PROCESSING;
2027         cmd->transport_state &= ~CMD_T_PRE_EXECUTE;
2028         cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
2029         spin_unlock_irq(&cmd->t_state_lock);
2030
2031         if (target_write_prot_action(cmd))
2032                 return;
2033
2034         if (target_handle_task_attr(cmd)) {
2035                 spin_lock_irq(&cmd->t_state_lock);
2036                 cmd->transport_state &= ~CMD_T_SENT;
2037                 spin_unlock_irq(&cmd->t_state_lock);
2038                 return;
2039         }
2040
2041         __target_execute_cmd(cmd, true);
2042 }
2043 EXPORT_SYMBOL(target_execute_cmd);
2044
2045 /*
2046  * Process all commands up to the last received ORDERED task attribute which
2047  * requires another blocking boundary
2048  */
2049 static void target_restart_delayed_cmds(struct se_device *dev)
2050 {
2051         for (;;) {
2052                 struct se_cmd *cmd;
2053
2054                 spin_lock(&dev->delayed_cmd_lock);
2055                 if (list_empty(&dev->delayed_cmd_list)) {
2056                         spin_unlock(&dev->delayed_cmd_lock);
2057                         break;
2058                 }
2059
2060                 cmd = list_entry(dev->delayed_cmd_list.next,
2061                                  struct se_cmd, se_delayed_node);
2062                 list_del(&cmd->se_delayed_node);
2063                 spin_unlock(&dev->delayed_cmd_lock);
2064
2065                 cmd->transport_state |= CMD_T_SENT;
2066
2067                 __target_execute_cmd(cmd, true);
2068
2069                 if (cmd->sam_task_attr == TCM_ORDERED_TAG)
2070                         break;
2071         }
2072 }
2073
2074 /*
2075  * Called from I/O completion to determine which dormant/delayed
2076  * and ordered cmds need to have their tasks added to the execution queue.
2077  */
2078 static void transport_complete_task_attr(struct se_cmd *cmd)
2079 {
2080         struct se_device *dev = cmd->se_dev;
2081
2082         if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
2083                 return;
2084
2085         if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET))
2086                 goto restart;
2087
2088         if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
2089                 atomic_dec_mb(&dev->simple_cmds);
2090                 dev->dev_cur_ordered_id++;
2091         } else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
2092                 dev->dev_cur_ordered_id++;
2093                 pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
2094                          dev->dev_cur_ordered_id);
2095         } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
2096                 atomic_dec_mb(&dev->dev_ordered_sync);
2097
2098                 dev->dev_cur_ordered_id++;
2099                 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
2100                          dev->dev_cur_ordered_id);
2101         }
2102         cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;
2103
2104 restart:
2105         target_restart_delayed_cmds(dev);
2106 }
2107
2108 static void transport_complete_qf(struct se_cmd *cmd)
2109 {
2110         int ret = 0;
2111
2112         transport_complete_task_attr(cmd);
2113         /*
2114          * If a fabric driver ->write_pending() or ->queue_data_in() callback
2115          * has returned neither -ENOMEM nor -EAGAIN, assume it is fatal and
2116          * the same callbacks should not be retried.  Return CHECK_CONDITION
2117          * if a scsi_status is not already set.
2118          *
2119          * If a fabric driver ->queue_status() has returned non zero, always
2120          * keep retrying no matter what..
2121          */
2122         if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) {
2123                 if (cmd->scsi_status)
2124                         goto queue_status;
2125
2126                 translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
2127                 goto queue_status;
2128         }
2129
2130         /*
2131          * Check if we need to send a sense buffer from
2132          * the struct se_cmd in question. We do NOT want
2133          * to take this path if the I/O has been marked as
2134          * needing to be treated like a "normal read". This
2135          * is the case if it's a tape read, and either the
2136          * FM, EOM, or ILI bits are set, but there is no
2137          * sense data.
2138          */
2139         if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
2140             cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
2141                 goto queue_status;
2142
2143         switch (cmd->data_direction) {
2144         case DMA_FROM_DEVICE:
2145                 /* queue status if not treating this as a normal read */
2146                 if (cmd->scsi_status &&
2147                     !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
2148                         goto queue_status;
2149
2150                 trace_target_cmd_complete(cmd);
2151                 ret = cmd->se_tfo->queue_data_in(cmd);
2152                 break;
2153         case DMA_TO_DEVICE:
2154                 if (cmd->se_cmd_flags & SCF_BIDI) {
2155                         ret = cmd->se_tfo->queue_data_in(cmd);
2156                         break;
2157                 }
2158                 /* fall through */
2159         case DMA_NONE:
2160 queue_status:
2161                 trace_target_cmd_complete(cmd);
2162                 ret = cmd->se_tfo->queue_status(cmd);
2163                 break;
2164         default:
2165                 break;
2166         }
2167
2168         if (ret < 0) {
2169                 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
2170                 return;
2171         }
2172         transport_lun_remove_cmd(cmd);
2173         transport_cmd_check_stop_to_fabric(cmd);
2174 }
2175
2176 static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev,
2177                                         int err, bool write_pending)
2178 {
2179         /*
2180          * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or
2181          * ->queue_data_in() callbacks from new process context.
2182          *
2183          * Otherwise for other errors, transport_complete_qf() will send
2184          * CHECK_CONDITION via ->queue_status() instead of attempting to
2185          * retry associated fabric driver data-transfer callbacks.
2186          */
2187         if (err == -EAGAIN || err == -ENOMEM) {
2188                 cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP :
2189                                                  TRANSPORT_COMPLETE_QF_OK;
2190         } else {
2191                 pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err);
2192                 cmd->t_state = TRANSPORT_COMPLETE_QF_ERR;
2193         }
2194
2195         spin_lock_irq(&dev->qf_cmd_lock);
2196         list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
2197         atomic_inc_mb(&dev->dev_qf_count);
2198         spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
2199
2200         schedule_work(&cmd->se_dev->qf_work_queue);
2201 }
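
/*
 * Illustrative sketch only (not compiled): a hypothetical fabric
 * ->queue_data_in() callback signalling a transient queue-full condition.
 * Returning -EAGAIN or -ENOMEM asks target-core to retry from the device
 * qf_work_queue; any other non-zero value is treated as fatal (see above).
 * The "example_" names are assumptions for this sketch.
 */
#if 0
static int example_fabric_queue_data_in(struct se_cmd *se_cmd)
{
	struct example_fabric_cmd *fcmd = example_fabric_cmd(se_cmd);

	if (!example_fabric_has_tx_space(fcmd->conn))
		return -EAGAIN;		/* retried via transport_complete_qf() */

	return example_fabric_send_data(fcmd);
}
#endif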
2202
2203 static bool target_read_prot_action(struct se_cmd *cmd)
2204 {
2205         switch (cmd->prot_op) {
2206         case TARGET_PROT_DIN_STRIP:
2207                 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
2208                         u32 sectors = cmd->data_length >>
2209                                   ilog2(cmd->se_dev->dev_attrib.block_size);
2210
2211                         cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
2212                                                      sectors, 0, cmd->t_prot_sg,
2213                                                      0);
2214                         if (cmd->pi_err)
2215                                 return true;
2216                 }
2217                 break;
2218         case TARGET_PROT_DIN_INSERT:
2219                 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT)
2220                         break;
2221
2222                 sbc_dif_generate(cmd);
2223                 break;
2224         default:
2225                 break;
2226         }
2227
2228         return false;
2229 }
2230
2231 static void target_complete_ok_work(struct work_struct *work)
2232 {
2233         struct se_cmd *cmd = container_of(work, struct se_cmd, work);
2234         int ret;
2235
2236         /*
2237          * Check if we need to move delayed/dormant tasks from cmds on the
2238          * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
2239          * Attribute.
2240          */
2241         transport_complete_task_attr(cmd);
2242
2243         /*
2244          * Check to schedule QUEUE_FULL work, or execute an existing
2245          * cmd->transport_qf_callback()
2246          */
2247         if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
2248                 schedule_work(&cmd->se_dev->qf_work_queue);
2249
2250         /*
2251          * Check if we need to send a sense buffer from
2252          * the struct se_cmd in question. We do NOT want
2253          * to take this path if the I/O has been marked as
2254          * needing to be treated like a "normal read". This
2255          * is the case if it's a tape read, and either the
2256          * FM, EOM, or ILI bits are set, but there is no
2257          * sense data.
2258          */
2259         if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
2260             cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
2261                 WARN_ON(!cmd->scsi_status);
2262                 ret = transport_send_check_condition_and_sense(
2263                                         cmd, 0, 1);
2264                 if (ret)
2265                         goto queue_full;
2266
2267                 transport_lun_remove_cmd(cmd);
2268                 transport_cmd_check_stop_to_fabric(cmd);
2269                 return;
2270         }
2271         /*
2272          * Check for a callback, used by amongst other things
2273          * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation.
2274          */
2275         if (cmd->transport_complete_callback) {
2276                 sense_reason_t rc;
2277                 bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE);
2278                 bool zero_dl = !(cmd->data_length);
2279                 int post_ret = 0;
2280
2281                 rc = cmd->transport_complete_callback(cmd, true, &post_ret);
2282                 if (!rc && !post_ret) {
2283                         if (caw && zero_dl)
2284                                 goto queue_rsp;
2285
2286                         return;
2287                 } else if (rc) {
2288                         ret = transport_send_check_condition_and_sense(cmd,
2289                                                 rc, 0);
2290                         if (ret)
2291                                 goto queue_full;
2292
2293                         transport_lun_remove_cmd(cmd);
2294                         transport_cmd_check_stop_to_fabric(cmd);
2295                         return;
2296                 }
2297         }
2298
2299 queue_rsp:
2300         switch (cmd->data_direction) {
2301         case DMA_FROM_DEVICE:
2302                 /*
2303                  * if this is a READ-type IO, but SCSI status
2304                  * is set, then skip returning data and just
2305                  * return the status -- unless this IO is marked
2306                  * as needing to be treated as a normal read,
2307                  * in which case we want to go ahead and return
2308                  * the data. This happens, for example, for tape
2309                  * reads with the FM, EOM, or ILI bits set, with
2310                  * no sense data.
2311                  */
2312                 if (cmd->scsi_status &&
2313                     !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
2314                         goto queue_status;
2315
2316                 atomic_long_add(cmd->data_length,
2317                                 &cmd->se_lun->lun_stats.tx_data_octets);
2318                 /*
2319                  * Perform READ_STRIP of PI using software emulation when
2320                  * backend had PI enabled, if the transport will not be
2321                  * performing hardware READ_STRIP offload.
2322                  */
2323                 if (target_read_prot_action(cmd)) {
2324                         ret = transport_send_check_condition_and_sense(cmd,
2325                                                 cmd->pi_err, 0);
2326                         if (ret)
2327                                 goto queue_full;
2328
2329                         transport_lun_remove_cmd(cmd);
2330                         transport_cmd_check_stop_to_fabric(cmd);
2331                         return;
2332                 }
2333
2334                 trace_target_cmd_complete(cmd);
2335                 ret = cmd->se_tfo->queue_data_in(cmd);
2336                 if (ret)
2337                         goto queue_full;
2338                 break;
2339         case DMA_TO_DEVICE:
2340                 atomic_long_add(cmd->data_length,
2341                                 &cmd->se_lun->lun_stats.rx_data_octets);
2342                 /*
2343                  * Check if we need to send READ payload for BIDI-COMMAND
2344                  */
2345                 if (cmd->se_cmd_flags & SCF_BIDI) {
2346                         atomic_long_add(cmd->data_length,
2347                                         &cmd->se_lun->lun_stats.tx_data_octets);
2348                         ret = cmd->se_tfo->queue_data_in(cmd);
2349                         if (ret)
2350                                 goto queue_full;
2351                         break;
2352                 }
2353                 /* fall through */
2354         case DMA_NONE:
2355 queue_status:
2356                 trace_target_cmd_complete(cmd);
2357                 ret = cmd->se_tfo->queue_status(cmd);
2358                 if (ret)
2359                         goto queue_full;
2360                 break;
2361         default:
2362                 break;
2363         }
2364
2365         transport_lun_remove_cmd(cmd);
2366         transport_cmd_check_stop_to_fabric(cmd);
2367         return;
2368
2369 queue_full:
2370         pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
2371                 " data_direction: %d\n", cmd, cmd->data_direction);
2372
2373         transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
2374 }
2375
2376 void target_free_sgl(struct scatterlist *sgl, int nents)
2377 {
2378         sgl_free_n_order(sgl, nents, 0);
2379 }
2380 EXPORT_SYMBOL(target_free_sgl);
2381
2382 static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
2383 {
2384         /*
2385          * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE
2386          * emulation, and free + reset pointers if necessary..
2387          */
2388         if (!cmd->t_data_sg_orig)
2389                 return;
2390
2391         kfree(cmd->t_data_sg);
2392         cmd->t_data_sg = cmd->t_data_sg_orig;
2393         cmd->t_data_sg_orig = NULL;
2394         cmd->t_data_nents = cmd->t_data_nents_orig;
2395         cmd->t_data_nents_orig = 0;
2396 }
2397
2398 static inline void transport_free_pages(struct se_cmd *cmd)
2399 {
2400         if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
2401                 target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
2402                 cmd->t_prot_sg = NULL;
2403                 cmd->t_prot_nents = 0;
2404         }
2405
2406         if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
2407                 /*
2408                  * Release special case READ buffer payload required for
2409                  * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
2410                  */
2411                 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
2412                         target_free_sgl(cmd->t_bidi_data_sg,
2413                                            cmd->t_bidi_data_nents);
2414                         cmd->t_bidi_data_sg = NULL;
2415                         cmd->t_bidi_data_nents = 0;
2416                 }
2417                 transport_reset_sgl_orig(cmd);
2418                 return;
2419         }
2420         transport_reset_sgl_orig(cmd);
2421
2422         target_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
2423         cmd->t_data_sg = NULL;
2424         cmd->t_data_nents = 0;
2425
2426         target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
2427         cmd->t_bidi_data_sg = NULL;
2428         cmd->t_bidi_data_nents = 0;
2429 }
2430
2431 void *transport_kmap_data_sg(struct se_cmd *cmd)
2432 {
2433         struct scatterlist *sg = cmd->t_data_sg;
2434         struct page **pages;
2435         int i;
2436
2437         /*
2438          * We need to take into account a possible offset here for fabrics like
2439          * tcm_loop, which may be using a contiguous buffer from the SCSI midlayer for
2440          * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
2441          */
2442         if (!cmd->t_data_nents)
2443                 return NULL;
2444
2445         BUG_ON(!sg);
2446         if (cmd->t_data_nents == 1)
2447                 return kmap(sg_page(sg)) + sg->offset;
2448
2449         /* >1 page. use vmap */
2450         pages = kmalloc_array(cmd->t_data_nents, sizeof(*pages), GFP_KERNEL);
2451         if (!pages)
2452                 return NULL;
2453
2454         /* convert sg[] to pages[] */
2455         for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
2456                 pages[i] = sg_page(sg);
2457         }
2458
2459         cmd->t_data_vmap = vmap(pages, cmd->t_data_nents,  VM_MAP, PAGE_KERNEL);
2460         kfree(pages);
2461         if (!cmd->t_data_vmap)
2462                 return NULL;
2463
2464         return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
2465 }
2466 EXPORT_SYMBOL(transport_kmap_data_sg);
2467
2468 void transport_kunmap_data_sg(struct se_cmd *cmd)
2469 {
2470         if (!cmd->t_data_nents) {
2471                 return;
2472         } else if (cmd->t_data_nents == 1) {
2473                 kunmap(sg_page(cmd->t_data_sg));
2474                 return;
2475         }
2476
2477         vunmap(cmd->t_data_vmap);
2478         cmd->t_data_vmap = NULL;
2479 }
2480 EXPORT_SYMBOL(transport_kunmap_data_sg);
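
/*
 * Illustrative sketch only (not compiled): typical pairing of
 * transport_kmap_data_sg() and transport_kunmap_data_sg() by CDB emulation
 * code that needs a linear view of the data buffer.
 */
#if 0
	unsigned char *buf;

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	/* ... fill in or parse the payload through buf ... */

	transport_kunmap_data_sg(cmd);
#endif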
2481
2482 int
2483 target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
2484                  bool zero_page, bool chainable)
2485 {
2486         gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0);
2487
2488         *sgl = sgl_alloc_order(length, 0, chainable, gfp, nents);
2489         return *sgl ? 0 : -ENOMEM;
2490 }
2491 EXPORT_SYMBOL(target_alloc_sgl);
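
/*
 * Illustrative sketch only (not compiled): pairing target_alloc_sgl() with
 * target_free_sgl() for a caller-managed scatterlist of "len" bytes, where
 * "len" is a hypothetical local variable.
 */
#if 0
	struct scatterlist *sgl;
	unsigned int nents;

	if (target_alloc_sgl(&sgl, &nents, len, true, false) < 0)
		return -ENOMEM;

	/* ... use sgl/nents for the transfer ... */

	target_free_sgl(sgl, nents);
#endif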
2492
2493 /*
2494  * Allocate any required resources to execute the command.  For writes we
2495  * might not have the payload yet, so notify the fabric via a call to
2496  * ->write_pending instead. Otherwise place it on the execution queue.
2497  */
2498 sense_reason_t
2499 transport_generic_new_cmd(struct se_cmd *cmd)
2500 {
2501         unsigned long flags;
2502         int ret = 0;
2503         bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
2504
2505         if (cmd->prot_op != TARGET_PROT_NORMAL &&
2506             !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
2507                 ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents,
2508                                        cmd->prot_length, true, false);
2509                 if (ret < 0)
2510                         return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2511         }
2512
2513         /*
2514          * Determine if the TCM fabric module has already allocated physical
2515          * memory, and is directly calling transport_generic_map_mem_to_cmd()
2516          * beforehand.
2517          */
2518         if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
2519             cmd->data_length) {
2520
2521                 if ((cmd->se_cmd_flags & SCF_BIDI) ||
2522                     (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
2523                         u32 bidi_length;
2524
2525                         if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)
2526                                 bidi_length = cmd->t_task_nolb *
2527                                               cmd->se_dev->dev_attrib.block_size;
2528                         else
2529                                 bidi_length = cmd->data_length;
2530
2531                         ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
2532                                                &cmd->t_bidi_data_nents,
2533                                                bidi_length, zero_flag, false);
2534                         if (ret < 0)
2535                                 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2536                 }
2537
2538                 ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
2539                                        cmd->data_length, zero_flag, false);
2540                 if (ret < 0)
2541                         return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2542         } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
2543                     cmd->data_length) {
2544                 /*
2545                  * Special case for COMPARE_AND_WRITE with fabrics
2546                  * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC.
2547                  */
2548                 u32 caw_length = cmd->t_task_nolb *
2549                                  cmd->se_dev->dev_attrib.block_size;
2550
2551                 ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
2552                                        &cmd->t_bidi_data_nents,
2553                                        caw_length, zero_flag, false);
2554                 if (ret < 0)
2555                         return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2556         }
2557         /*
2558          * If this command is not a write we can execute it right here,
2559          * for write buffers we need to notify the fabric driver first
2560          * and let it call back once the write buffers are ready.
2561          */
2562         target_add_to_state_list(cmd);
2563         if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) {
2564                 target_execute_cmd(cmd);
2565                 return 0;
2566         }
2567
2568         spin_lock_irqsave(&cmd->t_state_lock, flags);
2569         cmd->t_state = TRANSPORT_WRITE_PENDING;
2570         /*
2571          * Determine if frontend context caller is requesting the stopping of
2572          * this command for frontend exceptions.
2573          */
2574         if (cmd->transport_state & CMD_T_STOP) {
2575                 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
2576                          __func__, __LINE__, cmd->tag);
2577
2578                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2579
2580                 complete_all(&cmd->t_transport_stop_comp);
2581                 return 0;
2582         }
2583         cmd->transport_state &= ~CMD_T_ACTIVE;
2584         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2585
2586         ret = cmd->se_tfo->write_pending(cmd);
2587         if (ret)
2588                 goto queue_full;
2589
2590         return 0;
2591
2592 queue_full:
2593         pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
2594         transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
2595         return 0;
2596 }
2597 EXPORT_SYMBOL(transport_generic_new_cmd);
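
/*
 * Illustrative sketch only (not compiled): a hypothetical fabric
 * ->write_pending() callback.  It starts fetching WRITE data from the
 * initiator and, once cmd->t_data_sg has been filled, later calls
 * target_execute_cmd() from process context.  The "example_" names are
 * assumptions for this sketch.
 */
#if 0
static int example_fabric_write_pending(struct se_cmd *se_cmd)
{
	struct example_fabric_cmd *fcmd = example_fabric_cmd(se_cmd);

	if (!example_fabric_request_data(fcmd))
		return -EAGAIN;		/* queue full: retried via QF work */

	return 0;
}
#endif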
2598
2599 static void transport_write_pending_qf(struct se_cmd *cmd)
2600 {
2601         unsigned long flags;
2602         int ret;
2603         bool stop;
2604
2605         spin_lock_irqsave(&cmd->t_state_lock, flags);
2606         stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED));
2607         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2608
2609         if (stop) {
2610                 pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n",
2611                         __func__, __LINE__, cmd->tag);
2612                 complete_all(&cmd->t_transport_stop_comp);
2613                 return;
2614         }
2615
2616         ret = cmd->se_tfo->write_pending(cmd);
2617         if (ret) {
2618                 pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
2619                          cmd);
2620                 transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
2621         }
2622 }
2623
2624 static bool
2625 __transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
2626                            unsigned long *flags);
2627
2628 static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
2629 {
2630         unsigned long flags;
2631
2632         spin_lock_irqsave(&cmd->t_state_lock, flags);
2633         __transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
2634         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2635 }
2636
2637 /*
2638  * This function is called by frontend drivers after processing of a command
2639  * has finished.
2640  *
2641  * The protocol for ensuring that either the regular flow or the TMF
2642  * code drops one reference is as follows:
2643  * - Calling .queue_data_in(), .queue_status() or .queue_tm_rsp() will cause
2644  *   the frontend driver to drop one reference, synchronously or asynchronously.
2645  * - During regular command processing the target core sets CMD_T_COMPLETE
2646  *   before invoking one of the .queue_*() functions.
2647  * - The code that aborts commands skips commands and TMFs for which
2648  *   CMD_T_COMPLETE has been set.
2649  * - CMD_T_ABORTED is set atomically after the CMD_T_COMPLETE check for
2650  *   commands that will be aborted.
2651  * - If the CMD_T_ABORTED flag is set but CMD_T_TAS has not been set
2652  *   transport_generic_free_cmd() skips its call to target_put_sess_cmd().
2653  * - For aborted commands for which CMD_T_TAS has been set .queue_status() will
2654  *   be called and will drop a reference.
2655  * - For aborted commands for which CMD_T_TAS has not been set .aborted_task()
2656  *   will be called. transport_cmd_finish_abort() will drop the final reference.
2657  */
2658 int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
2659 {
2660         DECLARE_COMPLETION_ONSTACK(compl);
2661         int ret = 0;
2662         bool aborted = false, tas = false;
2663
2664         if (wait_for_tasks)
2665                 target_wait_free_cmd(cmd, &aborted, &tas);
2666
2667         if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) {
2668                 /*
2669                  * Handle WRITE failure case where transport_generic_new_cmd()
2670                  * has already added se_cmd to state_list, but fabric has
2671                  * failed command before I/O submission.
2672                  */
2673                 if (cmd->state_active)
2674                         target_remove_from_state_list(cmd);
2675
2676                 if (cmd->se_lun)
2677                         transport_lun_remove_cmd(cmd);
2678         }
2679         if (aborted)
2680                 cmd->compl = &compl;
2681         if (!aborted || tas)
2682                 ret = target_put_sess_cmd(cmd);
2683         if (aborted) {
2684                 pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
2685                 wait_for_completion(&compl);
2686                 ret = 1;
2687         }
2688         return ret;
2689 }
2690 EXPORT_SYMBOL(transport_generic_free_cmd);
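
/*
 * Illustrative sketch only (not compiled): a hypothetical fabric dropping
 * its command references once the response has been sent and, for
 * TARGET_SCF_ACK_KREF users, acknowledged by the initiator.  The "example_"
 * names are assumptions for this sketch.
 */
#if 0
static void example_fabric_response_sent(struct example_fabric_cmd *fcmd)
{
	/* Drops the regular reference; may block if the cmd was aborted. */
	transport_generic_free_cmd(&fcmd->se_cmd, 0);
}

static void example_fabric_response_acked(struct example_fabric_cmd *fcmd)
{
	/* Drops the extra ACK_KREF reference taken at submission time. */
	target_put_sess_cmd(&fcmd->se_cmd);
}
#endif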
2691
2692 /**
2693  * target_get_sess_cmd - Add command to active ->sess_cmd_list
2694  * @se_cmd:     command descriptor to add
2695  * @ack_kref:   Signal that fabric will perform an ack target_put_sess_cmd()
2696  */
2697 int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
2698 {
2699         struct se_session *se_sess = se_cmd->se_sess;
2700         unsigned long flags;
2701         int ret = 0;
2702
2703         /*
2704          * Add a second kref if the fabric caller is expecting to handle
2705          * fabric acknowledgement that requires two target_put_sess_cmd()
2706          * invocations before se_cmd descriptor release.
2707          */
2708         if (ack_kref) {
2709                 if (!kref_get_unless_zero(&se_cmd->cmd_kref))
2710                         return -EINVAL;
2711
2712                 se_cmd->se_cmd_flags |= SCF_ACK_KREF;
2713         }
2714
2715         spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2716         if (se_sess->sess_tearing_down) {
2717                 ret = -ESHUTDOWN;
2718                 goto out;
2719         }
2720         se_cmd->transport_state |= CMD_T_PRE_EXECUTE;
2721         list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
2722 out:
2723         spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2724
2725         if (ret && ack_kref)
2726                 target_put_sess_cmd(se_cmd);
2727
2728         return ret;
2729 }
2730 EXPORT_SYMBOL(target_get_sess_cmd);
2731
2732 static void target_free_cmd_mem(struct se_cmd *cmd)
2733 {
2734         transport_free_pages(cmd);
2735
2736         if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
2737                 core_tmr_release_req(cmd->se_tmr_req);
2738         if (cmd->t_task_cdb != cmd->__t_task_cdb)
2739                 kfree(cmd->t_task_cdb);
2740 }
2741
2742 static void target_release_cmd_kref(struct kref *kref)
2743 {
2744         struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
2745         struct se_session *se_sess = se_cmd->se_sess;
2746         struct completion *compl = se_cmd->compl;
2747         unsigned long flags;
2748
2749         if (se_sess) {
2750                 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2751                 list_del_init(&se_cmd->se_cmd_list);
2752                 if (se_sess->sess_tearing_down && list_empty(&se_sess->sess_cmd_list))
2753                         wake_up(&se_sess->cmd_list_wq);
2754                 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2755         }
2756
2757         target_free_cmd_mem(se_cmd);
2758         se_cmd->se_tfo->release_cmd(se_cmd);
2759         if (compl)
2760                 complete(compl);
2761 }
2762
2763 /**
2764  * target_put_sess_cmd - decrease the command reference count
2765  * @se_cmd:     command to drop a reference from
2766  *
2767  * Returns 1 if and only if this target_put_sess_cmd() call caused the
2768  * refcount to drop to zero. Returns zero otherwise.
2769  */
2770 int target_put_sess_cmd(struct se_cmd *se_cmd)
2771 {
2772         return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
2773 }
2774 EXPORT_SYMBOL(target_put_sess_cmd);
2775
2776 static const char *data_dir_name(enum dma_data_direction d)
2777 {
2778         switch (d) {
2779         case DMA_BIDIRECTIONAL: return "BIDI";
2780         case DMA_TO_DEVICE:     return "WRITE";
2781         case DMA_FROM_DEVICE:   return "READ";
2782         case DMA_NONE:          return "NONE";
2783         }
2784
2785         return "(?)";
2786 }
2787
2788 static const char *cmd_state_name(enum transport_state_table t)
2789 {
2790         switch (t) {
2791         case TRANSPORT_NO_STATE:        return "NO_STATE";
2792         case TRANSPORT_NEW_CMD:         return "NEW_CMD";
2793         case TRANSPORT_WRITE_PENDING:   return "WRITE_PENDING";
2794         case TRANSPORT_PROCESSING:      return "PROCESSING";
2795         case TRANSPORT_COMPLETE:        return "COMPLETE";
2796         case TRANSPORT_ISTATE_PROCESSING:
2797                                         return "ISTATE_PROCESSING";
2798         case TRANSPORT_COMPLETE_QF_WP:  return "COMPLETE_QF_WP";
2799         case TRANSPORT_COMPLETE_QF_OK:  return "COMPLETE_QF_OK";
2800         case TRANSPORT_COMPLETE_QF_ERR: return "COMPLETE_QF_ERR";
2801         }
2802
2803         return "(?)";
2804 }
2805
2806 static void target_append_str(char **str, const char *txt)
2807 {
2808         char *prev = *str;
2809
2810         *str = *str ? kasprintf(GFP_ATOMIC, "%s,%s", *str, txt) :
2811                 kstrdup(txt, GFP_ATOMIC);
2812         kfree(prev);
2813 }
2814
2815 /*
2816  * Convert a transport state bitmask into a string. The caller is
2817  * responsible for freeing the returned pointer.
2818  */
2819 static char *target_ts_to_str(u32 ts)
2820 {
2821         char *str = NULL;
2822
2823         if (ts & CMD_T_ABORTED)
2824                 target_append_str(&str, "aborted");
2825         if (ts & CMD_T_ACTIVE)
2826                 target_append_str(&str, "active");
2827         if (ts & CMD_T_COMPLETE)
2828                 target_append_str(&str, "complete");
2829         if (ts & CMD_T_SENT)
2830                 target_append_str(&str, "sent");
2831         if (ts & CMD_T_STOP)
2832                 target_append_str(&str, "stop");
2833         if (ts & CMD_T_FABRIC_STOP)
2834                 target_append_str(&str, "fabric_stop");
2835
2836         return str;
2837 }
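/*
 * Example (illustrative only): for a command that is both active and sent,
 * target_ts_to_str(CMD_T_ACTIVE | CMD_T_SENT) returns a kmalloc'ed string
 * "active,sent"; the caller must kfree() it, as target_show_cmd() below does.
 */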
2838
2839 static const char *target_tmf_name(enum tcm_tmreq_table tmf)
2840 {
2841         switch (tmf) {
2842         case TMR_ABORT_TASK:            return "ABORT_TASK";
2843         case TMR_ABORT_TASK_SET:        return "ABORT_TASK_SET";
2844         case TMR_CLEAR_ACA:             return "CLEAR_ACA";
2845         case TMR_CLEAR_TASK_SET:        return "CLEAR_TASK_SET";
2846         case TMR_LUN_RESET:             return "LUN_RESET";
2847         case TMR_TARGET_WARM_RESET:     return "TARGET_WARM_RESET";
2848         case TMR_TARGET_COLD_RESET:     return "TARGET_COLD_RESET";
2849         case TMR_UNKNOWN:               break;
2850         }
2851         return "(?)";
2852 }
2853
2854 void target_show_cmd(const char *pfx, struct se_cmd *cmd)
2855 {
2856         char *ts_str = target_ts_to_str(cmd->transport_state);
2857         const u8 *cdb = cmd->t_task_cdb;
2858         struct se_tmr_req *tmf = cmd->se_tmr_req;
2859
2860         if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
2861                 pr_debug("%scmd %#02x:%#02x with tag %#llx dir %s i_state %d t_state %s len %d refcnt %d transport_state %s\n",
2862                          pfx, cdb[0], cdb[1], cmd->tag,
2863                          data_dir_name(cmd->data_direction),
2864                          cmd->se_tfo->get_cmd_state(cmd),
2865                          cmd_state_name(cmd->t_state), cmd->data_length,
2866                          kref_read(&cmd->cmd_kref), ts_str);
2867         } else {
2868                 pr_debug("%stmf %s with tag %#llx ref_task_tag %#llx i_state %d t_state %s refcnt %d transport_state %s\n",
2869                          pfx, target_tmf_name(tmf->function), cmd->tag,
2870                          tmf->ref_task_tag, cmd->se_tfo->get_cmd_state(cmd),
2871                          cmd_state_name(cmd->t_state),
2872                          kref_read(&cmd->cmd_kref), ts_str);
2873         }
2874         kfree(ts_str);
2875 }
2876 EXPORT_SYMBOL(target_show_cmd);
2877
2878 /**
2879  * target_sess_cmd_list_set_waiting - Set sess_tearing_down so no new commands are queued.
2880  * @se_sess:    session to flag
2881  */
2882 void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
2883 {
2884         unsigned long flags;
2885
2886         spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2887         se_sess->sess_tearing_down = 1;
2888         spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2889 }
2890 EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
2891
2892 /**
2893  * target_wait_for_sess_cmds - Wait for outstanding commands
2894  * @se_sess:    session to wait for active I/O
2895  */
2896 void target_wait_for_sess_cmds(struct se_session *se_sess)
2897 {
2898         struct se_cmd *cmd;
2899         int ret;
2900
2901         WARN_ON_ONCE(!se_sess->sess_tearing_down);
2902
2903         spin_lock_irq(&se_sess->sess_cmd_lock);
2904         do {
2905                 ret = wait_event_lock_irq_timeout(
2906                                 se_sess->cmd_list_wq,
2907                                 list_empty(&se_sess->sess_cmd_list),
2908                                 se_sess->sess_cmd_lock, 180 * HZ);
2909                 list_for_each_entry(cmd, &se_sess->sess_cmd_list, se_cmd_list)
2910                         target_show_cmd("session shutdown: still waiting for ",
2911                                         cmd);
2912         } while (ret <= 0);
2913         spin_unlock_irq(&se_sess->sess_cmd_lock);
2914 }
2915 EXPORT_SYMBOL(target_wait_for_sess_cmds);
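/*
 * Illustrative teardown order (a sketch of how fabric drivers are expected to
 * use the two helpers above when closing a session; the exact shutdown path
 * differs between fabric modules):
 *
 *	target_sess_cmd_list_set_waiting(se_sess);	// refuse new commands
 *	target_wait_for_sess_cmds(se_sess);		// drain outstanding I/O
 *	transport_deregister_session(se_sess);		// assumed follow-up step
 */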
2916
2917 static void target_lun_confirm(struct percpu_ref *ref)
2918 {
2919         struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);
2920
2921         complete(&lun->lun_ref_comp);
2922 }
2923
2924 void transport_clear_lun_ref(struct se_lun *lun)
2925 {
2926         /*
2927          * Mark the percpu-ref as DEAD, switch to atomic_t mode, drop
2928          * the initial reference and schedule confirm kill to be
2929          * executed after one full RCU grace period has completed.
2930          */
2931         percpu_ref_kill_and_confirm(&lun->lun_ref, target_lun_confirm);
2932         /*
2933          * The first completion waits for percpu_ref_switch_to_atomic_rcu()
2934          * to call target_lun_confirm after lun->lun_ref has been marked
2935          * as __PERCPU_REF_DEAD on all CPUs, and switches to atomic_t
2936          * mode so that percpu_ref_tryget_live() lookup of lun->lun_ref
2937          * fails for all new incoming I/O.
2938          */
2939         wait_for_completion(&lun->lun_ref_comp);
2940         /*
2941          * The second completion waits for percpu_ref_put_many() to
2942          * invoke ->release() after lun->lun_ref has switched to
2943          * atomic_t mode, and lun->lun_ref.count has reached zero.
2944          *
2945          * At this point all target-core lun->lun_ref references have
2946          * been dropped via transport_lun_remove_cmd(), and it's safe
2947          * to proceed with the remaining LUN shutdown.
2948          */
2949         wait_for_completion(&lun->lun_shutdown_comp);
2950 }
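/*
 * For context, the per-I/O counterpart of the shutdown above is roughly
 * (simplified sketch of the lookup/remove paths elsewhere in target core):
 *
 *	if (!percpu_ref_tryget_live(&lun->lun_ref))
 *		return TCM_NON_EXISTENT_LUN;	// LUN is already shutting down
 *	// ... dispatch the command to the backend ...
 *	percpu_ref_put(&lun->lun_ref);		// e.g. transport_lun_remove_cmd()
 */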
2951
2952 static bool
2953 __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
2954                            bool *aborted, bool *tas, unsigned long *flags)
2955         __releases(&cmd->t_state_lock)
2956         __acquires(&cmd->t_state_lock)
2957 {
2959         assert_spin_locked(&cmd->t_state_lock);
2960         WARN_ON_ONCE(!irqs_disabled());
2961
2962         if (fabric_stop)
2963                 cmd->transport_state |= CMD_T_FABRIC_STOP;
2964
2965         if (cmd->transport_state & CMD_T_ABORTED)
2966                 *aborted = true;
2967
2968         if (cmd->transport_state & CMD_T_TAS)
2969                 *tas = true;
2970
2971         if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
2972             !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
2973                 return false;
2974
2975         if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
2976             !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
2977                 return false;
2978
2979         if (!(cmd->transport_state & CMD_T_ACTIVE))
2980                 return false;
2981
2982         if (fabric_stop && *aborted)
2983                 return false;
2984
2985         cmd->transport_state |= CMD_T_STOP;
2986
2987         target_show_cmd("wait_for_tasks: Stopping ", cmd);
2988
2989         spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
2990
2991         while (!wait_for_completion_timeout(&cmd->t_transport_stop_comp,
2992                                             180 * HZ))
2993                 target_show_cmd("wait for tasks: ", cmd);
2994
2995         spin_lock_irqsave(&cmd->t_state_lock, *flags);
2996         cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
2997
2998         pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->"
2999                  "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag);
3000
3001         return true;
3002 }
3003
3004 /**
3005  * transport_wait_for_tasks - set CMD_T_STOP and wait for t_transport_stop_comp
3006  * @cmd: command to wait on
3007  */
3008 bool transport_wait_for_tasks(struct se_cmd *cmd)
3009 {
3010         unsigned long flags;
3011         bool ret, aborted = false, tas = false;
3012
3013         spin_lock_irqsave(&cmd->t_state_lock, flags);
3014         ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
3015         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3016
3017         return ret;
3018 }
3019 EXPORT_SYMBOL(transport_wait_for_tasks);
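/*
 * Counterpart sketch (simplified from the completion paths elsewhere in
 * target core, shown only for orientation): the completion waited on above
 * is signalled once the completion side observes CMD_T_STOP, roughly:
 *
 *	if (cmd->transport_state & CMD_T_STOP) {
 *		complete_all(&cmd->t_transport_stop_comp);
 *		return;
 *	}
 */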
3020
3021 struct sense_info {
3022         u8 key;
3023         u8 asc;
3024         u8 ascq;
3025         bool add_sector_info;
3026 };
3027
3028 static const struct sense_info sense_info_table[] = {
3029         [TCM_NO_SENSE] = {
3030                 .key = NOT_READY
3031         },
3032         [TCM_NON_EXISTENT_LUN] = {
3033                 .key = ILLEGAL_REQUEST,
3034                 .asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */
3035         },
3036         [TCM_UNSUPPORTED_SCSI_OPCODE] = {
3037                 .key = ILLEGAL_REQUEST,
3038                 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */
3039         },
3040         [TCM_SECTOR_COUNT_TOO_MANY] = {
3041                 .key = ILLEGAL_REQUEST,
3042                 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */
3043         },
3044         [TCM_UNKNOWN_MODE_PAGE] = {
3045                 .key = ILLEGAL_REQUEST,
3046                 .asc = 0x24, /* INVALID FIELD IN CDB */
3047         },
3048         [TCM_CHECK_CONDITION_ABORT_CMD] = {
3049                 .key = ABORTED_COMMAND,
3050                 .asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */
3051                 .ascq = 0x03,
3052         },
3053         [TCM_INCORRECT_AMOUNT_OF_DATA] = {
3054                 .key = ABORTED_COMMAND,
3055                 .asc = 0x0c, /* WRITE ERROR */
3056                 .ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */
3057         },
3058         [TCM_INVALID_CDB_FIELD] = {
3059                 .key = ILLEGAL_REQUEST,
3060                 .asc = 0x24, /* INVALID FIELD IN CDB */
3061         },
3062         [TCM_INVALID_PARAMETER_LIST] = {
3063                 .key = ILLEGAL_REQUEST,
3064                 .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */
3065         },
3066         [TCM_TOO_MANY_TARGET_DESCS] = {
3067                 .key = ILLEGAL_REQUEST,
3068                 .asc = 0x26,
3069                 .ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */
3070         },
3071         [TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = {
3072                 .key = ILLEGAL_REQUEST,
3073                 .asc = 0x26,
3074                 .ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */
3075         },
3076         [TCM_TOO_MANY_SEGMENT_DESCS] = {
3077                 .key = ILLEGAL_REQUEST,
3078                 .asc = 0x26,
3079                 .ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */
3080         },
3081         [TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = {
3082                 .key = ILLEGAL_REQUEST,
3083                 .asc = 0x26,
3084                 .ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */
3085         },
3086         [TCM_PARAMETER_LIST_LENGTH_ERROR] = {
3087                 .key = ILLEGAL_REQUEST,
3088                 .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */
3089         },
3090         [TCM_UNEXPECTED_UNSOLICITED_DATA] = {
3091                 .key = ILLEGAL_REQUEST,
3092                 .asc = 0x0c, /* WRITE ERROR */
3093                 .ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */
3094         },
3095         [TCM_SERVICE_CRC_ERROR] = {
3096                 .key = ABORTED_COMMAND,
3097                 .asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */
3098                 .ascq = 0x05, /* N/A */
3099         },
3100         [TCM_SNACK_REJECTED] = {
3101                 .key = ABORTED_COMMAND,
3102                 .asc = 0x11, /* READ ERROR */
3103                 .ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */
3104         },
3105         [TCM_WRITE_PROTECTED] = {
3106                 .key = DATA_PROTECT,
3107                 .asc = 0x27, /* WRITE PROTECTED */
3108         },
3109         [TCM_ADDRESS_OUT_OF_RANGE] = {
3110                 .key = ILLEGAL_REQUEST,
3111                 .asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
3112         },
3113         [TCM_CHECK_CONDITION_UNIT_ATTENTION] = {
3114                 .key = UNIT_ATTENTION,
3115         },
3116         [TCM_CHECK_CONDITION_NOT_READY] = {
3117                 .key = NOT_READY,
3118         },
3119         [TCM_MISCOMPARE_VERIFY] = {
3120                 .key = MISCOMPARE,
3121                 .asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */
3122                 .ascq = 0x00,
3123         },
3124         [TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = {
3125                 .key = ABORTED_COMMAND,
3126                 .asc = 0x10,
3127                 .ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */
3128                 .add_sector_info = true,
3129         },
3130         [TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = {
3131                 .key = ABORTED_COMMAND,
3132                 .asc = 0x10,
3133                 .ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
3134                 .add_sector_info = true,
3135         },
3136         [TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = {
3137                 .key = ABORTED_COMMAND,
3138                 .asc = 0x10,
3139                 .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
3140                 .add_sector_info = true,
3141         },
3142         [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
3143                 .key = COPY_ABORTED,
3144                 .asc = 0x0d,
3145                 .ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */
3147         },
3148         [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
3149                 /*
3150                  * Returning ILLEGAL REQUEST would cause immediate IO errors on
3151                  * Solaris initiators.  Returning NOT READY instead means the
3152                  * operations will be retried a finite number of times and we
3153                  * can survive intermittent errors.
3154                  */
3155                 .key = NOT_READY,
3156                 .asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
3157         },
3158         [TCM_INSUFFICIENT_REGISTRATION_RESOURCES] = {
3159                 /*
3160                  * From spc4r22, sections 5.7.7 and 5.7.8:
3161                  * If a PERSISTENT RESERVE OUT command with a REGISTER service action
3162                  * or a REGISTER AND IGNORE EXISTING KEY service action or a
3163                  * REGISTER AND MOVE service action is attempted, but there are
3164                  * insufficient device server resources to complete the operation,
3165                  * then the command shall be terminated with CHECK CONDITION status,
3166                  * with the sense key set to ILLEGAL REQUEST, and the additional
3167                  * sense code set to INSUFFICIENT REGISTRATION RESOURCES.
3168                  */
3169                 .key = ILLEGAL_REQUEST,
3170                 .asc = 0x55,
3171                 .ascq = 0x04, /* INSUFFICIENT REGISTRATION RESOURCES */
3172         },
3173 };
3174
3175 /**
3176  * translate_sense_reason - translate a sense reason into T10 key, asc and ascq
3177  * @cmd: SCSI command in which the resulting sense buffer or SCSI status will
3178  *   be stored.
3179  * @reason: LIO sense reason code. If this argument has the value
3180  *   TCM_CHECK_CONDITION_UNIT_ATTENTION, try to dequeue a unit attention. If
3181  *   dequeuing a unit attention fails due to multiple commands being processed
3182  *   concurrently, set the command status to BUSY.
3183  *
3184  * This function stores its result in @cmd and does not return a value.
3185  */
3186 static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
3187 {
3188         const struct sense_info *si;
3189         u8 *buffer = cmd->sense_buffer;
3190         int r = (__force int)reason;
3191         u8 key, asc, ascq;
3192         bool desc_format = target_sense_desc_format(cmd->se_dev);
3193
3194         if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key)
3195                 si = &sense_info_table[r];
3196         else
3197                 si = &sense_info_table[(__force int)
3198                                        TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];
3199
3200         key = si->key;
3201         if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
3202                 if (!core_scsi3_ua_for_check_condition(cmd, &key, &asc,
3203                                                        &ascq)) {
3204                         cmd->scsi_status = SAM_STAT_BUSY;
3205                         return;
3206                 }
3207         } else if (si->asc == 0) {
3208                 WARN_ON_ONCE(cmd->scsi_asc == 0);
3209                 asc = cmd->scsi_asc;
3210                 ascq = cmd->scsi_ascq;
3211         } else {
3212                 asc = si->asc;
3213                 ascq = si->ascq;
3214         }
3215
3216         cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
3217         cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
3218         cmd->scsi_sense_length  = TRANSPORT_SENSE_BUFFER;
3219         scsi_build_sense_buffer(desc_format, buffer, key, asc, ascq);
3220         if (si->add_sector_info)
3221                 WARN_ON_ONCE(scsi_set_sense_information(buffer,
3222                                                         cmd->scsi_sense_length,
3223                                                         cmd->bad_sector) < 0);
3224 }
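/*
 * Worked example (illustrative): for reason == TCM_INVALID_CDB_FIELD on a
 * device using fixed-format sense, the table above yields key ILLEGAL_REQUEST
 * (0x5), asc 0x24, ascq 0x00; scsi_build_sense_buffer() then writes response
 * code 0x70 with byte 2 = 0x05, byte 12 = 0x24 and byte 13 = 0x00, and the
 * command completes with SAM_STAT_CHECK_CONDITION.
 */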
3225
3226 int
3227 transport_send_check_condition_and_sense(struct se_cmd *cmd,
3228                 sense_reason_t reason, int from_transport)
3229 {
3230         unsigned long flags;
3231
3232         spin_lock_irqsave(&cmd->t_state_lock, flags);
3233         if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
3234                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3235                 return 0;
3236         }
3237         cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
3238         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3239
3240         if (!from_transport)
3241                 translate_sense_reason(cmd, reason);
3242
3243         trace_target_cmd_complete(cmd);
3244         return cmd->se_tfo->queue_status(cmd);
3245 }
3246 EXPORT_SYMBOL(transport_send_check_condition_and_sense);
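/*
 * Typical use (sketch only): callers report a failure by passing one of the
 * sense_reason_t values handled above with from_transport == 0, so that
 * translate_sense_reason() builds the sense data from sense_info_table, e.g.:
 *
 *	return transport_send_check_condition_and_sense(cmd,
 *			TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
 */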
3247
3248 static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
3249         __releases(&cmd->t_state_lock)
3250         __acquires(&cmd->t_state_lock)
3251 {
3252         int ret;
3253
3254         assert_spin_locked(&cmd->t_state_lock);
3255         WARN_ON_ONCE(!irqs_disabled());
3256
3257         if (!(cmd->transport_state & CMD_T_ABORTED))
3258                 return 0;
3259         /*
3260          * If cmd has been aborted but either no status is to be sent or it has
3261          * already been sent, just return
3262          */
3263         if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) {
3264                 if (send_status)
3265                         cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
3266                 return 1;
3267         }
3268
3269         pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:"
3270                 " 0x%02x ITT: 0x%08llx\n", cmd->t_task_cdb[0], cmd->tag);
3271
3272         cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
3273         cmd->scsi_status = SAM_STAT_TASK_ABORTED;
3274         trace_target_cmd_complete(cmd);
3275
3276         spin_unlock_irq(&cmd->t_state_lock);
3277         ret = cmd->se_tfo->queue_status(cmd);
3278         if (ret)
3279                 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
3280         spin_lock_irq(&cmd->t_state_lock);
3281
3282         return 1;
3283 }
3284
3285 int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
3286 {
3287         int ret;
3288
3289         spin_lock_irq(&cmd->t_state_lock);
3290         ret = __transport_check_aborted_status(cmd, send_status);
3291         spin_unlock_irq(&cmd->t_state_lock);
3292
3293         return ret;
3294 }
3295 EXPORT_SYMBOL(transport_check_aborted_status);
3296
3297 void transport_send_task_abort(struct se_cmd *cmd)
3298 {
3299         unsigned long flags;
3300         int ret;
3301
3302         spin_lock_irqsave(&cmd->t_state_lock, flags);
3303         if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
3304                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3305                 return;
3306         }
3307         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3308
3309         /*
3310          * If there are still expected incoming fabric WRITEs, we wait
3311          * until they have completed before sending a TASK_ABORTED
3312          * response.  This response with TASK_ABORTED status will be
3313          * queued back to fabric module by transport_check_aborted_status().
3314          */
3315         if (cmd->data_direction == DMA_TO_DEVICE) {
3316                 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
3317                         spin_lock_irqsave(&cmd->t_state_lock, flags);
3318                         if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) {
3319                                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3320                                 goto send_abort;
3321                         }
3322                         cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
3323                         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3324                         return;
3325                 }
3326         }
3327 send_abort:
3328         cmd->scsi_status = SAM_STAT_TASK_ABORTED;
3329
3330         transport_lun_remove_cmd(cmd);
3331
3332         pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
3333                  cmd->t_task_cdb[0], cmd->tag);
3334
3335         trace_target_cmd_complete(cmd);
3336         ret = cmd->se_tfo->queue_status(cmd);
3337         if (ret)
3338                 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
3339 }
3340
3341 static void target_tmr_work(struct work_struct *work)
3342 {
3343         struct se_cmd *cmd = container_of(work, struct se_cmd, work);
3344         struct se_device *dev = cmd->se_dev;
3345         struct se_tmr_req *tmr = cmd->se_tmr_req;
3346         unsigned long flags;
3347         int ret;
3348
3349         spin_lock_irqsave(&cmd->t_state_lock, flags);
3350         if (cmd->transport_state & CMD_T_ABORTED) {
3351                 tmr->response = TMR_FUNCTION_REJECTED;
3352                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3353                 goto check_stop;
3354         }
3355         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3356
3357         switch (tmr->function) {
3358         case TMR_ABORT_TASK:
3359                 core_tmr_abort_task(dev, tmr, cmd->se_sess);
3360                 break;
3361         case TMR_ABORT_TASK_SET:
3362         case TMR_CLEAR_ACA:
3363         case TMR_CLEAR_TASK_SET:
3364                 tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
3365                 break;
3366         case TMR_LUN_RESET:
3367                 ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
3368                 tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
3369                                          TMR_FUNCTION_REJECTED;
3370                 if (tmr->response == TMR_FUNCTION_COMPLETE) {
3371                         target_ua_allocate_lun(cmd->se_sess->se_node_acl,
3372                                                cmd->orig_fe_lun, 0x29,
3373                                                ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED);
3374                 }
3375                 break;
3376         case TMR_TARGET_WARM_RESET:
3377                 tmr->response = TMR_FUNCTION_REJECTED;
3378                 break;
3379         case TMR_TARGET_COLD_RESET:
3380                 tmr->response = TMR_FUNCTION_REJECTED;
3381                 break;
3382         default:
3383                 pr_err("Unknown TMR function: 0x%02x.\n",
3384                                 tmr->function);
3385                 tmr->response = TMR_FUNCTION_REJECTED;
3386                 break;
3387         }
3388
3389         spin_lock_irqsave(&cmd->t_state_lock, flags);
3390         if (cmd->transport_state & CMD_T_ABORTED) {
3391                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3392                 goto check_stop;
3393         }
3394         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3395
3396         cmd->se_tfo->queue_tm_rsp(cmd);
3397
3398 check_stop:
3399         transport_lun_remove_cmd(cmd);
3400         transport_cmd_check_stop_to_fabric(cmd);
3401 }
3402
3403 int transport_generic_handle_tmr(
3404         struct se_cmd *cmd)
3405 {
3406         unsigned long flags;
3407         bool aborted = false;
3408
3409         spin_lock_irqsave(&cmd->t_state_lock, flags);
3410         if (cmd->transport_state & CMD_T_ABORTED) {
3411                 aborted = true;
3412         } else {
3413                 cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
3414                 cmd->transport_state |= CMD_T_ACTIVE;
3415         }
3416         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3417
3418         if (aborted) {
3419                 pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d"
3420                         " ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function,
3421                         cmd->se_tmr_req->ref_task_tag, cmd->tag);
3422                 transport_lun_remove_cmd(cmd);
3423                 transport_cmd_check_stop_to_fabric(cmd);
3424                 return 0;
3425         }
3426
3427         INIT_WORK(&cmd->work, target_tmr_work);
3428         queue_work(cmd->se_dev->tmr_wq, &cmd->work);
3429         return 0;
3430 }
3431 EXPORT_SYMBOL(transport_generic_handle_tmr);
3432
3433 bool
3434 target_check_wce(struct se_device *dev)
3435 {
3436         bool wce = false;
3437
3438         if (dev->transport->get_write_cache)
3439                 wce = dev->transport->get_write_cache(dev);
3440         else if (dev->dev_attrib.emulate_write_cache > 0)
3441                 wce = true;
3442
3443         return wce;
3444 }
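/*
 * Illustrative use (not taken from this file): a caller deciding whether a
 * write must reach stable media immediately would typically check FUA, which
 * target_check_fua() below only reports when write-cache emulation is also
 * enabled:
 *
 *	bool fua = target_check_fua(dev);	// true only if WCE is also on
 */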
3445
3446 bool
3447 target_check_fua(struct se_device *dev)
3448 {
3449         return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0;
3450 }