Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target...
authorLinus Torvalds <torvalds@linux-foundation.org>
Tue, 21 Oct 2014 20:06:38 +0000 (13:06 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Tue, 21 Oct 2014 20:06:38 +0000 (13:06 -0700)
Pull SCSI target updates from Nicholas Bellinger:
 "Here are the target updates for v3.18-rc2 code.  These were
  originally destined for -rc1, but due to the combination of travel
  last week for KVM Forum and my mistake of taking the three week merge
  window literally, the pull request slipped..  Apologies for that.

  Things were reasonably quiet this round.  The highlights include:

   - New userspace backend driver (target_core_user.ko) by Shaohua Li
     and Andy Grover
   - A number of cleanups in target, iscsi-target and qla_target code
     from Joern Engel
   - Fix an Oops related to queue full handling with CHECK_CONDITION
     status from Quinn Tran
   - Fix to disable TX completion interrupt coalescing in iser-target,
     that was causing problems on some hardware
   - Fix for PR APTPL metadata handling with demo-mode ACLs

  I'm most excited about the new backend driver that uses UIO + shared
  memory ring to dispatch I/O and control commands into user-space.
  This was probably the most requested feature by users over the last
  couple of years, and opens up a new area of development + porting of
  existing user-space storage applications to LIO.  Thanks to Shaohua +
  Andy for making this happen.

  Also another honorable mention, a new Xen PV SCSI driver was merged
  via the xen/tip.git tree recently, which puts us now at 10 target
  drivers in upstream! Thanks to David Vrabel + Juergen Gross for their
  work to get this code merged"

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (40 commits)
  target/file: fix inclusive vfs_fsync_range() end
  iser-target: Disable TX completion interrupt coalescing
  target: Add force_pr_aptpl device attribute
  target: Fix APTPL metadata handling for dynamic MappedLUNs
  qla_target: don't delete changed nacls
  target/user: Recalculate pad size inside is_ring_space_avail()
  tcm_loop: Fixup tag handling
  iser-target: Fix smatch warning
  target/user: Fix up smatch warnings in tcmu_netlink_event
  target: Add a user-passthrough backstore
  target: Add documentation on the target userspace pass-through driver
  uio: Export definition of struct uio_device
  target: Remove unneeded check in sbc_parse_cdb
  target: Fix queue full status NULL pointer for SCF_TRANSPORT_TASK_SENSE
  qla_target: rearrange struct qla_tgt_prm
  qla_target: improve qlt_unmap_sg()
  qla_target: make some global functions static
  qla_target: remove unused parameter
  target: simplify core_tmr_abort_task
  target: encapsulate smp_mb__after_atomic()
  ...

1  2 
drivers/infiniband/ulp/isert/ib_isert.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/qla2xxx/qla_target.h
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_util.c
drivers/target/target_core_configfs.c
drivers/target/target_core_pscsi.c
drivers/target/target_core_tpg.c
include/uapi/linux/Kbuild

index 0bea5776bcbcc945166062d24dd70aedc9e9e06c,f7191128b725107fb06291fb02ad81a9f2210142..3effa931fce259cdf661af95f42ad1463450acf4
@@@ -586,12 -586,17 +586,12 @@@ isert_connect_request(struct rdma_cm_i
        init_completion(&isert_conn->conn_wait);
        init_completion(&isert_conn->conn_wait_comp_err);
        kref_init(&isert_conn->conn_kref);
 -      kref_get(&isert_conn->conn_kref);
        mutex_init(&isert_conn->conn_mutex);
        spin_lock_init(&isert_conn->conn_lock);
        INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
  
        cma_id->context = isert_conn;
        isert_conn->conn_cm_id = cma_id;
 -      isert_conn->responder_resources = event->param.conn.responder_resources;
 -      isert_conn->initiator_depth = event->param.conn.initiator_depth;
 -      pr_debug("Using responder_resources: %u initiator_depth: %u\n",
 -               isert_conn->responder_resources, isert_conn->initiator_depth);
  
        isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
                                        ISER_RX_LOGIN_SIZE, GFP_KERNEL);
                goto out_rsp_dma_map;
        }
  
 +      /* Set max inflight RDMA READ requests */
 +      isert_conn->initiator_depth = min_t(u8,
 +                              event->param.conn.initiator_depth,
 +                              device->dev_attr.max_qp_init_rd_atom);
 +      pr_debug("Using initiator_depth: %u\n", isert_conn->initiator_depth);
 +
        isert_conn->conn_device = device;
        isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
        if (IS_ERR(isert_conn->conn_pd)) {
@@@ -747,9 -746,7 +747,9 @@@ isert_connect_release(struct isert_con
  static void
  isert_connected_handler(struct rdma_cm_id *cma_id)
  {
 -      return;
 +      struct isert_conn *isert_conn = cma_id->context;
 +
 +      kref_get(&isert_conn->conn_kref);
  }
  
  static void
@@@ -801,6 -798,7 +801,6 @@@ isert_disconnect_work(struct work_struc
  
  wake_up:
        complete(&isert_conn->conn_wait);
 -      isert_put_conn(isert_conn);
  }
  
  static void
@@@ -2185,7 -2183,7 +2185,7 @@@ isert_put_response(struct iscsi_conn *c
                isert_cmd->tx_desc.num_sge = 2;
        }
  
-       isert_init_send_wr(isert_conn, isert_cmd, send_wr, true);
+       isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
  
        pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
  
@@@ -2609,45 -2607,58 +2609,45 @@@ isert_fast_reg_mr(struct isert_conn *is
        return ret;
  }
  
 -static inline enum ib_t10_dif_type
 -se2ib_prot_type(enum target_prot_type prot_type)
 +static inline void
 +isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
 +                   struct ib_sig_domain *domain)
  {
 -      switch (prot_type) {
 -      case TARGET_DIF_TYPE0_PROT:
 -              return IB_T10DIF_NONE;
 -      case TARGET_DIF_TYPE1_PROT:
 -              return IB_T10DIF_TYPE1;
 -      case TARGET_DIF_TYPE2_PROT:
 -              return IB_T10DIF_TYPE2;
 -      case TARGET_DIF_TYPE3_PROT:
 -              return IB_T10DIF_TYPE3;
 -      default:
 -              return IB_T10DIF_NONE;
 -      }
 -}
 +      domain->sig_type = IB_SIG_TYPE_T10_DIF;
 +      domain->sig.dif.bg_type = IB_T10DIF_CRC;
 +      domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;
 +      domain->sig.dif.ref_tag = se_cmd->reftag_seed;
 +      /*
 +       * At the moment we hard code those, but if in the future
 +       * the target core would like to use it, we will take it
 +       * from se_cmd.
 +       */
 +      domain->sig.dif.apptag_check_mask = 0xffff;
 +      domain->sig.dif.app_escape = true;
 +      domain->sig.dif.ref_escape = true;
 +      if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
 +          se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
 +              domain->sig.dif.ref_remap = true;
 +};
  
  static int
  isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
  {
 -      enum ib_t10_dif_type ib_prot_type = se2ib_prot_type(se_cmd->prot_type);
 -
 -      sig_attrs->mem.sig_type = IB_SIG_TYPE_T10_DIF;
 -      sig_attrs->wire.sig_type = IB_SIG_TYPE_T10_DIF;
 -      sig_attrs->mem.sig.dif.pi_interval =
 -                              se_cmd->se_dev->dev_attrib.block_size;
 -      sig_attrs->wire.sig.dif.pi_interval =
 -                              se_cmd->se_dev->dev_attrib.block_size;
 -
        switch (se_cmd->prot_op) {
        case TARGET_PROT_DIN_INSERT:
        case TARGET_PROT_DOUT_STRIP:
 -              sig_attrs->mem.sig.dif.type = IB_T10DIF_NONE;
 -              sig_attrs->wire.sig.dif.type = ib_prot_type;
 -              sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
 -              sig_attrs->wire.sig.dif.ref_tag = se_cmd->reftag_seed;
 +              sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
 +              isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
                break;
        case TARGET_PROT_DOUT_INSERT:
        case TARGET_PROT_DIN_STRIP:
 -              sig_attrs->mem.sig.dif.type = ib_prot_type;
 -              sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC;
 -              sig_attrs->mem.sig.dif.ref_tag = se_cmd->reftag_seed;
 -              sig_attrs->wire.sig.dif.type = IB_T10DIF_NONE;
 +              sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
 +              isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
                break;
        case TARGET_PROT_DIN_PASS:
        case TARGET_PROT_DOUT_PASS:
 -              sig_attrs->mem.sig.dif.type = ib_prot_type;
 -              sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC;
 -              sig_attrs->mem.sig.dif.ref_tag = se_cmd->reftag_seed;
 -              sig_attrs->wire.sig.dif.type = ib_prot_type;
 -              sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
 -              sig_attrs->wire.sig.dif.ref_tag = se_cmd->reftag_seed;
 +              isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
 +              isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
                break;
        default:
                pr_err("Unsupported PI operation %d\n", se_cmd->prot_op);
@@@ -2871,7 -2882,7 +2871,7 @@@ isert_put_datain(struct iscsi_conn *con
                                     &isert_cmd->tx_desc.iscsi_header);
                isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
                isert_init_send_wr(isert_conn, isert_cmd,
-                                  &isert_cmd->tx_desc.send_wr, true);
+                                  &isert_cmd->tx_desc.send_wr, false);
                isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
                wr->send_wr_num += 1;
        }
@@@ -3056,6 -3067,7 +3056,6 @@@ isert_rdma_accept(struct isert_conn *is
        int ret;
  
        memset(&cp, 0, sizeof(struct rdma_conn_param));
 -      cp.responder_resources = isert_conn->responder_resources;
        cp.initiator_depth = isert_conn->initiator_depth;
        cp.retry_count = 7;
        cp.rnr_retry_count = 7;
@@@ -3140,7 -3152,7 +3140,7 @@@ isert_accept_np(struct iscsi_np *np, st
  
  accept_wait:
        ret = down_interruptible(&isert_np->np_sem);
-       if (max_accept > 5)
+       if (ret || max_accept > 5)
                return -ENODEV;
  
        spin_lock_bh(&np->np_thread_lock);
@@@ -3203,7 -3215,7 +3203,7 @@@ static void isert_wait_conn(struct iscs
        pr_debug("isert_wait_conn: Starting \n");
  
        mutex_lock(&isert_conn->conn_mutex);
 -      if (isert_conn->conn_cm_id) {
 +      if (isert_conn->conn_cm_id && !isert_conn->disconnect) {
                pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
                rdma_disconnect(isert_conn->conn_cm_id);
        }
        wait_for_completion(&isert_conn->conn_wait_comp_err);
  
        wait_for_completion(&isert_conn->conn_wait);
 +      isert_put_conn(isert_conn);
  }
  
  static void isert_free_conn(struct iscsi_conn *conn)
index 829752cfd73fe87a6d5325cb4c9399e022ec92f7,68c90ad441f4f8312112b6c6d66dab2baa8bea85..a902fa1db7aff1a0fd3ca8622040b56f6e03cef1
  #include "qla_def.h"
  #include "qla_target.h"
  
 +static int ql2xtgt_tape_enable;
 +module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
 +MODULE_PARM_DESC(ql2xtgt_tape_enable,
 +              "Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");
 +
  static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
  module_param(qlini_mode, charp, S_IRUGO);
  MODULE_PARM_DESC(qlini_mode,
@@@ -59,8 -54,6 +59,8 @@@
  
  int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
  
 +static int temp_sam_status = SAM_STAT_BUSY;
 +
  /*
   * From scsi/fc/fc_fcp.h
   */
@@@ -108,10 -101,7 +108,11 @@@ static void qlt_send_term_exchange(stru
        *cmd, struct atio_from_isp *atio, int ha_locked);
  static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
        struct qla_tgt_srr_imm *imm, int ha_lock);
 +static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
 +      struct qla_tgt_cmd *cmd);
 +static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
 +      struct atio_from_isp *atio, uint16_t status, int qfull);
+ static void qlt_disable_vha(struct scsi_qla_host *vha);
  /*
   * Global Variables
   */
@@@ -189,28 -179,7 +190,28 @@@ struct scsi_qla_host *qlt_find_host_by_
        return NULL;
  }
  
- void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
 +static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
 +{
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
 +
 +      vha->hw->tgt.num_pend_cmds++;
 +      if (vha->hw->tgt.num_pend_cmds > vha->hw->qla_stats.stat_max_pend_cmds)
 +              vha->hw->qla_stats.stat_max_pend_cmds =
 +                      vha->hw->tgt.num_pend_cmds;
 +      spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
 +}
 +static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
 +{
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
 +      vha->hw->tgt.num_pend_cmds--;
 +      spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
 +}
 +
+ static void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
        struct atio_from_isp *atio)
  {
        ql_dbg(ql_dbg_tgt, vha, 0xe072,
@@@ -433,7 -402,7 +434,7 @@@ static int qlt_reset(struct scsi_qla_ho
  #if 0 /* FIXME: Re-enable Global event handling.. */
                /* Global event */
                atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
-               qlt_clear_tgt_db(ha->tgt.qla_tgt, 1);
+               qlt_clear_tgt_db(ha->tgt.qla_tgt);
                if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
                        sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
                            typeof(*sess), sess_list_entry);
@@@ -515,7 -484,7 +516,7 @@@ static void qlt_schedule_sess_for_delet
  }
  
  /* ha->hardware_lock supposed to be held on entry */
- static void qlt_clear_tgt_db(struct qla_tgt *tgt, bool local_only)
+ static void qlt_clear_tgt_db(struct qla_tgt *tgt)
  {
        struct qla_tgt_sess *sess;
  
@@@ -867,7 -836,7 +868,7 @@@ int qlt_stop_phase1(struct qla_tgt *tgt
        mutex_lock(&vha->vha_tgt.tgt_mutex);
        spin_lock_irqsave(&ha->hardware_lock, flags);
        tgt->tgt_stop = 1;
-       qlt_clear_tgt_db(tgt, true);
+       qlt_clear_tgt_db(tgt);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        mutex_unlock(&vha->vha_tgt.tgt_mutex);
        mutex_unlock(&qla_tgt_mutex);
@@@ -1040,8 -1009,6 +1041,8 @@@ static void qlt_send_notify_ack(struct 
            "qla_target(%d): Sending 24xx Notify Ack %d\n",
            vha->vp_idx, nack->u.isp24.status);
  
 +      /* Memory Barrier */
 +      wmb();
        qla2x00_start_iocbs(vha, vha->req);
  }
  
@@@ -1065,7 -1032,7 +1066,7 @@@ static void qlt_24xx_send_abts_resp(str
        if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
                return;
  
 -      resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
 +      resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
        if (!resp) {
                ql_dbg(ql_dbg_tgt, vha, 0xe04a,
                    "qla_target(%d): %s failed: unable to allocate "
  
        vha->vha_tgt.qla_tgt->abts_resp_expected++;
  
 +      /* Memory Barrier */
 +      wmb();
        qla2x00_start_iocbs(vha, vha->req);
  }
  
@@@ -1138,7 -1103,7 +1139,7 @@@ static void qlt_24xx_retry_term_exchang
        if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
                return;
  
 -      ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
 +      ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
        if (ctio == NULL) {
                ql_dbg(ql_dbg_tgt, vha, 0xe04b,
                    "qla_target(%d): %s failed: unable to allocate "
                CTIO7_FLAGS_TERMINATE);
        ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);
  
 +      /* Memory Barrier */
 +      wmb();
        qla2x00_start_iocbs(vha, vha->req);
  
        qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
@@@ -1216,7 -1179,6 +1217,7 @@@ static int __qlt_24xx_handle_abts(struc
  
        mcmd->sess = sess;
        memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
 +      mcmd->reset_count = vha->hw->chip_reset;
  
        rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
            abts->exchange_addr_to_abort);
@@@ -1339,8 -1301,6 +1340,8 @@@ static void qlt_24xx_send_task_mgmt_cti
        ctio->u.status1.response_len = __constant_cpu_to_le16(8);
        ctio->u.status1.sense_data[0] = resp_code;
  
 +      /* Memory Barrier */
 +      wmb();
        qla2x00_start_iocbs(ha, ha->req);
  }
  
@@@ -1362,21 -1322,6 +1363,21 @@@ void qlt_xmit_tm_rsp(struct qla_tgt_mgm
            mcmd, mcmd->fc_tm_rsp, mcmd->flags);
  
        spin_lock_irqsave(&ha->hardware_lock, flags);
 +
 +      if (qla2x00_reset_active(vha) || mcmd->reset_count != ha->chip_reset) {
 +              /*
 +               * Either a chip reset is active or this request was from
 +               * previous life, just abort the processing.
 +               */
 +              ql_dbg(ql_dbg_async, vha, 0xe100,
 +                      "RESET-TMR active/old-count/new-count = %d/%d/%d.\n",
 +                      qla2x00_reset_active(vha), mcmd->reset_count,
 +                      ha->chip_reset);
 +              ha->tgt.tgt_ops->free_mcmd(mcmd);
 +              spin_unlock_irqrestore(&ha->hardware_lock, flags);
 +              return;
 +      }
 +
        if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
                qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
                    0, 0, 0, 0, 0, 0);
@@@ -1453,6 -1398,8 +1454,6 @@@ static int qlt_pci_map_calc_cnt(struct 
                }
        }
  
 -      ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n",
 -          prm->seg_cnt, prm->req_cnt);
        return 0;
  
  out_err:
        return -1;
  }
  
- static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
-       struct qla_tgt_cmd *cmd)
+ static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
  {
        struct qla_hw_data *ha = vha->hw;
  
-       BUG_ON(!cmd->sg_mapped);
+       if (!cmd->sg_mapped)
+               return;
        pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
        cmd->sg_mapped = 0;
  
  static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
        uint32_t req_cnt)
  {
 -      struct qla_hw_data *ha = vha->hw;
 -      device_reg_t __iomem *reg = ha->iobase;
 -      uint32_t cnt;
 +      uint32_t cnt, cnt_in;
  
        if (vha->req->cnt < (req_cnt + 2)) {
 -              cnt = (uint16_t)RD_REG_DWORD(&reg->isp24.req_q_out);
 +              cnt = (uint16_t)RD_REG_DWORD(vha->req->req_q_out);
 +              cnt_in = (uint16_t)RD_REG_DWORD(vha->req->req_q_in);
  
 -              ql_dbg(ql_dbg_tgt, vha, 0xe00a,
 -                  "Request ring circled: cnt=%d, vha->->ring_index=%d, "
 -                  "vha->req->cnt=%d, req_cnt=%d\n", cnt,
 -                  vha->req->ring_index, vha->req->cnt, req_cnt);
                if  (vha->req->ring_index < cnt)
                        vha->req->cnt = cnt - vha->req->ring_index;
                else
        }
  
        if (unlikely(vha->req->cnt < (req_cnt + 2))) {
 -              ql_dbg(ql_dbg_tgt, vha, 0xe00b,
 -                  "qla_target(%d): There is no room in the "
 -                  "request ring: vha->req->ring_index=%d, vha->req->cnt=%d, "
 -                  "req_cnt=%d\n", vha->vp_idx, vha->req->ring_index,
 -                  vha->req->cnt, req_cnt);
 +              ql_dbg(ql_dbg_io, vha, 0x305a,
 +                  "qla_target(%d): There is no room in the request ring: vha->req->ring_index=%d, vha->req->cnt=%d, req_cnt=%d Req-out=%d Req-in=%d Req-Length=%d\n",
 +                  vha->vp_idx, vha->req->ring_index,
 +                  vha->req->cnt, req_cnt, cnt, cnt_in, vha->req->length);
                return -EAGAIN;
        }
        vha->req->cnt -= req_cnt;
@@@ -1539,7 -1493,7 +1541,7 @@@ static inline uint32_t qlt_make_handle(
                if (h > DEFAULT_OUTSTANDING_COMMANDS)
                        h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
                if (h == ha->tgt.current_handle) {
 -                      ql_dbg(ql_dbg_tgt, vha, 0xe04e,
 +                      ql_dbg(ql_dbg_io, vha, 0x305b,
                            "qla_target(%d): Ran out of "
                            "empty cmd slots in ha %p\n", vha->vp_idx, ha);
                        h = QLA_TGT_NULL_HANDLE;
@@@ -1596,6 -1550,9 +1598,6 @@@ static int qlt_24xx_build_ctio_pkt(stru
        pkt->u.status0.ox_id = cpu_to_le16(temp);
        pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
  
 -      ql_dbg(ql_dbg_tgt, vha, 0xe00c,
 -          "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n",
 -          vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT, temp);
        return 0;
  }
  
@@@ -1653,6 -1610,14 +1655,6 @@@ static void qlt_load_cont_data_segments
                        }
                        *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
  
 -                      ql_dbg(ql_dbg_tgt, vha, 0xe00d,
 -                          "S/G Segment Cont. phys_addr=%llx:%llx, len=%d\n",
 -                          (long long unsigned int)
 -                          pci_dma_hi32(sg_dma_address(prm->sg)),
 -                          (long long unsigned int)
 -                          pci_dma_lo32(sg_dma_address(prm->sg)),
 -                          (int)sg_dma_len(prm->sg));
 -
                        prm->sg = sg_next(prm->sg);
                }
        }
@@@ -1670,6 -1635,11 +1672,6 @@@ static void qlt_load_data_segments(stru
        int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
        struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;
  
 -      ql_dbg(ql_dbg_tgt, vha, 0xe00e,
 -          "iocb->scsi_status=%x, iocb->flags=%x\n",
 -          le16_to_cpu(pkt24->u.status0.scsi_status),
 -          le16_to_cpu(pkt24->u.status0.flags));
 -
        pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);
  
        /* Setup packet address segment pointer */
        }
  
        /* If scatter gather */
 -      ql_dbg(ql_dbg_tgt, vha, 0xe00f, "%s", "Building S/G data segments...");
  
        /* Load command entry data segments */
        for (cnt = 0;
                }
                *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
  
 -              ql_dbg(ql_dbg_tgt, vha, 0xe010,
 -                  "S/G Segment phys_addr=%llx:%llx, len=%d\n",
 -                  (long long unsigned int)pci_dma_hi32(sg_dma_address(
 -                  prm->sg)),
 -                  (long long unsigned int)pci_dma_lo32(sg_dma_address(
 -                  prm->sg)),
 -                  (int)sg_dma_len(prm->sg));
 -
                prm->sg = sg_next(prm->sg);
        }
  
@@@ -1731,7 -1710,6 +1733,7 @@@ static int qlt_pre_xmit_response(struc
                    se_cmd, cmd->tag);
  
                cmd->state = QLA_TGT_STATE_ABORTED;
 +              cmd->cmd_flags |= BIT_6;
  
                qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
  
                return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
        }
  
 -      ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u ox_id %04x\n",
 -              vha->vp_idx, cmd->tag,
 -              be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
 -
        prm->cmd = cmd;
        prm->tgt = tgt;
        prm->rq_result = scsi_status;
        prm->req_cnt = 1;
        prm->add_status_pkt = 0;
  
 -      ql_dbg(ql_dbg_tgt, vha, 0xe012, "rq_result=%x, xmit_type=%x\n",
 -          prm->rq_result, xmit_type);
 -
        /* Send marker if required */
        if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
                return -EFAULT;
  
 -      ql_dbg(ql_dbg_tgt, vha, 0xe013, "CTIO start: vha(%d)\n", vha->vp_idx);
 -
        if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
                if  (qlt_pci_map_calc_cnt(prm) != 0)
                        return -EAGAIN;
  
        if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
                prm->residual = se_cmd->residual_count;
 -              ql_dbg(ql_dbg_tgt, vha, 0xe014,
 +              ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x305c,
                    "Residual underflow: %d (tag %d, "
                    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
                    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
                prm->rq_result |= SS_RESIDUAL_UNDER;
        } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
                prm->residual = se_cmd->residual_count;
 -              ql_dbg(ql_dbg_tgt, vha, 0xe015,
 +              ql_dbg(ql_dbg_io, vha, 0x305d,
                    "Residual overflow: %d (tag %d, "
                    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
                    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
                }
        }
  
 -      ql_dbg(ql_dbg_tgt, vha, 0xe016,
 -          "req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d\n",
 -          prm->req_cnt, *full_req_cnt, prm->add_status_pkt);
 -
        return 0;
  }
  
@@@ -2321,21 -2312,6 +2323,21 @@@ int qlt_xmit_response(struct qla_tgt_cm
  
        spin_lock_irqsave(&ha->hardware_lock, flags);
  
 +      if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
 +              /*
 +               * Either a chip reset is active or this request was from
 +               * previous life, just abort the processing.
 +               */
 +              cmd->state = QLA_TGT_STATE_PROCESSED;
 +              qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
 +              ql_dbg(ql_dbg_async, vha, 0xe101,
 +                      "RESET-RSP active/old-count/new-count = %d/%d/%d.\n",
 +                      qla2x00_reset_active(vha), cmd->reset_count,
 +                      ha->chip_reset);
 +              spin_unlock_irqrestore(&ha->hardware_lock, flags);
 +              return 0;
 +      }
 +
        /* Does F/W have an IOCBs for this request */
        res = qlt_check_reserve_free_req(vha, full_req_cnt);
        if (unlikely(res))
                        struct ctio7_to_24xx *ctio =
                                (struct ctio7_to_24xx *)qlt_get_req_pkt(vha);
  
 -                      ql_dbg(ql_dbg_tgt, vha, 0xe019,
 -                          "Building additional status packet\n");
 +                      ql_dbg(ql_dbg_io, vha, 0x305e,
 +                          "Building additional status packet 0x%p.\n",
 +                          ctio);
  
                        /*
                         * T10Dif: ctio_crc2_to_fw overlay ontop of
  
  
        cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
 +      cmd->cmd_sent_to_fw = 1;
  
 -      ql_dbg(ql_dbg_tgt, vha, 0xe01a,
 -          "Xmitting CTIO7 response pkt for 24xx: %p scsi_status: 0x%02x\n",
 -          pkt, scsi_status);
 -
 +      /* Memory Barrier */
 +      wmb();
        qla2x00_start_iocbs(vha, vha->req);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
  
        return 0;
  
  out_unmap_unlock:
-       if (cmd->sg_mapped)
-               qlt_unmap_sg(vha, cmd);
+       qlt_unmap_sg(vha, cmd);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
  
        return res;
@@@ -2456,27 -2431,17 +2457,27 @@@ int qlt_rdy_to_xfer(struct qla_tgt_cmd 
        if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
                return -EIO;
  
 -      ql_dbg(ql_dbg_tgt, vha, 0xe01b,
 -              "%s: CTIO_start: vha(%d) se_cmd %p ox_id %04x\n",
 -              __func__, (int)vha->vp_idx, &cmd->se_cmd,
 -              be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
 -
        /* Calculate number of entries and segments required */
        if (qlt_pci_map_calc_cnt(&prm) != 0)
                return -EAGAIN;
  
        spin_lock_irqsave(&ha->hardware_lock, flags);
  
 +      if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
 +              /*
 +               * Either a chip reset is active or this request was from
 +               * previous life, just abort the processing.
 +               */
 +              cmd->state = QLA_TGT_STATE_NEED_DATA;
 +              qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
 +              ql_dbg(ql_dbg_async, vha, 0xe102,
 +                      "RESET-XFR active/old-count/new-count = %d/%d/%d.\n",
 +                      qla2x00_reset_active(vha), cmd->reset_count,
 +                      ha->chip_reset);
 +              spin_unlock_irqrestore(&ha->hardware_lock, flags);
 +              return 0;
 +      }
 +
        /* Does F/W have an IOCBs for this request */
        res = qlt_check_reserve_free_req(vha, prm.req_cnt);
        if (res != 0)
                qlt_load_data_segments(&prm, vha);
  
        cmd->state = QLA_TGT_STATE_NEED_DATA;
 +      cmd->cmd_sent_to_fw = 1;
  
 +      /* Memory Barrier */
 +      wmb();
        qla2x00_start_iocbs(vha, vha->req);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
  
        return res;
  
  out_unlock_free_unmap:
-       if (cmd->sg_mapped)
-               qlt_unmap_sg(vha, cmd);
+       qlt_unmap_sg(vha, cmd);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
  
        return res;
@@@ -2542,7 -2503,7 +2542,7 @@@ qlt_handle_dif_error(struct scsi_qla_ho
            "iocb(s) %p Returned STATUS.\n", sts);
  
        ql_dbg(ql_dbg_tgt, vha, 0xf075,
 -          "dif check TGT cdb 0x%x lba 0x%llu: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n",
 +          "dif check TGT cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n",
            cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
            a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard);
  
@@@ -2665,7 -2626,7 +2665,7 @@@ static int __qlt_send_term_exchange(str
  
        ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
  
 -      pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
 +      pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
        if (pkt == NULL) {
                ql_dbg(ql_dbg_tgt, vha, 0xe050,
                    "qla_target(%d): %s failed: unable to allocate "
        if (ctio24->u.status1.residual != 0)
                ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
  
 +      /* Memory Barrier */
 +      wmb();
        qla2x00_start_iocbs(vha, vha->req);
        return ret;
  }
@@@ -2725,76 -2684,33 +2725,75 @@@ static void qlt_send_term_exchange(stru
  
        if (ha_locked) {
                rc = __qlt_send_term_exchange(vha, cmd, atio);
 +              if (rc == -ENOMEM)
 +                      qlt_alloc_qfull_cmd(vha, atio, 0, 0);
                goto done;
        }
        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
        rc = __qlt_send_term_exchange(vha, cmd, atio);
 +      if (rc == -ENOMEM)
 +              qlt_alloc_qfull_cmd(vha, atio, 0, 0);
        spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
 +
  done:
 -      /*
 -       * Terminate exchange will tell fw to release any active CTIO
 -       * that's in FW posession and cleanup the exchange.
 -       *
 -       * "cmd->state == QLA_TGT_STATE_ABORTED" means CTIO is still
 -       * down at FW.  Free the cmd later when CTIO comes back later
 -       * w/aborted(0x2) status.
 -       *
 -       * "cmd->state != QLA_TGT_STATE_ABORTED" means CTIO is already
 -       * back w/some err.  Free the cmd now.
 -       */
 -      if ((rc == 1) && (cmd->state != QLA_TGT_STATE_ABORTED)) {
 +      if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) ||
 +          !cmd->cmd_sent_to_fw)) {
                if (!ha_locked && !in_interrupt())
                        msleep(250); /* just in case */
  
-               if (cmd->sg_mapped)
-                       qlt_unmap_sg(vha, cmd);
+               qlt_unmap_sg(vha, cmd);
                vha->hw->tgt.tgt_ops->free_cmd(cmd);
        }
        return;
  }
  
 +static void qlt_init_term_exchange(struct scsi_qla_host *vha)
 +{
 +      struct list_head free_list;
 +      struct qla_tgt_cmd *cmd, *tcmd;
 +
 +      vha->hw->tgt.leak_exchg_thresh_hold =
 +          (vha->hw->fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;
 +
 +      cmd = tcmd = NULL;
 +      if (!list_empty(&vha->hw->tgt.q_full_list)) {
 +              INIT_LIST_HEAD(&free_list);
 +              list_splice_init(&vha->hw->tgt.q_full_list, &free_list);
 +
 +              list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
 +                      list_del(&cmd->cmd_list);
 +                      /* This cmd was never sent to TCM.  There is no need
 +                       * to schedule free or call free_cmd
 +                       */
 +                      qlt_free_cmd(cmd);
 +                      vha->hw->tgt.num_qfull_cmds_alloc--;
 +              }
 +      }
 +      vha->hw->tgt.num_qfull_cmds_dropped = 0;
 +}
 +
 +static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
 +{
 +      uint32_t total_leaked;
 +
 +      total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;
 +
 +      if (vha->hw->tgt.leak_exchg_thresh_hold &&
 +          (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {
 +
 +              ql_dbg(ql_dbg_tgt, vha, 0xe079,
 +                  "Chip reset due to exchange starvation: %d/%d.\n",
 +                  total_leaked, vha->hw->fw_xcb_count);
 +
 +              if (IS_P3P_TYPE(vha->hw))
 +                      set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
 +              else
 +                      set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 +              qla2xxx_wake_dpc(vha);
 +      }
 +
 +}
 +
  void qlt_free_cmd(struct qla_tgt_cmd *cmd)
  {
        struct qla_tgt_sess *sess = cmd->sess;
            __func__, &cmd->se_cmd,
            be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
  
 +      BUG_ON(cmd->cmd_in_wq);
 +
 +      if (!cmd->q_full)
 +              qlt_decr_num_pend_cmds(cmd->vha);
 +
        BUG_ON(cmd->sg_mapped);
 +      cmd->jiffies_at_free = get_jiffies_64();
        if (unlikely(cmd->free_sg))
                kfree(cmd->sg);
  
                WARN_ON(1);
                return;
        }
 +      cmd->jiffies_at_free = get_jiffies_64();
        percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
  }
  EXPORT_SYMBOL(qlt_free_cmd);
@@@ -2832,7 -2741,6 +2831,7 @@@ static int qlt_prepare_srr_ctio(struct 
        struct qla_tgt_srr_imm *imm;
  
        tgt->ctio_srr_id++;
 +      cmd->cmd_flags |= BIT_15;
  
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
            "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx);
@@@ -2954,9 -2862,11 +2953,9 @@@ static struct qla_tgt_cmd *qlt_ctio_to_
            CTIO_INTERMEDIATE_HANDLE_MARK);
  
        if (handle != QLA_TGT_NULL_HANDLE) {
 -              if (unlikely(handle == QLA_TGT_SKIP_HANDLE)) {
 -                      ql_dbg(ql_dbg_tgt, vha, 0xe01d, "%s",
 -                          "SKIP_HANDLE CTIO\n");
 +              if (unlikely(handle == QLA_TGT_SKIP_HANDLE))
                        return NULL;
 -              }
 +
                /* handle-1 is actually used */
                if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) {
                        ql_dbg(ql_dbg_tgt, vha, 0xe052,
        return cmd;
  }
  
 +/* hardware_lock should be held by caller. */
 +static void
 +qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
 +{
 +      struct qla_hw_data *ha = vha->hw;
 +      uint32_t handle;
 +
 +      if (cmd->sg_mapped)
 +              qlt_unmap_sg(vha, cmd);
 +
 +      handle = qlt_make_handle(vha);
 +
 +      /* TODO: fix debug message type and ids. */
 +      if (cmd->state == QLA_TGT_STATE_PROCESSED) {
 +              ql_dbg(ql_dbg_io, vha, 0xff00,
 +                  "HOST-ABORT: handle=%d, state=PROCESSED.\n", handle);
 +      } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
 +              cmd->write_data_transferred = 0;
 +              cmd->state = QLA_TGT_STATE_DATA_IN;
 +
 +              ql_dbg(ql_dbg_io, vha, 0xff01,
 +                  "HOST-ABORT: handle=%d, state=DATA_IN.\n", handle);
 +
 +              ha->tgt.tgt_ops->handle_data(cmd);
 +              return;
 +      } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
 +              ql_dbg(ql_dbg_io, vha, 0xff02,
 +                  "HOST-ABORT: handle=%d, state=ABORTED.\n", handle);
 +      } else {
 +              ql_dbg(ql_dbg_io, vha, 0xff03,
 +                  "HOST-ABORT: handle=%d, state=BAD(%d).\n", handle,
 +                  cmd->state);
 +              dump_stack();
 +      }
 +
 +      cmd->cmd_flags |= BIT_12;
 +      ha->tgt.tgt_ops->free_cmd(cmd);
 +}
 +
 +void
 +qlt_host_reset_handler(struct qla_hw_data *ha)
 +{
 +      struct qla_tgt_cmd *cmd;
 +      unsigned long flags;
 +      scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 +      scsi_qla_host_t *vha = NULL;
 +      struct qla_tgt *tgt = base_vha->vha_tgt.qla_tgt;
 +      uint32_t i;
 +
 +      if (!base_vha->hw->tgt.tgt_ops)
 +              return;
 +
 +      if (!tgt || qla_ini_mode_enabled(base_vha)) {
 +              ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
 +                      "Target mode disabled\n");
 +              return;
 +      }
 +
 +      ql_dbg(ql_dbg_tgt_mgt, vha, 0xff10,
 +          "HOST-ABORT-HNDLR: base_vha->dpc_flags=%lx.\n",
 +          base_vha->dpc_flags);
 +
 +      spin_lock_irqsave(&ha->hardware_lock, flags);
 +      for (i = 1; i < DEFAULT_OUTSTANDING_COMMANDS + 1; i++) {
 +              cmd = qlt_get_cmd(base_vha, i);
 +              if (!cmd)
 +                      continue;
 +              /* ha->tgt.cmds entry is cleared by qlt_get_cmd. */
 +              vha = cmd->vha;
 +              qlt_abort_cmd_on_host_reset(vha, cmd);
 +      }
 +      spin_unlock_irqrestore(&ha->hardware_lock, flags);
 +}
 +
 +
  /*
   * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
   */
@@@ -3069,6 -2904,10 +3068,6 @@@ static void qlt_do_ctio_completion(stru
        struct target_core_fabric_ops *tfo;
        struct qla_tgt_cmd *cmd;
  
 -      ql_dbg(ql_dbg_tgt, vha, 0xe01e,
 -          "qla_target(%d): handle(ctio %p status %#x) <- %08x\n",
 -          vha->vp_idx, ctio, status, handle);
 -
        if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
                /* That could happen only in case of an error/reset/abort */
                if (status != CTIO_SUCCESS) {
  
        se_cmd = &cmd->se_cmd;
        tfo = se_cmd->se_tfo;
 +      cmd->cmd_sent_to_fw = 0;
  
-       if (cmd->sg_mapped)
-               qlt_unmap_sg(vha, cmd);
+       qlt_unmap_sg(vha, cmd);
  
        if (unlikely(status != CTIO_SUCCESS)) {
                switch (status & 0xFFFF) {
                 * level.
                 */
                if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
 -                      (cmd->state != QLA_TGT_STATE_ABORTED)) {
 +                  (cmd->state != QLA_TGT_STATE_ABORTED)) {
 +                      cmd->cmd_flags |= BIT_13;
                        if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
                                return;
                }
  skip_term:
  
        if (cmd->state == QLA_TGT_STATE_PROCESSED) {
 -              ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd);
 +              ;
        } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
                int rx_status = 0;
  
                else
                        cmd->write_data_transferred = 1;
  
 -              ql_dbg(ql_dbg_tgt, vha, 0xe020,
 -                  "Data received, context %x, rx_status %d\n",
 -                  0x0, rx_status);
 -
                ha->tgt.tgt_ops->handle_data(cmd);
                return;
        } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
                dump_stack();
        }
  
 +
        ha->tgt.tgt_ops->free_cmd(cmd);
  }
  
@@@ -3262,8 -3101,6 +3260,8 @@@ static void __qlt_do_work(struct qla_tg
        uint32_t data_length;
        int ret, fcp_task_attr, data_dir, bidi = 0;
  
 +      cmd->cmd_in_wq = 0;
 +      cmd->cmd_flags |= BIT_1;
        if (tgt->tgt_stop)
                goto out_term;
  
            &atio->u.isp24.fcp_cmnd.add_cdb[
            atio->u.isp24.fcp_cmnd.add_cdb_len]));
  
 -      ql_dbg(ql_dbg_tgt, vha, 0xe022,
 -              "qla_target: START qla cmd: %p se_cmd %p lun: 0x%04x (tag %d) len(%d) ox_id %x\n",
 -              cmd, &cmd->se_cmd, cmd->unpacked_lun, cmd->tag, data_length,
 -              cmd->atio.u.isp24.fcp_hdr.ox_id);
 -
        ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
                                          fcp_task_attr, data_dir, bidi);
        if (ret != 0)
        return;
  
  out_term:
 -      ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd);
 +      ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
        /*
         * cmd has not sent to target yet, so pass NULL as the second
         * argument to qlt_send_term_exchange() and free the memory here.
         */
 +      cmd->cmd_flags |= BIT_2;
        spin_lock_irqsave(&ha->hardware_lock, flags);
        qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
 +
 +      qlt_decr_num_pend_cmds(vha);
        percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
        ha->tgt.tgt_ops->put_sess(sess);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@@ -3342,7 -3181,6 +3340,7 @@@ static struct qla_tgt_cmd *qlt_get_tag(
        memcpy(&cmd->atio, atio, sizeof(*atio));
        cmd->state = QLA_TGT_STATE_NEW;
        cmd->tgt = vha->vha_tgt.qla_tgt;
 +      qlt_incr_num_pend_cmds(vha);
        cmd->vha = vha;
        cmd->se_cmd.map_tag = tag;
        cmd->sess = sess;
@@@ -3424,7 -3262,7 +3422,7 @@@ static int qlt_handle_cmd_for_atio(stru
        struct qla_tgt_cmd *cmd;
  
        if (unlikely(tgt->tgt_stop)) {
 -              ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
 +              ql_dbg(ql_dbg_io, vha, 0x3061,
                    "New command while device %p is shutting down\n", tgt);
                return -EFAULT;
        }
                        return -ENOMEM;
  
                memcpy(&op->atio, atio, sizeof(*atio));
 +              op->vha = vha;
                INIT_WORK(&op->work, qlt_create_sess_from_atio);
                queue_work(qla_tgt_wq, &op->work);
                return 0;
  
        cmd = qlt_get_tag(vha, sess, atio);
        if (!cmd) {
 -              ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e,
 +              ql_dbg(ql_dbg_io, vha, 0x3062,
                    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
                ha->tgt.tgt_ops->put_sess(sess);
                return -ENOMEM;
        }
  
 +      cmd->cmd_flags = 0;
 +      cmd->jiffies_at_alloc = get_jiffies_64();
 +
 +      cmd->reset_count = vha->hw->chip_reset;
 +
 +      cmd->cmd_in_wq = 1;
 +      cmd->cmd_flags |= BIT_0;
        INIT_WORK(&cmd->work, qlt_do_work);
        queue_work(qla_tgt_wq, &cmd->work);
        return 0;
@@@ -3495,7 -3325,6 +3493,7 @@@ static int qlt_issue_task_mgmt(struct q
        }
        mcmd->tmr_func = fn;
        mcmd->flags = flags;
 +      mcmd->reset_count = vha->hw->chip_reset;
  
        switch (fn) {
        case QLA_TGT_CLEAR_ACA:
@@@ -3631,7 -3460,6 +3629,7 @@@ static int __qlt_abort_task(struct scsi
  
        lun = a->u.isp24.fcp_cmnd.lun;
        unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
 +      mcmd->reset_count = vha->hw->chip_reset;
  
        rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
            le16_to_cpu(iocb->u.isp2x.seq_id));
@@@ -3923,10 -3751,8 +3921,10 @@@ static void qlt_handle_srr(struct scsi_
                        qlt_send_notify_ack(vha, ntfy,
                            0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
                        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 -                      if (xmit_type & QLA_TGT_XMIT_DATA)
 +                      if (xmit_type & QLA_TGT_XMIT_DATA) {
 +                              cmd->cmd_flags |= BIT_8;
                                qlt_rdy_to_xfer(cmd);
 +                      }
                } else {
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066,
                            "qla_target(%d): SRR for out data for cmd "
        }
  
        /* Transmit response in case of status and data-in cases */
 -      if (resp)
 +      if (resp) {
 +              cmd->cmd_flags |= BIT_7;
                qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
 +      }
  
        return;
  
@@@ -3960,10 -3784,8 +3958,10 @@@ out_reject
        if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
                cmd->state = QLA_TGT_STATE_DATA_IN;
                dump_stack();
 -      } else
 +      } else {
 +              cmd->cmd_flags |= BIT_9;
                qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
 +      }
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
  }
  
@@@ -4077,7 -3899,7 +4075,7 @@@ static void qlt_prepare_srr_imm(struct 
  
        tgt->imm_srr_id++;
  
 -      ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02d, "qla_target(%d): SRR received\n",
 +      ql_log(ql_log_warn, vha, 0xf02d, "qla_target(%d): SRR received\n",
            vha->vp_idx);
  
        imm = kzalloc(sizeof(*imm), GFP_ATOMIC);
@@@ -4297,7 -4119,7 +4295,7 @@@ static void qlt_handle_imm_notify(struc
   * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
   * This function sends busy to ISP 2xxx or 24xx.
   */
 -static void qlt_send_busy(struct scsi_qla_host *vha,
 +static int __qlt_send_busy(struct scsi_qla_host *vha,
        struct atio_from_isp *atio, uint16_t status)
  {
        struct ctio7_to_24xx *ctio24;
            atio->u.isp24.fcp_hdr.s_id);
        if (!sess) {
                qlt_send_term_exchange(vha, NULL, atio, 1);
 -              return;
 +              return 0;
        }
        /* Sending marker isn't necessary, since we called from ISR */
  
        pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
        if (!pkt) {
 -              ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06e,
 +              ql_dbg(ql_dbg_io, vha, 0x3063,
                    "qla_target(%d): %s failed: unable to allocate "
                    "request packet", vha->vp_idx, __func__);
 -              return;
 +              return -ENOMEM;
        }
  
        pkt->entry_count = 1;
         */
        ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
        ctio24->u.status1.scsi_status = cpu_to_le16(status);
 -      ctio24->u.status1.residual = get_unaligned((uint32_t *)
 -          &atio->u.isp24.fcp_cmnd.add_cdb[
 -          atio->u.isp24.fcp_cmnd.add_cdb_len]);
 -      if (ctio24->u.status1.residual != 0)
 -              ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
 -
 +      /* Memory Barrier */
 +      wmb();
        qla2x00_start_iocbs(vha, vha->req);
 +      return 0;
 +}
 +
 +/*
 + * This routine is used to allocate a command for either a QFull condition
 + * (ie reply SAM_STAT_BUSY) or to terminate an exchange that did not go
 + * out previously.
 + */
 +static void
 +qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
 +      struct atio_from_isp *atio, uint16_t status, int qfull)
 +{
 +      struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 +      struct qla_hw_data *ha = vha->hw;
 +      struct qla_tgt_sess *sess;
 +      struct se_session *se_sess;
 +      struct qla_tgt_cmd *cmd;
 +      int tag;
 +
 +      if (unlikely(tgt->tgt_stop)) {
 +              ql_dbg(ql_dbg_io, vha, 0x300a,
 +                      "New command while device %p is shutting down\n", tgt);
 +              return;
 +      }
 +
 +      if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
 +              vha->hw->tgt.num_qfull_cmds_dropped++;
 +              if (vha->hw->tgt.num_qfull_cmds_dropped >
 +                      vha->hw->qla_stats.stat_max_qfull_cmds_dropped)
 +                      vha->hw->qla_stats.stat_max_qfull_cmds_dropped =
 +                              vha->hw->tgt.num_qfull_cmds_dropped;
 +
 +              ql_dbg(ql_dbg_io, vha, 0x3068,
 +                      "qla_target(%d): %s: QFull CMD dropped[%d]\n",
 +                      vha->vp_idx, __func__,
 +                      vha->hw->tgt.num_qfull_cmds_dropped);
 +
 +              qlt_chk_exch_leak_thresh_hold(vha);
 +              return;
 +      }
 +
 +      sess = ha->tgt.tgt_ops->find_sess_by_s_id
 +              (vha, atio->u.isp24.fcp_hdr.s_id);
 +      if (!sess)
 +              return;
 +
 +      se_sess = sess->se_sess;
 +
 +      tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
 +      if (tag < 0)
 +              return;
 +
 +      cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
 +      if (!cmd) {
 +              ql_dbg(ql_dbg_io, vha, 0x3009,
 +                      "qla_target(%d): %s: Allocation of cmd failed\n",
 +                      vha->vp_idx, __func__);
 +
 +              vha->hw->tgt.num_qfull_cmds_dropped++;
 +              if (vha->hw->tgt.num_qfull_cmds_dropped >
 +                      vha->hw->qla_stats.stat_max_qfull_cmds_dropped)
 +                      vha->hw->qla_stats.stat_max_qfull_cmds_dropped =
 +                              vha->hw->tgt.num_qfull_cmds_dropped;
 +
 +              qlt_chk_exch_leak_thresh_hold(vha);
 +              return;
 +      }
 +
 +      memset(cmd, 0, sizeof(struct qla_tgt_cmd));
 +
 +      qlt_incr_num_pend_cmds(vha);
 +      INIT_LIST_HEAD(&cmd->cmd_list);
 +      memcpy(&cmd->atio, atio, sizeof(*atio));
 +
 +      cmd->tgt = vha->vha_tgt.qla_tgt;
 +      cmd->vha = vha;
 +      cmd->reset_count = vha->hw->chip_reset;
 +      cmd->q_full = 1;
 +
 +      if (qfull) {
 +              cmd->q_full = 1;
 +              /* NOTE: borrowing the state field to carry the status */
 +              cmd->state = status;
 +      } else
 +              cmd->term_exchg = 1;
 +
 +      list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);
 +
 +      vha->hw->tgt.num_qfull_cmds_alloc++;
 +      if (vha->hw->tgt.num_qfull_cmds_alloc >
 +              vha->hw->qla_stats.stat_max_qfull_cmds_alloc)
 +              vha->hw->qla_stats.stat_max_qfull_cmds_alloc =
 +                      vha->hw->tgt.num_qfull_cmds_alloc;
 +}
 +
 +int
 +qlt_free_qfull_cmds(struct scsi_qla_host *vha)
 +{
 +      struct qla_hw_data *ha = vha->hw;
 +      unsigned long flags;
 +      struct qla_tgt_cmd *cmd, *tcmd;
 +      struct list_head free_list;
 +      int rc = 0;
 +
 +      if (list_empty(&ha->tgt.q_full_list))
 +              return 0;
 +
 +      INIT_LIST_HEAD(&free_list);
 +
 +      spin_lock_irqsave(&vha->hw->hardware_lock, flags);
 +
 +      if (list_empty(&ha->tgt.q_full_list)) {
 +              spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
 +              return 0;
 +      }
 +
 +      list_for_each_entry_safe(cmd, tcmd, &ha->tgt.q_full_list, cmd_list) {
 +              if (cmd->q_full)
 +                      /* cmd->state is a borrowed field to hold status */
 +                      rc = __qlt_send_busy(vha, &cmd->atio, cmd->state);
 +              else if (cmd->term_exchg)
 +                      rc = __qlt_send_term_exchange(vha, NULL, &cmd->atio);
 +
 +              if (rc == -ENOMEM)
 +                      break;
 +
 +              if (cmd->q_full)
 +                      ql_dbg(ql_dbg_io, vha, 0x3006,
 +                          "%s: busy sent for ox_id[%04x]\n", __func__,
 +                          be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
 +              else if (cmd->term_exchg)
 +                      ql_dbg(ql_dbg_io, vha, 0x3007,
 +                          "%s: Term exchg sent for ox_id[%04x]\n", __func__,
 +                          be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
 +              else
 +                      ql_dbg(ql_dbg_io, vha, 0x3008,
 +                          "%s: Unexpected cmd in QFull list %p\n", __func__,
 +                          cmd);
 +
 +              list_del(&cmd->cmd_list);
 +              list_add_tail(&cmd->cmd_list, &free_list);
 +
 +              /* piggy back on hardware_lock for protection */
 +              vha->hw->tgt.num_qfull_cmds_alloc--;
 +      }
 +      spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
 +
 +      cmd = NULL;
 +
 +      list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
 +              list_del(&cmd->cmd_list);
 +              /* This cmd was never sent to TCM.  There is no need
 +               * to schedule free or call free_cmd
 +               */
 +              qlt_free_cmd(cmd);
 +      }
 +      return rc;
 +}
 +
 +static void
 +qlt_send_busy(struct scsi_qla_host *vha,
 +      struct atio_from_isp *atio, uint16_t status)
 +{
 +      int rc = 0;
 +
 +      rc = __qlt_send_busy(vha, atio, status);
 +      if (rc == -ENOMEM)
 +              qlt_alloc_qfull_cmd(vha, atio, status, 1);
 +}
 +
 +static int
 +qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
 +      struct atio_from_isp *atio)
 +{
 +      struct qla_hw_data *ha = vha->hw;
 +      uint16_t status;
 +
 +      if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
 +              return 0;
 +
 +      status = temp_sam_status;
 +      qlt_send_busy(vha, atio, status);
 +      return 1;
  }
  
  /* ha->hardware_lock supposed to be held on entry */
@@@ -4541,10 -4184,14 +4539,10 @@@ static void qlt_24xx_atio_pkt(struct sc
        int rc;
  
        if (unlikely(tgt == NULL)) {
 -              ql_dbg(ql_dbg_tgt_mgt, vha, 0xf039,
 +              ql_dbg(ql_dbg_io, vha, 0x3064,
                    "ATIO pkt, but no tgt (ha %p)", ha);
                return;
        }
 -      ql_dbg(ql_dbg_tgt, vha, 0xe02c,
 -          "qla_target(%d): ATIO pkt %p: type %02x count %02x",
 -          vha->vp_idx, atio, atio->u.raw.entry_type,
 -          atio->u.raw.entry_count);
        /*
         * In tgt_stop mode we also should allow all requests to pass.
         * Otherwise, some commands can stuck.
  
        switch (atio->u.raw.entry_type) {
        case ATIO_TYPE7:
 -              ql_dbg(ql_dbg_tgt, vha, 0xe02d,
 -                  "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, cdb %x, add_cdb_len %x, data_length %04x, s_id %02x%02x%02x\n",
 -                  vha->vp_idx, atio->u.isp24.fcp_cmnd.lun,
 -                  atio->u.isp24.fcp_cmnd.rddata,
 -                  atio->u.isp24.fcp_cmnd.wrdata,
 -                  atio->u.isp24.fcp_cmnd.cdb[0],
 -                  atio->u.isp24.fcp_cmnd.add_cdb_len,
 -                  be32_to_cpu(get_unaligned((uint32_t *)
 -                      &atio->u.isp24.fcp_cmnd.add_cdb[
 -                      atio->u.isp24.fcp_cmnd.add_cdb_len])),
 -                  atio->u.isp24.fcp_hdr.s_id[0],
 -                  atio->u.isp24.fcp_hdr.s_id[1],
 -                  atio->u.isp24.fcp_hdr.s_id[2]);
 -
                if (unlikely(atio->u.isp24.exchange_addr ==
                    ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
 -                      ql_dbg(ql_dbg_tgt, vha, 0xe058,
 +                      ql_dbg(ql_dbg_io, vha, 0x3065,
                            "qla_target(%d): ATIO_TYPE7 "
                            "received with UNKNOWN exchange address, "
                            "sending QUEUE_FULL\n", vha->vp_idx);
                        qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
                        break;
                }
 -              if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0))
 +
 +
 +
 +              if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
 +                      rc = qlt_chk_qfull_thresh_hold(vha, atio);
 +                      if (rc != 0) {
 +                              tgt->irq_cmd_count--;
 +                              return;
 +                      }
                        rc = qlt_handle_cmd_for_atio(vha, atio);
 -              else
 +              } else {
                        rc = qlt_handle_task_mgmt(vha, atio);
 +              }
                if (unlikely(rc != 0)) {
                        if (rc == -ESRCH) {
  #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
@@@ -4639,6 -4291,11 +4637,6 @@@ static void qlt_response_pkt(struct scs
                return;
        }
  
 -      ql_dbg(ql_dbg_tgt, vha, 0xe02f,
 -          "qla_target(%d): response pkt %p: T %02x C %02x S %02x "
 -          "handle %#x\n", vha->vp_idx, pkt, pkt->entry_type,
 -          pkt->entry_count, pkt->entry_status, pkt->handle);
 -
        /*
         * In tgt_stop mode we also should allow all requests to pass.
         * Otherwise, some commands can stuck.
        case CTIO_TYPE7:
        {
                struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
 -              ql_dbg(ql_dbg_tgt, vha, 0xe030,
 -                      "CTIO[0x%x] 12/CTIO7 7A/CRC2: instance %d\n",
 -                      entry->entry_type, vha->vp_idx);
                qlt_do_ctio_completion(vha, entry->handle,
                    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
                    entry);
        {
                struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
                int rc;
 -              ql_dbg(ql_dbg_tgt, vha, 0xe031,
 -                  "ACCEPT_TGT_IO instance %d status %04x "
 -                  "lun %04x read/write %d data_length %04x "
 -                  "target_id %02x rx_id %04x\n ", vha->vp_idx,
 -                  le16_to_cpu(atio->u.isp2x.status),
 -                  le16_to_cpu(atio->u.isp2x.lun),
 -                  atio->u.isp2x.execution_codes,
 -                  le32_to_cpu(atio->u.isp2x.data_length), GET_TARGET_ID(ha,
 -                  atio), atio->u.isp2x.rx_id);
                if (atio->u.isp2x.status !=
                    __constant_cpu_to_le16(ATIO_CDB_VALID)) {
                        ql_dbg(ql_dbg_tgt, vha, 0xe05e,
                            le16_to_cpu(atio->u.isp2x.status));
                        break;
                }
 -              ql_dbg(ql_dbg_tgt, vha, 0xe032,
 -                  "FCP CDB: 0x%02x, sizeof(cdb): %lu",
 -                  atio->u.isp2x.cdb[0], (unsigned long
 -                  int)sizeof(atio->u.isp2x.cdb));
 +
 +              rc = qlt_chk_qfull_thresh_hold(vha, atio);
 +              if (rc != 0) {
 +                      tgt->irq_cmd_count--;
 +                      return;
 +              }
  
                rc = qlt_handle_cmd_for_atio(vha, atio);
                if (unlikely(rc != 0)) {
        case CONTINUE_TGT_IO_TYPE:
        {
                struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
 -              ql_dbg(ql_dbg_tgt, vha, 0xe033,
 -                  "CONTINUE_TGT_IO: instance %d\n", vha->vp_idx);
                qlt_do_ctio_completion(vha, entry->handle,
                    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
                    entry);
        case CTIO_A64_TYPE:
        {
                struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
 -              ql_dbg(ql_dbg_tgt, vha, 0xe034, "CTIO_A64: instance %d\n",
 -                  vha->vp_idx);
                qlt_do_ctio_completion(vha, entry->handle,
                    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
                    entry);
@@@ -4819,6 -4490,11 +4817,6 @@@ void qlt_async_event(uint16_t code, str
        struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
        int login_code;
  
 -      ql_dbg(ql_dbg_tgt, vha, 0xe039,
 -          "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n",
 -          vha->host_no, atomic_read(&vha->loop_state), vha->flags.init_done,
 -          ha->operating_mode, ha->current_topology);
 -
        if (!ha->tgt.tgt_ops)
                return;
  
                break;
  
        default:
 -              ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040,
 -                  "qla_target(%d): Async event %#x occurred: "
 -                  "ignore (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
 -                  code, le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
 -                  le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
                break;
        }
  
@@@ -4915,6 -4596,8 +4913,6 @@@ static fc_port_t *qlt_get_port_database
                return NULL;
        }
  
 -      ql_dbg(ql_dbg_tgt_mgt, vha, 0xf041, "loop_id %d", loop_id);
 -
        fcport->loop_id = loop_id;
  
        rc = qla2x00_get_port_database(vha, fcport, 0);
@@@ -5213,10 -4896,6 +5211,10 @@@ int qlt_remove_target(struct qla_hw_dat
                qlt_release(vha->vha_tgt.qla_tgt);
                return 0;
        }
 +
 +      /* free left over qfull cmds */
 +      qlt_init_term_exchange(vha);
 +
        mutex_lock(&qla_tgt_mutex);
        list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
        mutex_unlock(&qla_tgt_mutex);
@@@ -5343,7 -5022,7 +5341,7 @@@ void qlt_lport_deregister(struct scsi_q
  EXPORT_SYMBOL(qlt_lport_deregister);
  
  /* Must be called under HW lock */
- void qlt_set_mode(struct scsi_qla_host *vha)
static void qlt_set_mode(struct scsi_qla_host *vha)
  {
        struct qla_hw_data *ha = vha->hw;
  
  }
  
  /* Must be called under HW lock */
- void qlt_clear_mode(struct scsi_qla_host *vha)
static void qlt_clear_mode(struct scsi_qla_host *vha)
  {
        struct qla_hw_data *ha = vha->hw;
  
@@@ -5428,8 -5107,7 +5426,7 @@@ EXPORT_SYMBOL(qlt_enable_vha)
   *
   * Disable Target Mode and reset the adapter
   */
- void
- qlt_disable_vha(struct scsi_qla_host *vha)
+ static void qlt_disable_vha(struct scsi_qla_host *vha)
  {
        struct qla_hw_data *ha = vha->hw;
        struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
@@@ -5614,13 -5292,8 +5611,13 @@@ qlt_24xx_config_nvram_stage1(struct scs
                nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
                /* Enable initial LIP */
                nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
 -              /* Enable FC tapes support */
 -              nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
 +              if (ql2xtgt_tape_enable)
 +                      /* Enable FC Tape support */
 +                      nv->firmware_options_2 |= cpu_to_le32(BIT_12);
 +              else
 +                      /* Disable FC Tape support */
 +                      nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
 +
                /* Disable Full Login after LIP */
                nv->host_p &= __constant_cpu_to_le32(~BIT_10);
                /* Enable target PRLI control */
@@@ -5702,13 -5375,8 +5699,13 @@@ qlt_81xx_config_nvram_stage1(struct scs
                nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
                /* Enable initial LIP */
                nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
 -              /* Enable FC tapes support */
 -              nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
 +              if (ql2xtgt_tape_enable)
 +                      /* Enable FC tape support */
 +                      nv->firmware_options_2 |= cpu_to_le32(BIT_12);
 +              else
 +                      /* Disable FC tape support */
 +                      nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
 +
                /* Disable Full Login after LIP */
                nv->host_p &= __constant_cpu_to_le32(~BIT_10);
                /* Enable target PRLI control */
index 8ff330f7d6f55863b2ceb8af62e1ae2ef556ea42,c93eeab0cfb29324a1941469f2e8e3a643bbc3cb..332086776dfe94db4be404cca5c621518d600b1f
@@@ -915,10 -915,6 +915,10 @@@ struct qla_tgt_cmd 
        unsigned int aborted:1; /* Needed in case of SRR */
        unsigned int write_data_transferred:1;
        unsigned int ctx_dsd_alloced:1;
 +      unsigned int q_full:1;
 +      unsigned int term_exchg:1;
 +      unsigned int cmd_sent_to_fw:1;
 +      unsigned int cmd_in_wq:1;
  
        struct scatterlist *sg; /* cmd data buffer SG vector */
        int sg_cnt;             /* SG segments count */
        uint32_t tag;
        uint32_t unpacked_lun;
        enum dma_data_direction dma_data_direction;
 +      uint32_t reset_count;
  
        uint16_t loop_id;       /* to save extra sess dereferences */
        struct qla_tgt *tgt;    /* to save extra sess dereferences */
        struct scsi_qla_host *vha;
 +      struct list_head cmd_list;
  
        struct atio_from_isp atio;
        /* t10dif */
        uint32_t blk_sz;
        struct crc_context *ctx;
  
 +      uint64_t jiffies_at_alloc;
 +      uint64_t jiffies_at_free;
 +      /* BIT_0 - Atio Arrival / schedule to work
 +       * BIT_1 - qlt_do_work
 +       * BIT_2 - qlt_do_work failed
 +       * BIT_3 - xfer rdy/tcm_qla2xxx_write_pending
 +       * BIT_4 - read respond/tcm_qla2xxx_queue_data_in
 +       * BIT_5 - status respond / tcm_qla2xxx_queue_status
 +       * BIT_6 - tcm request to abort/Term exchange.
 +       *      pre_xmit_response->qlt_send_term_exchange
 +       * BIT_7 - SRR received (qlt_handle_srr->qlt_xmit_response)
 +       * BIT_8 - SRR received (qlt_handle_srr->qlt_rdy_to_xfer)
 +       * BIT_9 - SRR received (qlt_handle_srr->qlt_send_term_exchange)
 +       * BIT_10 - Data in - handle_data->tcm_qla2xxx_handle_data
 +       * BIT_11 - Data actually going to TCM : tcm_qla2xxx_handle_data_work
 +       * BIT_12 - good completion - qlt_ctio_do_completion -->free_cmd
 +       * BIT_13 - Bad completion -
 +       *      qlt_ctio_do_completion --> qlt_term_ctio_exchange
 +       * BIT_14 - Back end data received/sent.
 +       * BIT_15 - SRR prepare ctio
 +       * BIT_16 - complete free
 +       */
 +      uint32_t cmd_flags;
  };
  
  struct qla_tgt_sess_work_param {
@@@ -987,7 -958,6 +987,7 @@@ struct qla_tgt_mgmt_cmd 
        struct se_cmd se_cmd;
        struct work_struct free_work;
        unsigned int flags;
 +      uint32_t reset_count;
  #define QLA24XX_MGMT_SEND_NACK        1
        union {
                struct atio_from_isp atio;
@@@ -1001,11 -971,11 +1001,11 @@@ struct qla_tgt_prm 
        struct qla_tgt *tgt;
        void *pkt;
        struct scatterlist *sg; /* cmd data buffer SG vector */
+       unsigned char *sense_buffer;
        int seg_cnt;
        int req_cnt;
        uint16_t rq_result;
        uint16_t scsi_status;
-       unsigned char *sense_buffer;
        int sense_buffer_len;
        int residual;
        int add_status_pkt;
@@@ -1033,10 -1003,6 +1033,6 @@@ struct qla_tgt_srr_ctio 
  
  
  extern struct qla_tgt_data qla_target;
- /*
-  * Internal function prototypes
-  */
- void qlt_disable_vha(struct scsi_qla_host *);
  
  /*
   * Function prototypes for qla_target.c logic used by qla2xxx LLD code.
@@@ -1049,8 -1015,6 +1045,6 @@@ extern void qlt_lport_deregister(struc
  extern void qlt_unreg_sess(struct qla_tgt_sess *);
  extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
  extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *);
- extern void qlt_set_mode(struct scsi_qla_host *ha);
- extern void qlt_clear_mode(struct scsi_qla_host *ha);
  extern int __init qlt_init(void);
  extern void qlt_exit(void);
  extern void qlt_update_vp_map(struct scsi_qla_host *, int);
@@@ -1083,13 -1047,9 +1077,9 @@@ static inline void qla_reverse_ini_mode
  /*
   * Exported symbols from qla_target.c LLD logic used by qla2xxx code..
   */
- extern void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *,
-       struct atio_from_isp *);
  extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
  extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
  extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
- extern int qlt_rdy_to_xfer_dif(struct qla_tgt_cmd *);
- extern int qlt_xmit_response_dif(struct qla_tgt_cmd *, int, uint8_t);
  extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
  extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
  extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
@@@ -1119,6 -1079,5 +1109,6 @@@ extern int qlt_stop_phase1(struct qla_t
  extern void qlt_stop_phase2(struct qla_tgt *);
  extern irqreturn_t qla83xx_msix_atio_q(int, void *);
  extern void qlt_83xx_iospace_config(struct qla_hw_data *);
 +extern int qlt_free_qfull_cmds(struct scsi_qla_host *);
  
  #endif /* __QLA_TARGET_H */
index 031b2961c6b72c9ff445898f61eb21ba526706cc,4747d2c660247cc80312c1f65a0417e9fbe658b3..73f9feecda72b71552b63eb28b7bec9106f6fd4e
  #include "qla_target.h"
  #include "tcm_qla2xxx.h"
  
 -struct workqueue_struct *tcm_qla2xxx_free_wq;
 -struct workqueue_struct *tcm_qla2xxx_cmd_wq;
 +static struct workqueue_struct *tcm_qla2xxx_free_wq;
 +static struct workqueue_struct *tcm_qla2xxx_cmd_wq;
 +
 +/* Local pointer to allocated TCM configfs fabric module */
 +static struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
 +static struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;
  
  /*
   * Parse WWN.
@@@ -390,11 -386,6 +390,11 @@@ static void tcm_qla2xxx_complete_free(s
  {
        struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
  
 +      cmd->cmd_in_wq = 0;
 +
 +      WARN_ON(cmd->cmd_flags &  BIT_16);
 +
 +      cmd->cmd_flags |= BIT_16;
        transport_generic_free_cmd(&cmd->se_cmd, 0);
  }
  
   */
  static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
  {
 +      cmd->cmd_in_wq = 1;
        INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
        queue_work(tcm_qla2xxx_free_wq, &cmd->work);
  }
   */
  static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
  {
 +      struct qla_tgt_cmd *cmd;
 +
 +      if ((se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) == 0) {
 +              cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
 +              cmd->cmd_flags |= BIT_14;
 +      }
 +
        return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
  }
  
@@@ -528,13 -511,8 +528,13 @@@ static void tcm_qla2xxx_set_default_nod
  
  static u32 tcm_qla2xxx_get_task_tag(struct se_cmd *se_cmd)
  {
 -      struct qla_tgt_cmd *cmd = container_of(se_cmd,
 -                              struct qla_tgt_cmd, se_cmd);
 +      struct qla_tgt_cmd *cmd;
 +
 +      /* check for task mgmt cmd */
 +      if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
 +              return 0xffffffff;
 +
 +      cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
  
        return cmd->tag;
  }
@@@ -584,8 -562,6 +584,8 @@@ static void tcm_qla2xxx_handle_data_wor
         * Ensure that the complete FCP WRITE payload has been received.
         * Otherwise return an exception via CHECK_CONDITION status.
         */
 +      cmd->cmd_in_wq = 0;
 +      cmd->cmd_flags |= BIT_11;
        if (!cmd->write_data_transferred) {
                /*
                 * Check if se_cmd has already been aborted via LUN_RESET, and
   */
  static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
  {
 +      cmd->cmd_flags |= BIT_10;
 +      cmd->cmd_in_wq = 1;
        INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work);
        queue_work(tcm_qla2xxx_free_wq, &cmd->work);
  }
@@@ -659,7 -633,6 +659,7 @@@ static int tcm_qla2xxx_queue_data_in(st
        struct qla_tgt_cmd *cmd = container_of(se_cmd,
                                struct qla_tgt_cmd, se_cmd);
  
 +      cmd->cmd_flags |= BIT_4;
        cmd->bufflen = se_cmd->data_length;
        cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
        cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
        cmd->sg_cnt = se_cmd->t_data_nents;
        cmd->sg = se_cmd->t_data_sg;
        cmd->offset = 0;
 +      cmd->cmd_flags |= BIT_3;
  
        cmd->prot_sg_cnt = se_cmd->t_prot_nents;
        cmd->prot_sg = se_cmd->t_prot_sg;
@@@ -693,11 -665,6 +693,11 @@@ static int tcm_qla2xxx_queue_status(str
        cmd->offset = 0;
        cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
        cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
 +      if (cmd->cmd_flags &  BIT_5) {
 +              pr_crit("Bit_5 already set for cmd = %p.\n", cmd);
 +              dump_stack();
 +      }
 +      cmd->cmd_flags |= BIT_5;
  
        if (se_cmd->data_direction == DMA_FROM_DEVICE) {
                /*
@@@ -767,6 -734,10 +767,6 @@@ static void tcm_qla2xxx_aborted_task(st
        cmd->sg_mapped = 0;
  }
  
 -/* Local pointer to allocated TCM configfs fabric module */
 -struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
 -struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;
 -
  static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
                        struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *);
  /*
@@@ -786,7 -757,16 +786,16 @@@ static void tcm_qla2xxx_clear_nacl_from
        pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id);
  
        node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id);
-       WARN_ON(node && (node != se_nacl));
+       if (WARN_ON(node && (node != se_nacl))) {
+               /*
+                * The nacl no longer matches what we think it should be.
+                * Most likely a new dynamic acl has been added while
+                * someone dropped the hardware lock.  It clearly is a
+                * bug elsewhere, but this bit can't make things worse.
+                */
+               btree_insert32(&lport->lport_fcport_map, nacl->nport_id,
+                              node, GFP_ATOMIC);
+       }
  
        pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n",
            se_nacl, nacl->nport_wwnn, nacl->nport_id);
index 260c3e1e312c66310917238cada772696e457b6a,30f4a7d42e3220986b2e110c171eb7a21a280217..b19e4329ba00739503215124ec768e8c99d843c3
@@@ -3709,7 -3709,6 +3709,6 @@@ static inline void iscsit_thread_check_
        struct task_struct *p,
        int mode)
  {
-       char buf[128];
        /*
         * mode == 1 signals iscsi_target_tx_thread() usage.
         * mode == 0 signals iscsi_target_rx_thread() usage.
         * both TX and RX kthreads are scheduled to run on the
         * same CPU.
         */
-       memset(buf, 0, 128);
-       cpumask_scnprintf(buf, 128, conn->conn_cpumask);
        set_cpus_allowed_ptr(p, conn->conn_cpumask);
  }
  
@@@ -4326,8 -4323,7 +4323,7 @@@ int iscsit_close_connection
        if (conn->conn_tx_hash.tfm)
                crypto_free_hash(conn->conn_tx_hash.tfm);
  
-       if (conn->conn_cpumask)
-               free_cpumask_var(conn->conn_cpumask);
+       free_cpumask_var(conn->conn_cpumask);
  
        kfree(conn->conn_ops);
        conn->conn_ops = NULL;
@@@ -4540,7 -4536,6 +4536,7 @@@ static void iscsit_logout_post_handler_
  {
        struct iscsi_conn *l_conn;
        struct iscsi_session *sess = conn->sess;
 +      bool conn_found = false;
  
        if (!sess)
                return;
        list_for_each_entry(l_conn, &sess->sess_conn_list, conn_list) {
                if (l_conn->cid == cid) {
                        iscsit_inc_conn_usage_count(l_conn);
 +                      conn_found = true;
                        break;
                }
        }
        spin_unlock_bh(&sess->conn_lock);
  
 -      if (!l_conn)
 +      if (!conn_found)
                return;
  
        if (l_conn->sock)
index 73355f4fca745a71d0fa3cb66956148c64e41f11,5d611d7ba2827dc2b8b6df850a9ee2862b65b012..ce87ce9bdb9c59fd0d5d5eaf886acb7fc4c2b799
@@@ -400,8 -400,6 +400,8 @@@ struct iscsi_cmd *iscsit_find_cmd_from_
  
        spin_lock_bh(&conn->cmd_lock);
        list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
 +              if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT)
 +                      continue;
                if (cmd->init_task_tag == init_task_tag) {
                        spin_unlock_bh(&conn->cmd_lock);
                        return cmd;
@@@ -1481,8 -1479,9 +1481,9 @@@ void iscsit_collect_login_stats
                if (conn->param_list)
                        intrname = iscsi_find_param_from_key(INITIATORNAME,
                                                             conn->param_list);
-               strcpy(ls->last_intr_fail_name,
-                      (intrname ? intrname->value : "Unknown"));
+               strlcpy(ls->last_intr_fail_name,
+                      (intrname ? intrname->value : "Unknown"),
+                      sizeof(ls->last_intr_fail_name));
  
                ls->last_intr_fail_ip_family = conn->login_family;
  
index 756def38c77af934a3e16ef3ae9219e67ceb06cc,b30fc8f53bd820cb1111b7d8bb4ddbdea710b084..79f9296a08ae7195d859c5ba200b2b990f78c0de
@@@ -665,6 -665,9 +665,9 @@@ SE_DEV_ATTR(is_nonrot, S_IRUGO | S_IWUS
  DEF_DEV_ATTRIB(emulate_rest_reord);
  SE_DEV_ATTR(emulate_rest_reord, S_IRUGO | S_IWUSR);
  
+ DEF_DEV_ATTRIB(force_pr_aptpl);
+ SE_DEV_ATTR(force_pr_aptpl, S_IRUGO | S_IWUSR);
  DEF_DEV_ATTRIB_RO(hw_block_size);
  SE_DEV_ATTR_RO(hw_block_size);
  
@@@ -719,6 -722,7 +722,7 @@@ static struct configfs_attribute *targe
        &target_core_dev_attrib_hw_pi_prot_type.attr,
        &target_core_dev_attrib_pi_prot_format.attr,
        &target_core_dev_attrib_enforce_pr_isids.attr,
+       &target_core_dev_attrib_force_pr_aptpl.attr,
        &target_core_dev_attrib_is_nonrot.attr,
        &target_core_dev_attrib_emulate_rest_reord.attr,
        &target_core_dev_attrib_hw_block_size.attr,
@@@ -1263,7 -1267,7 +1267,7 @@@ static ssize_t target_core_dev_pr_store
  {
        unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL;
        unsigned char *t_fabric = NULL, *t_port = NULL;
-       char *orig, *ptr, *arg_p, *opts;
+       char *orig, *ptr, *opts;
        substring_t args[MAX_OPT_ARGS];
        unsigned long long tmp_ll;
        u64 sa_res_key = 0;
                token = match_token(ptr, tokens, args);
                switch (token) {
                case Opt_initiator_fabric:
-                       i_fabric = match_strdup(&args[0]);
+                       i_fabric = match_strdup(args);
                        if (!i_fabric) {
                                ret = -ENOMEM;
                                goto out;
                        }
                        break;
                case Opt_initiator_node:
-                       i_port = match_strdup(&args[0]);
+                       i_port = match_strdup(args);
                        if (!i_port) {
                                ret = -ENOMEM;
                                goto out;
                        }
                        break;
                case Opt_initiator_sid:
-                       isid = match_strdup(&args[0]);
+                       isid = match_strdup(args);
                        if (!isid) {
                                ret = -ENOMEM;
                                goto out;
                        }
                        break;
                case Opt_sa_res_key:
-                       arg_p = match_strdup(&args[0]);
-                       if (!arg_p) {
-                               ret = -ENOMEM;
-                               goto out;
-                       }
-                       ret = kstrtoull(arg_p, 0, &tmp_ll);
+                       ret = kstrtoull(args->from, 0, &tmp_ll);
                        if (ret < 0) {
-                               pr_err("kstrtoull() failed for"
-                                       " sa_res_key=\n");
+                               pr_err("kstrtoull() failed for sa_res_key=\n");
                                goto out;
                        }
                        sa_res_key = (u64)tmp_ll;
                 * PR APTPL Metadata for Target Port
                 */
                case Opt_target_fabric:
-                       t_fabric = match_strdup(&args[0]);
+                       t_fabric = match_strdup(args);
                        if (!t_fabric) {
                                ret = -ENOMEM;
                                goto out;
                        }
                        break;
                case Opt_target_node:
-                       t_port = match_strdup(&args[0]);
+                       t_port = match_strdup(args);
                        if (!t_port) {
                                ret = -ENOMEM;
                                goto out;
@@@ -2363,7 -2361,7 +2361,7 @@@ static ssize_t target_core_alua_tg_pt_g
                pr_err("Invalid value '%ld', must be '0' or '1'\n", tmp); \
                return -EINVAL;                                         \
        }                                                               \
 -      if (!tmp)                                                       \
 +      if (tmp)                                                        \
                t->_var |= _bit;                                        \
        else                                                            \
                t->_var &= ~_bit;                                       \
index 70d9f6dabba067b66c8b66fafa79242339315294,a1690a3fdd7ff1dfce145981008b21e7e776e761..7c8291f0bbbce5f519bce28b1c6db8d4269a62bd
@@@ -749,14 -749,18 +749,18 @@@ static ssize_t pscsi_set_configfs_dev_p
                                ret = -EINVAL;
                                goto out;
                        }
-                       match_int(args, &arg);
+                       ret = match_int(args, &arg);
+                       if (ret)
+                               goto out;
                        pdv->pdv_host_id = arg;
                        pr_debug("PSCSI[%d]: Referencing SCSI Host ID:"
                                " %d\n", phv->phv_host_id, pdv->pdv_host_id);
                        pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID;
                        break;
                case Opt_scsi_channel_id:
-                       match_int(args, &arg);
+                       ret = match_int(args, &arg);
+                       if (ret)
+                               goto out;
                        pdv->pdv_channel_id = arg;
                        pr_debug("PSCSI[%d]: Referencing SCSI Channel"
                                " ID: %d\n",  phv->phv_host_id,
                        pdv->pdv_flags |= PDF_HAS_CHANNEL_ID;
                        break;
                case Opt_scsi_target_id:
-                       match_int(args, &arg);
+                       ret = match_int(args, &arg);
+                       if (ret)
+                               goto out;
                        pdv->pdv_target_id = arg;
                        pr_debug("PSCSI[%d]: Referencing SCSI Target"
                                " ID: %d\n", phv->phv_host_id,
                        pdv->pdv_flags |= PDF_HAS_TARGET_ID;
                        break;
                case Opt_scsi_lun_id:
-                       match_int(args, &arg);
+                       ret = match_int(args, &arg);
+                       if (ret)
+                               goto out;
                        pdv->pdv_lun_id = arg;
                        pr_debug("PSCSI[%d]: Referencing SCSI LUN ID:"
                                " %d\n", phv->phv_host_id, pdv->pdv_lun_id);
@@@ -1050,7 -1058,7 +1058,7 @@@ pscsi_execute_cmd(struct se_cmd *cmd
                req = blk_get_request(pdv->pdv_sd->request_queue,
                                (data_direction == DMA_TO_DEVICE),
                                GFP_KERNEL);
 -              if (!req) {
 +              if (IS_ERR(req)) {
                        pr_err("PSCSI: blk_get_request() failed\n");
                        ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                        goto fail;
index be783f717f1923013f44caa03482e064d51510c6,aa2b2d0998e30e8fd2e2966b77c9a2d938df29ac..0696de9553d3d7dea5b8e0f1f874f5f5b91ab821
@@@ -40,6 -40,7 +40,7 @@@
  #include <target/target_core_fabric.h>
  
  #include "target_core_internal.h"
+ #include "target_core_pr.h"
  
  extern struct se_device *g_lun0_dev;
  
@@@ -166,6 -167,13 +167,13 @@@ void core_tpg_add_node_to_devs
  
                core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
                                lun_access, acl, tpg);
+               /*
+                * Check to see if there are any existing persistent reservation
+                * APTPL pre-registrations that need to be enabled for this dynamic
+                * LUN ACL now..
+                */
+               core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
+                                                   lun->unpacked_lun);
                spin_lock(&tpg->tpg_lun_lock);
        }
        spin_unlock(&tpg->tpg_lun_lock);
@@@ -335,7 -343,7 +343,7 @@@ void core_tpg_clear_object_luns(struct 
                        continue;
  
                spin_unlock(&tpg->tpg_lun_lock);
-               core_dev_del_lun(tpg, lun->unpacked_lun);
+               core_dev_del_lun(tpg, lun);
                spin_lock(&tpg->tpg_lun_lock);
        }
        spin_unlock(&tpg->tpg_lun_lock);
@@@ -663,13 -671,6 +671,6 @@@ static int core_tpg_setup_virtual_lun0(
        return 0;
  }
  
- static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
- {
-       struct se_lun *lun = &se_tpg->tpg_virt_lun0;
-       core_tpg_post_dellun(se_tpg, lun);
- }
  int core_tpg_register(
        struct target_core_fabric_ops *tfo,
        struct se_wwn *se_wwn,
@@@ -773,7 -774,7 +774,7 @@@ int core_tpg_deregister(struct se_porta
        spin_unlock_irq(&se_tpg->acl_node_lock);
  
        if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
-               core_tpg_release_virtual_lun0(se_tpg);
+               core_tpg_remove_lun(se_tpg, &se_tpg->tpg_virt_lun0);
  
        se_tpg->se_tpg_fabric_ptr = NULL;
        array_free(se_tpg->tpg_lun_list, TRANSPORT_MAX_LUNS_PER_TPG);
@@@ -819,8 -820,7 +820,8 @@@ int core_tpg_add_lun
  {
        int ret;
  
 -      ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release);
 +      ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
 +                            GFP_KERNEL);
        if (ret < 0)
                return ret;
  
        return 0;
  }
  
- struct se_lun *core_tpg_pre_dellun(
-       struct se_portal_group *tpg,
-       u32 unpacked_lun)
- {
-       struct se_lun *lun;
-       if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
-               pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
-                       "-1: %u for Target Portal Group: %u\n",
-                       tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
-                       TRANSPORT_MAX_LUNS_PER_TPG-1,
-                       tpg->se_tpg_tfo->tpg_get_tag(tpg));
-               return ERR_PTR(-EOVERFLOW);
-       }
-       spin_lock(&tpg->tpg_lun_lock);
-       lun = tpg->tpg_lun_list[unpacked_lun];
-       if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
-               pr_err("%s Logical Unit Number: %u is not active on"
-                       " Target Portal Group: %u, ignoring request.\n",
-                       tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
-                       tpg->se_tpg_tfo->tpg_get_tag(tpg));
-               spin_unlock(&tpg->tpg_lun_lock);
-               return ERR_PTR(-ENODEV);
-       }
-       spin_unlock(&tpg->tpg_lun_lock);
-       return lun;
- }
- int core_tpg_post_dellun(
+ void core_tpg_remove_lun(
        struct se_portal_group *tpg,
        struct se_lun *lun)
  {
        spin_unlock(&tpg->tpg_lun_lock);
  
        percpu_ref_exit(&lun->lun_ref);
-       return 0;
  }
index 6cad97485bad7fd5803f2f48aa1d8c1894208be1,6ebd0d1faf2ea527e1f00d56755b1ee97066f952..b70237e8bc37b49bb9905669fb5d109ed17300fb
@@@ -67,8 -67,6 +67,8 @@@ header-y += bfs_fs.
  header-y += binfmts.h
  header-y += blkpg.h
  header-y += blktrace_api.h
 +header-y += bpf.h
 +header-y += bpf_common.h
  header-y += bpqether.h
  header-y += bsg.h
  header-y += btrfs.h
@@@ -356,7 -354,6 +356,7 @@@ header-y += serio.
  header-y += shm.h
  header-y += signal.h
  header-y += signalfd.h
 +header-y += smiapp.h
  header-y += snmp.h
  header-y += sock_diag.h
  header-y += socket.h
@@@ -374,6 -371,7 +374,7 @@@ header-y += swab.
  header-y += synclink.h
  header-y += sysctl.h
  header-y += sysinfo.h
+ header-y += target_core_user.h
  header-y += taskstats.h
  header-y += tcp.h
  header-y += tcp_metrics.h