// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */
#include <linux/mempool.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
#include "fnic_io.h"
#include "fnic.h"
const char *fnic_state_str[] = {
	[FNIC_IN_FC_MODE] = "FNIC_IN_FC_MODE",
	[FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE",
	[FNIC_IN_ETH_MODE] = "FNIC_IN_ETH_MODE",
	[FNIC_IN_ETH_TRANS_FC_MODE] = "FNIC_IN_ETH_TRANS_FC_MODE",
};
static const char *fnic_ioreq_state_str[] = {
	[FNIC_IOREQ_NOT_INITED] = "FNIC_IOREQ_NOT_INITED",
	[FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
	[FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
	[FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
	[FNIC_IOREQ_CMD_COMPLETE] = "FNIC_IOREQ_CMD_COMPLETE",
};
static const char *fcpio_status_str[] = {
	[FCPIO_SUCCESS] = "FCPIO_SUCCESS", /*0x0*/
	[FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER",
	[FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE",
	[FCPIO_INVALID_PARAM] = "FCPIO_INVALID_PARAM",
	[FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED",
	[FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND",
	[FCPIO_ABORTED] = "FCPIO_ABORTED", /*0x41*/
	[FCPIO_TIMEOUT] = "FCPIO_TIMEOUT",
	[FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID",
	[FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID",
	[FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH",
	[FCPIO_FW_ERR] = "FCPIO_FW_ERR",
	[FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED",
	[FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED",
	[FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN",
	[FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED",
	[FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL",
	[FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED",
	[FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNMAP_CHNG_PEND",
};
const char *fnic_state_to_str(unsigned int state)
{
	if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state])
		return "unknown";

	return fnic_state_str[state];
}

static const char *fnic_ioreq_state_to_str(unsigned int state)
{
	if (state >= ARRAY_SIZE(fnic_ioreq_state_str) ||
	    !fnic_ioreq_state_str[state])
		return "unknown";

	return fnic_ioreq_state_str[state];
}

static const char *fnic_fcpio_status_to_str(unsigned int status)
{
	if (status >= ARRAY_SIZE(fcpio_status_str) || !fcpio_status_str[status])
		return "unknown";

	return fcpio_status_str[status];
}
static void fnic_cleanup_io(struct fnic *fnic);

static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
					    struct scsi_cmnd *sc)
{
	u32 hash = scsi_cmd_to_rq(sc)->tag & (FNIC_IO_LOCKS - 1);

	return &fnic->io_req_lock[hash];
}

static inline spinlock_t *fnic_io_lock_tag(struct fnic *fnic,
					   int tag)
{
	return &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)];
}
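
/*
 * Note: both helpers assume FNIC_IO_LOCKS is a power of two, so the
 * "& (FNIC_IO_LOCKS - 1)" mask is a cheap modulo that spreads block-layer
 * tags evenly across the per-IO spinlock array.
 */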
/*
 * Unmap the data buffer and sense buffer for an io_req,
 * also unmap and free the device-private scatter/gather list.
 */
static void fnic_release_ioreq_buf(struct fnic *fnic,
				   struct fnic_io_req *io_req,
				   struct scsi_cmnd *sc)
{
	if (io_req->sgl_list_pa)
		dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
				 sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
				 DMA_TO_DEVICE);
	scsi_dma_unmap(sc);

	if (io_req->sgl_cnt)
		mempool_free(io_req->sgl_list_alloc,
			     fnic->io_sgl_pool[io_req->sgl_type]);
	if (io_req->sense_buf_pa)
		dma_unmap_single(&fnic->pdev->dev, io_req->sense_buf_pa,
				 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
}
/* Free up Copy Wq descriptors. Called with copy_wq lock held */
static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
{
	/* if no Ack received from firmware, then nothing to clean */
	if (!fnic->fw_ack_recd[0])
		return 1;

	/*
	 * Update desc_avail count based on number of freed descriptors
	 * Account for wraparound
	 */
	if (wq->to_clean_index <= fnic->fw_ack_index[0])
		wq->ring.desc_avail += (fnic->fw_ack_index[0]
					- wq->to_clean_index + 1);
	else
		wq->ring.desc_avail += (wq->ring.desc_count
					- wq->to_clean_index
					+ fnic->fw_ack_index[0] + 1);

	/*
	 * just bump clean index to ack_index+1 accounting for wraparound
	 * this will essentially free up all descriptors between
	 * to_clean_index and fw_ack_index, both inclusive
	 */
	wq->to_clean_index =
		(fnic->fw_ack_index[0] + 1) % wq->ring.desc_count;

	/* we have processed the acks received so far */
	fnic->fw_ack_recd[0] = 0;
	return 0;
}
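
/*
 * Worked example of the wraparound math above (illustrative values): with
 * desc_count = 8, to_clean_index = 6 and fw_ack_index = 1, the else branch
 * credits 8 - 6 + 1 + 1 = 4 descriptors (slots 6, 7, 0, 1) back to
 * desc_avail, and to_clean_index advances to (1 + 1) % 8 = 2.
 */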
/**
 * __fnic_set_state_flags
 * Sets/Clears bits in fnic's state_flags
 **/
void
__fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
		       unsigned long clearbits)
{
	unsigned long flags = 0;
	unsigned long host_lock_flags = 0;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	spin_lock_irqsave(fnic->lport->host->host_lock, host_lock_flags);

	if (clearbits)
		fnic->state_flags &= ~st_flags;
	else
		fnic->state_flags |= st_flags;

	spin_unlock_irqrestore(fnic->lport->host->host_lock, host_lock_flags);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
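
/*
 * Lock-ordering note: fnic_lock is taken before the SCSI host_lock here,
 * so any path that needs both locks must acquire them in the same order to
 * avoid an AB-BA deadlock.
 */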
/*
 * fnic_fw_reset_handler
 * Routine to send reset msg to fw
 */
int fnic_fw_reset_handler(struct fnic *fnic)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	int ret = 0;
	unsigned long flags;

	/* indicate fwreset to io path */
	fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET);

	skb_queue_purge(&fnic->frame_queue);
	skb_queue_purge(&fnic->tx_queue);

	/* wait for io cmpl */
	while (atomic_read(&fnic->in_flight))
		schedule_timeout(msecs_to_jiffies(1));

	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq))
		ret = -EAGAIN;
	else {
		fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
		atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
		if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
		    atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
			atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
				     atomic64_read(
					&fnic->fnic_stats.fw_stats.active_fw_reqs));
	}

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);

	if (!ret) {
		atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Issued fw reset\n");
	} else {
		fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Failed to issue fw reset\n");
	}

	return ret;
}
/*
 * fnic_flogi_reg_handler
 * Routine to send flogi register msg to fw
 */
int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	enum fcpio_flogi_reg_format_type format;
	struct fc_lport *lp = fnic->lport;
	u8 gw_mac[ETH_ALEN];
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq)) {
		ret = -EAGAIN;
		goto flogi_reg_ioreq_end;
	}

	if (fnic->ctlr.map_dest) {
		eth_broadcast_addr(gw_mac);
		format = FCPIO_FLOGI_REG_DEF_DEST;
	} else {
		memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN);
		format = FCPIO_FLOGI_REG_GW_DEST;
	}

	if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) {
		fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG,
						fc_id, gw_mac,
						fnic->data_src_addr,
						lp->r_a_tov, lp->e_d_tov);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "FLOGI FIP reg issued fcid %x src %pM dest %pM\n",
			      fc_id, fnic->data_src_addr, gw_mac);
	} else {
		fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
						  format, fc_id, gw_mac);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "FLOGI reg issued fcid %x map %d dest %pM\n",
			      fc_id, fnic->ctlr.map_dest, gw_mac);
	}

	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
	    atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
			     atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

flogi_reg_ioreq_end:
	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
	return ret;
}
/*
 * fnic_queue_wq_copy_desc
 * Routine to enqueue a wq copy desc
 */
static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
					  struct vnic_wq_copy *wq,
					  struct fnic_io_req *io_req,
					  struct scsi_cmnd *sc,
					  int sg_count)
{
	struct scatterlist *sg;
	struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct host_sg_desc *desc;
	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
	unsigned int i;
	unsigned long intr_flags;
	int flags;
	u8 exch_flags;
	struct scsi_lun fc_lun;

	if (sg_count) {
		/* For each SGE, create a device desc entry */
		desc = io_req->sgl_list;
		for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
			desc->addr = cpu_to_le64(sg_dma_address(sg));
			desc->len = cpu_to_le32(sg_dma_len(sg));
			desc->_resvd = 0;
			desc++;
		}

		io_req->sgl_list_pa = dma_map_single(&fnic->pdev->dev,
				io_req->sgl_list,
				sizeof(io_req->sgl_list[0]) * sg_count,
				DMA_TO_DEVICE);
		if (dma_mapping_error(&fnic->pdev->dev, io_req->sgl_list_pa)) {
			printk(KERN_ERR "DMA mapping failed\n");
			return SCSI_MLQUEUE_HOST_BUSY;
		}
	}

	io_req->sense_buf_pa = dma_map_single(&fnic->pdev->dev,
					      sc->sense_buffer,
					      SCSI_SENSE_BUFFERSIZE,
					      DMA_FROM_DEVICE);
	if (dma_mapping_error(&fnic->pdev->dev, io_req->sense_buf_pa)) {
		dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
				 sizeof(io_req->sgl_list[0]) * sg_count,
				 DMA_TO_DEVICE);
		printk(KERN_ERR "DMA mapping failed\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	int_to_scsilun(sc->device->lun, &fc_lun);

	/* Enqueue the descriptor in the Copy WQ */
	spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
		spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
		FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
			      "fnic_queue_wq_copy_desc failure - no descriptors\n");
		atomic64_inc(&misc_stats->io_cpwq_alloc_failures);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	flags = 0;
	if (sc->sc_data_direction == DMA_FROM_DEVICE)
		flags = FCPIO_ICMND_RDDATA;
	else if (sc->sc_data_direction == DMA_TO_DEVICE)
		flags = FCPIO_ICMND_WRDATA;

	exch_flags = 0;
	if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) &&
	    (rp->flags & FC_RP_FLAGS_RETRY))
		exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;

	fnic_queue_wq_copy_desc_icmnd_16(wq, scsi_cmd_to_rq(sc)->tag,
					 0, exch_flags, io_req->sgl_cnt,
					 SCSI_SENSE_BUFFERSIZE,
					 io_req->sgl_list_pa,
					 io_req->sense_buf_pa,
					 0, /* scsi cmd ref, always 0 */
					 FCPIO_ICMND_PTA_SIMPLE,
						/* scsi pri and tag */
					 flags,	/* command flags */
					 sc->cmnd, sc->cmd_len,
					 scsi_bufflen(sc),
					 fc_lun.scsi_lun, io_req->port_id,
					 rport->maxframe_size, rp->r_a_tov,
					 rp->e_d_tov);

	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
	    atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
			     atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
	return 0;
}
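
/*
 * Every request in this path is funneled through copy WQ 0; wq_copy[0] and
 * wq_copy_lock[0] are hard-coded throughout the submission, abort and
 * reset paths alike.
 */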
/*
 * fnic_queuecommand
 * Routine to send a scsi cdb
 * Called with host_lock held and interrupts disabled.
 */
static int fnic_queuecommand_lck(struct scsi_cmnd *sc)
{
	void (*done)(struct scsi_cmnd *) = scsi_done;
	const int tag = scsi_cmd_to_rq(sc)->tag;
	struct fc_lport *lp = shost_priv(sc->device->host);
	struct fc_rport *rport;
	struct fnic_io_req *io_req = NULL;
	struct fnic *fnic = lport_priv(lp);
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	struct vnic_wq_copy *wq;
	int ret;
	u64 cmd_trace;
	int sg_count = 0;
	unsigned long flags = 0;
	unsigned long ptr;
	spinlock_t *io_lock = NULL;
	int io_lock_acquired = 0;
	struct fc_rport_libfc_priv *rp;

	if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
		return SCSI_MLQUEUE_HOST_BUSY;

	if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET)))
		return SCSI_MLQUEUE_HOST_BUSY;

	rport = starget_to_rport(scsi_target(sc->device));
	if (!rport) {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "returning DID_NO_CONNECT for IO as rport is NULL\n");
		sc->result = DID_NO_CONNECT << 16;
		done(sc);
		return 0;
	}

	ret = fc_remote_port_chkready(rport);
	if (ret) {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "rport is not ready\n");
		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
		sc->result = ret;
		done(sc);
		return 0;
	}

	rp = rport->dd_data;
	if (!rp || rp->rp_state == RPORT_ST_DELETE) {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "rport 0x%x removed, returning DID_NO_CONNECT\n",
			      rport->port_id);

		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
		sc->result = DID_NO_CONNECT<<16;
		done(sc);
		return 0;
	}

	if (rp->rp_state != RPORT_ST_READY) {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "rport 0x%x in state 0x%x, returning DID_IMM_RETRY\n",
			      rport->port_id, rp->rp_state);

		sc->result = DID_IMM_RETRY << 16;
		done(sc);
		return 0;
	}

	if (lp->state != LPORT_ST_READY || !(lp->link_up))
		return SCSI_MLQUEUE_HOST_BUSY;

	atomic_inc(&fnic->in_flight);

	/*
	 * Release host lock, use driver resource specific locks from here.
	 * Don't re-enable interrupts in case they were disabled prior to the
	 * caller disabling them.
	 */
	spin_unlock(lp->host->host_lock);
	fnic_priv(sc)->state = FNIC_IOREQ_NOT_INITED;
	fnic_priv(sc)->flags = FNIC_NO_FLAGS;

	/* Get a new io_req for this SCSI IO */
	io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
	if (!io_req) {
		atomic64_inc(&fnic_stats->io_stats.alloc_failures);
		ret = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	memset(io_req, 0, sizeof(*io_req));

	/* Map the data buffer */
	sg_count = scsi_dma_map(sc);
	if (sg_count < 0) {
		FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
			   tag, sc, 0, sc->cmnd[0], sg_count, fnic_priv(sc)->state);
		mempool_free(io_req, fnic->io_req_pool);
		goto out;
	}

	/* Determine the type of scatter/gather list we need */
	io_req->sgl_cnt = sg_count;
	io_req->sgl_type = FNIC_SGL_CACHE_DFLT;
	if (sg_count > FNIC_DFLT_SG_DESC_CNT)
		io_req->sgl_type = FNIC_SGL_CACHE_MAX;

	if (sg_count) {
		io_req->sgl_list =
			mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
				      GFP_ATOMIC);
		if (!io_req->sgl_list) {
			atomic64_inc(&fnic_stats->io_stats.alloc_failures);
			ret = SCSI_MLQUEUE_HOST_BUSY;
			scsi_dma_unmap(sc);
			mempool_free(io_req, fnic->io_req_pool);
			goto out;
		}

		/* Cache sgl list allocated address before alignment */
		io_req->sgl_list_alloc = io_req->sgl_list;
		ptr = (unsigned long) io_req->sgl_list;
		if (ptr % FNIC_SG_DESC_ALIGN) {
			io_req->sgl_list = (struct host_sg_desc *)
				(((unsigned long) ptr
				  + FNIC_SG_DESC_ALIGN - 1)
				 & ~(FNIC_SG_DESC_ALIGN - 1));
		}
	}

	/*
	 * Will acquire lock before setting to IO initialized.
	 */

	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);

	/* initialize rest of io_req */
	io_lock_acquired = 1;
	io_req->port_id = rport->port_id;
	io_req->start_time = jiffies;
	fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING;
	fnic_priv(sc)->io_req = io_req;
	fnic_priv(sc)->flags |= FNIC_IO_INITIALIZED;

	/* create copy wq desc and enqueue it */
	wq = &fnic->wq_copy[0];
	ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count);
	if (ret) {
		/*
		 * In case another thread cancelled the request,
		 * refetch the pointer under the lock.
		 */
		FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
			   tag, sc, 0, 0, 0, fnic_flags_and_state(sc));
		io_req = fnic_priv(sc)->io_req;
		fnic_priv(sc)->io_req = NULL;
		fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;
		spin_unlock_irqrestore(io_lock, flags);
		if (io_req) {
			fnic_release_ioreq_buf(fnic, io_req, sc);
			mempool_free(io_req, fnic->io_req_pool);
		}
		atomic_dec(&fnic->in_flight);
		/* acquire host lock before returning to SCSI */
		spin_lock(lp->host->host_lock);
		return ret;
	} else {
		atomic64_inc(&fnic_stats->io_stats.active_ios);
		atomic64_inc(&fnic_stats->io_stats.num_ios);
		if (atomic64_read(&fnic_stats->io_stats.active_ios) >
		    atomic64_read(&fnic_stats->io_stats.max_active_ios))
			atomic64_set(&fnic_stats->io_stats.max_active_ios,
				     atomic64_read(&fnic_stats->io_stats.active_ios));

		/* REVISIT: Use per IO lock in the final code */
		fnic_priv(sc)->flags |= FNIC_IO_ISSUED;
	}
out:
	cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 |
		     (u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 |
		     (u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 |
		     sc->cmnd[5]);

	FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
		   tag, sc, io_req, sg_count, cmd_trace,
		   fnic_flags_and_state(sc));

	/* if only we issued IO, will we have the io lock */
	if (io_lock_acquired)
		spin_unlock_irqrestore(io_lock, flags);

	atomic_dec(&fnic->in_flight);
	/* acquire host lock before returning to SCSI */
	spin_lock(lp->host->host_lock);
	return ret;
}

DEF_SCSI_QCMD(fnic_queuecommand)
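
/*
 * DEF_SCSI_QCMD() generates the fnic_queuecommand() entry point: a wrapper
 * that takes the Scsi_Host lock with interrupts disabled and then calls
 * fnic_queuecommand_lck(), matching the locking contract documented above.
 */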
/*
 * fnic_fcpio_fw_reset_cmpl_handler
 * Routine to handle fw reset completion
 */
static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
					    struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	int ret = 0;
	unsigned long flags;
	struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;

	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);

	atomic64_inc(&reset_stats->fw_reset_completions);

	/* Clean up all outstanding io requests */
	fnic_cleanup_io(fnic);

	atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
	atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);
	atomic64_set(&fnic->io_cmpl_skip, 0);

	spin_lock_irqsave(&fnic->fnic_lock, flags);

	/* fnic should be in FC_TRANS_ETH_MODE */
	if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
		/* Check status of reset completion */
		if (!hdr_status) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "reset cmpl success\n");
			/* Ready to send flogi out */
			fnic->state = FNIC_IN_ETH_MODE;
		} else {
			FNIC_SCSI_DBG(KERN_DEBUG,
				      fnic->lport->host,
				      "fnic fw_reset : failed %s\n",
				      fnic_fcpio_status_to_str(hdr_status));

			/*
			 * Unable to change to eth mode, cannot send out flogi
			 * Change state to fc mode, so that subsequent Flogi
			 * requests from libFC will cause more attempts to
			 * reset the firmware. Free the cached flogi
			 */
			fnic->state = FNIC_IN_FC_MODE;
			atomic64_inc(&reset_stats->fw_reset_failures);
			ret = -1;
		}
	} else {
		FNIC_SCSI_DBG(KERN_DEBUG,
			      fnic->lport->host,
			      "Unexpected state %s while processing"
			      " reset cmpl\n", fnic_state_to_str(fnic->state));
		atomic64_inc(&reset_stats->fw_reset_failures);
		ret = -1;
	}

	/* Thread removing device blocks till firmware reset is complete */
	if (fnic->remove_wait)
		complete(fnic->remove_wait);

	/*
	 * If fnic is being removed, or fw reset failed
	 * free the flogi frame. Else, send it out
	 */
	if (fnic->remove_wait || ret) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		skb_queue_purge(&fnic->tx_queue);
		goto reset_cmpl_handler_end;
	}

	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	fnic_flush_tx(fnic);

 reset_cmpl_handler_end:
	fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);

	return ret;
}
/*
 * fnic_fcpio_flogi_reg_cmpl_handler
 * Routine to handle flogi register completion
 */
static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
					     struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	int ret = 0;
	unsigned long flags;

	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);

	/* Update fnic state based on status of flogi reg completion */
	spin_lock_irqsave(&fnic->fnic_lock, flags);

	if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) {

		/* Check flogi registration completion status */
		if (!hdr_status) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "flogi reg succeeded\n");
			fnic->state = FNIC_IN_FC_MODE;
		} else {
			FNIC_SCSI_DBG(KERN_DEBUG,
				      fnic->lport->host,
				      "fnic flogi reg :failed %s\n",
				      fnic_fcpio_status_to_str(hdr_status));
			fnic->state = FNIC_IN_ETH_MODE;
			ret = -1;
		}
	} else {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Unexpected fnic state %s while"
			      " processing flogi reg completion\n",
			      fnic_state_to_str(fnic->state));
		ret = -1;
	}

	if (!ret) {
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			goto reg_cmpl_handler_end;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		fnic_flush_tx(fnic);
		queue_work(fnic_event_queue, &fnic->frame_work);
	} else {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	}

reg_cmpl_handler_end:
	return ret;
}
static inline int is_ack_index_in_range(struct vnic_wq_copy *wq,
					u16 request_out)
{
	if (wq->to_clean_index <= wq->to_use_index) {
		/* out of range, stale request_out index */
		if (request_out < wq->to_clean_index ||
		    request_out >= wq->to_use_index)
			return 0;
	} else {
		/* out of range, stale request_out index */
		if (request_out < wq->to_clean_index &&
		    request_out >= wq->to_use_index)
			return 0;
	}
	/* request_out index is in range */
	return 1;
}
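
/*
 * Illustrative values: with to_clean_index = 2 and to_use_index = 5, only
 * request_out values 2..4 are accepted. In the wrapped case
 * (to_clean_index = 6, to_use_index = 2 on an 8-entry ring) the valid set
 * wraps around: 6, 7, 0, 1.
 */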
/*
 * Mark that ack received and store the Ack index. If there are multiple
 * acks received before Tx thread cleans it up, the latest value will be
 * used which is correct behavior. This state should be in the copy Wq
 * instead of in the fnic
 */
static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
					  unsigned int cq_index,
					  struct fcpio_fw_req *desc)
{
	struct vnic_wq_copy *wq;
	u16 request_out = desc->u.ack.request_out;
	unsigned long flags;
	u64 *ox_id_tag = (u64 *)(void *)desc;

	/* mark the ack state */
	wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	fnic->fnic_stats.misc_stats.last_ack_time = jiffies;
	if (is_ack_index_in_range(wq, request_out)) {
		fnic->fw_ack_index[0] = request_out;
		fnic->fw_ack_recd[0] = 1;
	} else {
		atomic64_inc(
			&fnic->fnic_stats.misc_stats.ack_index_out_of_range);
	}

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
	FNIC_TRACE(fnic_fcpio_ack_handler,
		   fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
		   ox_id_tag[4], ox_id_tag[5]);
}
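
/*
 * The cq_index arithmetic above inverts the mapping used in
 * fnic_wq_copy_cmpl_handler() below (cq_index = i + raw_wq_count +
 * rq_count), recovering the copy WQ that this completion queue serves.
 */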
/*
 * fnic_fcpio_icmnd_cmpl_handler
 * Routine to handle icmnd completions
 */
static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
					  struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	u32 id;
	u64 xfer_len = 0;
	struct fcpio_icmnd_cmpl *icmnd_cmpl;
	struct fnic_io_req *io_req;
	struct scsi_cmnd *sc;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	unsigned long flags;
	spinlock_t *io_lock;
	u64 cmd_trace;
	unsigned long start_time;
	unsigned long io_duration_time;

	/* Decode the cmpl description to get the io_req id */
	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
	fcpio_tag_id_dec(&tag, &id);
	icmnd_cmpl = &desc->u.icmnd_cmpl;

	if (id >= fnic->fnic_max_tag_id) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Tag out of range tag %x hdr status = %s\n",
			     id, fnic_fcpio_status_to_str(hdr_status));
		return;
	}

	sc = scsi_host_find_tag(fnic->lport->host, id);
	WARN_ON_ONCE(!sc);
	if (!sc) {
		atomic64_inc(&fnic_stats->io_stats.sc_null);
		shost_printk(KERN_ERR, fnic->lport->host,
			     "icmnd_cmpl sc is null - "
			     "hdr status = %s tag = 0x%x desc = 0x%p\n",
			     fnic_fcpio_status_to_str(hdr_status), id, desc);
		FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
			   fnic->lport->host->host_no, id,
			   ((u64)icmnd_cmpl->_resvd0[1] << 16 |
			    (u64)icmnd_cmpl->_resvd0[0]),
			   ((u64)hdr_status << 16 |
			    (u64)icmnd_cmpl->scsi_status << 8 |
			    (u64)icmnd_cmpl->flags), desc,
			   (u64)icmnd_cmpl->residual, 0);
		return;
	}

	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);
	io_req = fnic_priv(sc)->io_req;
	WARN_ON_ONCE(!io_req);
	if (!io_req) {
		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
		fnic_priv(sc)->flags |= FNIC_IO_REQ_NULL;
		spin_unlock_irqrestore(io_lock, flags);
		shost_printk(KERN_ERR, fnic->lport->host,
			     "icmnd_cmpl io_req is null - "
			     "hdr status = %s tag = 0x%x sc 0x%p\n",
			     fnic_fcpio_status_to_str(hdr_status), id, sc);
		return;
	}
	start_time = io_req->start_time;

	/* firmware completed the io */
	io_req->io_completed = 1;

	/*
	 *  if SCSI-ML has already issued abort on this command,
	 *  set completion of the IO. The abts path will clean it up
	 */
	if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {

		/*
		 * set the FNIC_IO_DONE so that this doesn't get
		 * flagged as 'out of order' if it was not aborted
		 */
		fnic_priv(sc)->flags |= FNIC_IO_DONE;
		fnic_priv(sc)->flags |= FNIC_IO_ABTS_PENDING;
		spin_unlock_irqrestore(io_lock, flags);
		if (hdr_status == FCPIO_ABORTED)
			fnic_priv(sc)->flags |= FNIC_IO_ABORTED;

		FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
			      "icmnd_cmpl abts pending "
			      "hdr status = %s tag = 0x%x sc = 0x%p "
			      "scsi_status = %x residual = %d\n",
			      fnic_fcpio_status_to_str(hdr_status),
			      id, sc,
			      icmnd_cmpl->scsi_status,
			      icmnd_cmpl->residual);
		return;
	}

	/* Mark the IO as complete */
	fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;

	icmnd_cmpl = &desc->u.icmnd_cmpl;

	switch (hdr_status) {
	case FCPIO_SUCCESS:
		sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status;
		xfer_len = scsi_bufflen(sc);

		if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER) {
			xfer_len -= icmnd_cmpl->residual;
			scsi_set_resid(sc, icmnd_cmpl->residual);
		}

		if (icmnd_cmpl->scsi_status == SAM_STAT_CHECK_CONDITION)
			atomic64_inc(&fnic_stats->misc_stats.check_condition);

		if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
			atomic64_inc(&fnic_stats->misc_stats.queue_fulls);
		break;

	case FCPIO_TIMEOUT:          /* request was timed out */
		atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout);
		sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_ABORTED:          /* request was aborted */
		atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
		atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch);
		scsi_set_resid(sc, icmnd_cmpl->residual);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_OUT_OF_RESOURCE:  /* out of resources to complete request */
		atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources);
		sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_IO_NOT_FOUND:     /* requested I/O was not found */
		atomic64_inc(&fnic_stats->io_stats.io_not_found);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_SGL_INVALID:      /* request was aborted due to sgl error */
		atomic64_inc(&fnic_stats->misc_stats.sgl_invalid);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_FW_ERR:           /* request was terminated due fw error */
		atomic64_inc(&fnic_stats->fw_stats.io_fw_errs);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_MSS_INVALID:      /* request was aborted due to mss error */
		atomic64_inc(&fnic_stats->misc_stats.mss_invalid);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_INVALID_HEADER:   /* header contains invalid data */
	case FCPIO_INVALID_PARAM:    /* some parameter in request invalid */
	case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
	default:
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;
	}

	/* Break link with the SCSI command */
	fnic_priv(sc)->io_req = NULL;
	fnic_priv(sc)->flags |= FNIC_IO_DONE;

	if (hdr_status != FCPIO_SUCCESS) {
		atomic64_inc(&fnic_stats->io_stats.io_failures);
		shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
			     fnic_fcpio_status_to_str(hdr_status));
	}

	fnic_release_ioreq_buf(fnic, io_req, sc);

	cmd_trace = ((u64)hdr_status << 56) |
		    (u64)icmnd_cmpl->scsi_status << 48 |
		    (u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 |
		    (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
		    (u64)sc->cmnd[4] << 8 | sc->cmnd[5];

	FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
		   sc->device->host->host_no, id, sc,
		   ((u64)icmnd_cmpl->_resvd0[1] << 56 |
		    (u64)icmnd_cmpl->_resvd0[0] << 48 |
		    jiffies_to_msecs(jiffies - start_time)),
		   desc, cmd_trace, fnic_flags_and_state(sc));

	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
		fnic->lport->host_stats.fcp_input_requests++;
		fnic->fcp_input_bytes += xfer_len;
	} else if (sc->sc_data_direction == DMA_TO_DEVICE) {
		fnic->lport->host_stats.fcp_output_requests++;
		fnic->fcp_output_bytes += xfer_len;
	} else
		fnic->lport->host_stats.fcp_control_requests++;

	/* Call SCSI completion function to complete the IO */
	scsi_done(sc);
	spin_unlock_irqrestore(io_lock, flags);

	mempool_free(io_req, fnic->io_req_pool);

	atomic64_dec(&fnic_stats->io_stats.active_ios);
	if (atomic64_read(&fnic->io_cmpl_skip))
		atomic64_dec(&fnic->io_cmpl_skip);
	else
		atomic64_inc(&fnic_stats->io_stats.io_completions);

	io_duration_time = jiffies_to_msecs(jiffies) -
						jiffies_to_msecs(start_time);

	if (io_duration_time <= 10)
		atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec);
	else if (io_duration_time <= 100)
		atomic64_inc(&fnic_stats->io_stats.io_btw_10_to_100_msec);
	else if (io_duration_time <= 500)
		atomic64_inc(&fnic_stats->io_stats.io_btw_100_to_500_msec);
	else if (io_duration_time <= 5000)
		atomic64_inc(&fnic_stats->io_stats.io_btw_500_to_5000_msec);
	else if (io_duration_time <= 10000)
		atomic64_inc(&fnic_stats->io_stats.io_btw_5000_to_10000_msec);
	else if (io_duration_time <= 30000)
		atomic64_inc(&fnic_stats->io_stats.io_btw_10000_to_30000_msec);
	else
		atomic64_inc(&fnic_stats->io_stats.io_greater_than_30000_msec);

	if (io_duration_time > atomic64_read(&fnic_stats->io_stats.current_max_io_time))
		atomic64_set(&fnic_stats->io_stats.current_max_io_time, io_duration_time);
}
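
/*
 * Summary of the hdr_status -> SCSI result mapping above: SUCCESS keeps
 * DID_OK, TIMEOUT becomes DID_TIME_OUT, OUT_OF_RESOURCE becomes DID_REQUEUE
 * (retryable), and the remaining firmware errors collapse to DID_ERROR;
 * the low byte always carries the target's SCSI status through.
 */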
/* fnic_fcpio_itmf_cmpl_handler
 * Routine to handle itmf completions
 */
static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
					 struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	u32 id;
	struct scsi_cmnd *sc;
	struct fnic_io_req *io_req;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats;
	struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
	unsigned long flags;
	spinlock_t *io_lock;
	unsigned long start_time;

	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
	fcpio_tag_id_dec(&tag, &id);

	if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Tag out of range tag %x hdr status = %s\n",
			     id, fnic_fcpio_status_to_str(hdr_status));
		return;
	}

	sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
	WARN_ON_ONCE(!sc);
	if (!sc) {
		atomic64_inc(&fnic_stats->io_stats.sc_null);
		shost_printk(KERN_ERR, fnic->lport->host,
			     "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
			     fnic_fcpio_status_to_str(hdr_status), id);
		return;
	}
	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);
	io_req = fnic_priv(sc)->io_req;
	WARN_ON_ONCE(!io_req);
	if (!io_req) {
		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
		spin_unlock_irqrestore(io_lock, flags);
		fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
		shost_printk(KERN_ERR, fnic->lport->host,
			     "itmf_cmpl io_req is null - "
			     "hdr status = %s tag = 0x%x sc 0x%p\n",
			     fnic_fcpio_status_to_str(hdr_status), id, sc);
		return;
	}
	start_time = io_req->start_time;

	if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) {
		/* Abort and terminate completion of device reset req */
		/* REVISIT : Add asserts about various flags */
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "dev reset abts cmpl recd. id %x status %s\n",
			      id, fnic_fcpio_status_to_str(hdr_status));
		fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE;
		fnic_priv(sc)->abts_status = hdr_status;
		fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
		if (io_req->abts_done)
			complete(io_req->abts_done);
		spin_unlock_irqrestore(io_lock, flags);
	} else if (id & FNIC_TAG_ABORT) {
		/* Completion of abort cmd */
		switch (hdr_status) {
		case FCPIO_SUCCESS:
			break;
		case FCPIO_TIMEOUT:
			if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED)
				atomic64_inc(&abts_stats->abort_fw_timeouts);
			else
				atomic64_inc(
					&term_stats->terminate_fw_timeouts);
			break;
		case FCPIO_ITMF_REJECTED:
			FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
				      "abort reject recd. id %d\n",
				      (int)(id & FNIC_TAG_MASK));
			break;
		case FCPIO_IO_NOT_FOUND:
			if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED)
				atomic64_inc(&abts_stats->abort_io_not_found);
			else
				atomic64_inc(
					&term_stats->terminate_io_not_found);
			break;
		default:
			if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED)
				atomic64_inc(&abts_stats->abort_failures);
			else
				atomic64_inc(
					&term_stats->terminate_failures);
			break;
		}
		if (fnic_priv(sc)->state != FNIC_IOREQ_ABTS_PENDING) {
			/* This is a late completion. Ignore it */
			spin_unlock_irqrestore(io_lock, flags);
			return;
		}

		fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_DONE;
		fnic_priv(sc)->abts_status = hdr_status;

		/* If the status is IO not found consider it as success */
		if (hdr_status == FCPIO_IO_NOT_FOUND)
			fnic_priv(sc)->abts_status = FCPIO_SUCCESS;

		if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
			atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);

		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "abts cmpl recd. id %d status %s\n",
			      (int)(id & FNIC_TAG_MASK),
			      fnic_fcpio_status_to_str(hdr_status));

		/*
		 * If scsi_eh thread is blocked waiting for abts to complete,
		 * signal completion to it. IO will be cleaned in the thread
		 * else clean it in this context
		 */
		if (io_req->abts_done) {
			complete(io_req->abts_done);
			spin_unlock_irqrestore(io_lock, flags);
		} else {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "abts cmpl, completing IO\n");
			fnic_priv(sc)->io_req = NULL;
			sc->result = (DID_ERROR << 16);

			spin_unlock_irqrestore(io_lock, flags);

			fnic_release_ioreq_buf(fnic, io_req, sc);
			mempool_free(io_req, fnic->io_req_pool);
			FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
				   sc->device->host->host_no, id,
				   sc,
				   jiffies_to_msecs(jiffies - start_time),
				   desc,
				   (((u64)hdr_status << 40) |
				    (u64)sc->cmnd[0] << 32 |
				    (u64)sc->cmnd[2] << 24 |
				    (u64)sc->cmnd[3] << 16 |
				    (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
				   fnic_flags_and_state(sc));
			scsi_done(sc);
			atomic64_dec(&fnic_stats->io_stats.active_ios);
			if (atomic64_read(&fnic->io_cmpl_skip))
				atomic64_dec(&fnic->io_cmpl_skip);
			else
				atomic64_inc(&fnic_stats->io_stats.io_completions);
		}
	} else if (id & FNIC_TAG_DEV_RST) {
		/* Completion of device reset */
		fnic_priv(sc)->lr_status = hdr_status;
		if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
			spin_unlock_irqrestore(io_lock, flags);
			fnic_priv(sc)->flags |= FNIC_DEV_RST_ABTS_PENDING;
			FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
				   sc->device->host->host_no, id, sc,
				   jiffies_to_msecs(jiffies - start_time),
				   desc, 0, fnic_flags_and_state(sc));
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "Terminate pending "
				      "dev reset cmpl recd. id %d status %s\n",
				      (int)(id & FNIC_TAG_MASK),
				      fnic_fcpio_status_to_str(hdr_status));
			return;
		}
		if (fnic_priv(sc)->flags & FNIC_DEV_RST_TIMED_OUT) {
			/* Need to wait for terminate completion */
			spin_unlock_irqrestore(io_lock, flags);
			FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
				   sc->device->host->host_no, id, sc,
				   jiffies_to_msecs(jiffies - start_time),
				   desc, 0, fnic_flags_and_state(sc));
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "dev reset cmpl recd after time out. "
				      "id %d status %s\n",
				      (int)(id & FNIC_TAG_MASK),
				      fnic_fcpio_status_to_str(hdr_status));
			return;
		}
		fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;
		fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "dev reset cmpl recd. id %d status %s\n",
			      (int)(id & FNIC_TAG_MASK),
			      fnic_fcpio_status_to_str(hdr_status));
		if (io_req->dr_done)
			complete(io_req->dr_done);
		spin_unlock_irqrestore(io_lock, flags);

	} else {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Unexpected itmf io state %s tag %x\n",
			     fnic_ioreq_state_to_str(fnic_priv(sc)->state), id);
		spin_unlock_irqrestore(io_lock, flags);
	}
}
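
/*
 * Tag encoding used above: the low bits (FNIC_TAG_MASK) carry the block
 * layer tag of the original command, while FNIC_TAG_ABORT and
 * FNIC_TAG_DEV_RST are flag bits ORed in when the abort/LUN-reset request
 * was queued, letting this one handler demultiplex all three cases.
 */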
/*
 * fnic_fcpio_cmpl_handler
 * Routine to service the cq for wq_copy
 */
static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
				   unsigned int cq_index,
				   struct fcpio_fw_req *desc)
{
	struct fnic *fnic = vnic_dev_priv(vdev);

	switch (desc->hdr.type) {
	case FCPIO_ICMND_CMPL: /* fw completed a command */
	case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
	case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
	case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
	case FCPIO_RESET_CMPL: /* fw completed reset */
		atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs);
		break;
	default:
		break;
	}

	switch (desc->hdr.type) {
	case FCPIO_ACK: /* fw copied copy wq desc to its queue */
		fnic_fcpio_ack_handler(fnic, cq_index, desc);
		break;

	case FCPIO_ICMND_CMPL: /* fw completed a command */
		fnic_fcpio_icmnd_cmpl_handler(fnic, desc);
		break;

	case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
		fnic_fcpio_itmf_cmpl_handler(fnic, desc);
		break;

	case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
	case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
		fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
		break;

	case FCPIO_RESET_CMPL: /* fw completed reset */
		fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
		break;

	default:
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "firmware completion type %d\n",
			      desc->hdr.type);
		break;
	}

	return 0;
}
/*
 * fnic_wq_copy_cmpl_handler
 * Routine to process wq copy
 */
int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
{
	unsigned int wq_work_done = 0;
	unsigned int i, cq_index;
	unsigned int cur_work_done;
	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
	u64 start_jiffies = 0;
	u64 end_jiffies = 0;
	u64 delta_jiffies = 0;
	u64 delta_ms = 0;

	for (i = 0; i < fnic->wq_copy_count; i++) {
		cq_index = i + fnic->raw_wq_count + fnic->rq_count;

		start_jiffies = jiffies;
		cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
						     fnic_fcpio_cmpl_handler,
						     copy_work_to_do);
		end_jiffies = jiffies;

		wq_work_done += cur_work_done;
		delta_jiffies = end_jiffies - start_jiffies;
		if (delta_jiffies >
		    (u64) atomic64_read(&misc_stats->max_isr_jiffies)) {
			atomic64_set(&misc_stats->max_isr_jiffies,
				     delta_jiffies);
			delta_ms = jiffies_to_msecs(delta_jiffies);
			atomic64_set(&misc_stats->max_isr_time_ms, delta_ms);
			atomic64_set(&misc_stats->corr_work_done,
				     cur_work_done);
		}
	}
	return wq_work_done;
}
static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data)
{
	const int tag = scsi_cmd_to_rq(sc)->tag;
	struct fnic *fnic = data;
	struct fnic_io_req *io_req;
	unsigned long flags = 0;
	spinlock_t *io_lock;
	unsigned long start_time = 0;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;

	io_lock = fnic_io_lock_tag(fnic, tag);
	spin_lock_irqsave(io_lock, flags);

	io_req = fnic_priv(sc)->io_req;
	if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
	    !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) {
		/*
		 * We will be here only when FW completes reset
		 * without sending completions for outstanding ios.
		 */
		fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
		if (io_req && io_req->dr_done)
			complete(io_req->dr_done);
		else if (io_req && io_req->abts_done)
			complete(io_req->abts_done);
		spin_unlock_irqrestore(io_lock, flags);
		return true;
	} else if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
		spin_unlock_irqrestore(io_lock, flags);
		return true;
	}
	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		goto cleanup_scsi_cmd;
	}

	fnic_priv(sc)->io_req = NULL;

	spin_unlock_irqrestore(io_lock, flags);

	/*
	 * If there is a scsi_cmnd associated with this io_req, then
	 * free the corresponding state
	 */
	start_time = io_req->start_time;
	fnic_release_ioreq_buf(fnic, io_req, sc);
	mempool_free(io_req, fnic->io_req_pool);

cleanup_scsi_cmd:
	sc->result = DID_TRANSPORT_DISRUPTED << 16;
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "fnic_cleanup_io: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
		      tag, sc, jiffies - start_time);

	if (atomic64_read(&fnic->io_cmpl_skip))
		atomic64_dec(&fnic->io_cmpl_skip);
	else
		atomic64_inc(&fnic_stats->io_stats.io_completions);

	/* Complete the command to SCSI */
	if (!(fnic_priv(sc)->flags & FNIC_IO_ISSUED))
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n",
			     tag, sc);

	FNIC_TRACE(fnic_cleanup_io,
		   sc->device->host->host_no, tag, sc,
		   jiffies_to_msecs(jiffies - start_time),
		   0, ((u64)sc->cmnd[0] << 32 |
		       (u64)sc->cmnd[2] << 24 |
		       (u64)sc->cmnd[3] << 16 |
		       (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
		   fnic_flags_and_state(sc));

	scsi_done(sc);

	return true;
}

static void fnic_cleanup_io(struct fnic *fnic)
{
	scsi_host_busy_iter(fnic->lport->host,
			    fnic_cleanup_io_iter, fnic);
}
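
/*
 * scsi_host_busy_iter() walks every command the block layer currently has
 * outstanding on this host, so the iterator above sees (and fails with
 * DID_TRANSPORT_DISRUPTED) all IOs the firmware abandoned during reset.
 */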
void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
				  struct fcpio_host_req *desc)
{
	u32 id;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);
	struct fnic_io_req *io_req;
	struct scsi_cmnd *sc;
	unsigned long flags;
	spinlock_t *io_lock;
	unsigned long start_time = 0;

	/* get the tag reference */
	fcpio_tag_id_dec(&desc->hdr.tag, &id);
	id &= FNIC_TAG_MASK;

	if (id >= fnic->fnic_max_tag_id)
		return;

	sc = scsi_host_find_tag(fnic->lport->host, id);
	if (!sc)
		return;

	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);

	/* Get the IO context which this desc refers to */
	io_req = fnic_priv(sc)->io_req;

	/* fnic interrupts are turned off by now */

	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		goto wq_copy_cleanup_scsi_cmd;
	}

	fnic_priv(sc)->io_req = NULL;

	spin_unlock_irqrestore(io_lock, flags);

	start_time = io_req->start_time;
	fnic_release_ioreq_buf(fnic, io_req, sc);
	mempool_free(io_req, fnic->io_req_pool);

wq_copy_cleanup_scsi_cmd:
	sc->result = DID_NO_CONNECT << 16;
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
		      " DID_NO_CONNECT\n");

	FNIC_TRACE(fnic_wq_copy_cleanup_handler,
		   sc->device->host->host_no, id, sc,
		   jiffies_to_msecs(jiffies - start_time),
		   0, ((u64)sc->cmnd[0] << 32 |
		       (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
		       (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
		   fnic_flags_and_state(sc));

	scsi_done(sc);
}
static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
					  u32 task_req, u8 *fc_lun,
					  struct fnic_io_req *io_req)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	struct Scsi_Host *host = fnic->lport->host;
	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
	unsigned long flags;

	spin_lock_irqsave(host->host_lock, flags);
	if (unlikely(fnic_chk_state_flags_locked(fnic,
						 FNIC_FLAGS_IO_BLOCKED))) {
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	} else
		atomic_inc(&fnic->in_flight);
	spin_unlock_irqrestore(host->host_lock, flags);

	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq)) {
		spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
		atomic_dec(&fnic->in_flight);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "fnic_queue_abort_io_req: failure: no descriptors\n");
		atomic64_inc(&misc_stats->abts_cpwq_alloc_failures);
		return 1;
	}
	fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
				     0, task_req, tag, fc_lun, io_req->port_id,
				     fnic->config.ra_tov, fnic->config.ed_tov);

	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
	    atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
			     atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
	atomic_dec(&fnic->in_flight);

	return 0;
}
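
/*
 * The in_flight increment above pairs with the drain loop in
 * fnic_fw_reset_handler(): a firmware reset will not be issued while an
 * abort descriptor is being queued on the copy WQ.
 */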
struct fnic_rport_abort_io_iter_data {
	struct fnic *fnic;
	u32 port_id;
	int term_cnt;
};

static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data)
{
	struct fnic_rport_abort_io_iter_data *iter_data = data;
	struct fnic *fnic = iter_data->fnic;
	int abt_tag = scsi_cmd_to_rq(sc)->tag;
	struct fnic_io_req *io_req;
	spinlock_t *io_lock;
	unsigned long flags;
	struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
	struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
	struct scsi_lun fc_lun;
	enum fnic_ioreq_state old_ioreq_state;

	io_lock = fnic_io_lock_tag(fnic, abt_tag);
	spin_lock_irqsave(io_lock, flags);

	io_req = fnic_priv(sc)->io_req;

	if (!io_req || io_req->port_id != iter_data->port_id) {
		spin_unlock_irqrestore(io_lock, flags);
		return true;
	}

	if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
	    !(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED)) {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "fnic_rport_exch_reset dev rst not pending sc 0x%p\n",
			      sc);
		spin_unlock_irqrestore(io_lock, flags);
		return true;
	}

	/*
	 * Found IO that is still pending with firmware and
	 * belongs to rport that went away
	 */
	if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
		spin_unlock_irqrestore(io_lock, flags);
		return true;
	}
	if (io_req->abts_done) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic_rport_exch_reset: io_req->abts_done is set "
			     "state is %s\n",
			     fnic_ioreq_state_to_str(fnic_priv(sc)->state));
	}

	if (!(fnic_priv(sc)->flags & FNIC_IO_ISSUED)) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "rport_exch_reset "
			     "IO not yet issued %p tag 0x%x flags "
			     "%x state %d\n",
			     sc, abt_tag, fnic_priv(sc)->flags, fnic_priv(sc)->state);
	}
	old_ioreq_state = fnic_priv(sc)->state;
	fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
	fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE;
	if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
		atomic64_inc(&reset_stats->device_reset_terminates);
		abt_tag |= FNIC_TAG_DEV_RST;
	}
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "fnic_rport_exch_reset dev rst sc 0x%p\n", sc);
	BUG_ON(io_req->abts_done);

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "fnic_rport_reset_exch: Issuing abts\n");

	spin_unlock_irqrestore(io_lock, flags);

	/* Now queue the abort command to firmware */
	int_to_scsilun(sc->device->lun, &fc_lun);

	if (fnic_queue_abort_io_req(fnic, abt_tag,
				    FCPIO_ITMF_ABT_TASK_TERM,
				    fc_lun.scsi_lun, io_req)) {
		/*
		 * Revert the cmd state back to old state, if
		 * it hasn't changed in between. This cmd will get
		 * aborted later by scsi_eh, or cleaned up during
		 * lun reset
		 */
		spin_lock_irqsave(io_lock, flags);
		if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
			fnic_priv(sc)->state = old_ioreq_state;
		spin_unlock_irqrestore(io_lock, flags);
	} else {
		spin_lock_irqsave(io_lock, flags);
		if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET)
			fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED;
		else
			fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED;
		spin_unlock_irqrestore(io_lock, flags);
		atomic64_inc(&term_stats->terminates);
		iter_data->term_cnt++;
	}
	return true;
}
static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
{
	struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
	struct fnic_rport_abort_io_iter_data iter_data = {
		.fnic = fnic,
		.port_id = port_id,
		.term_cnt = 0,
	};

	FNIC_SCSI_DBG(KERN_DEBUG,
		      fnic->lport->host,
		      "fnic_rport_exch_reset called portid 0x%06x\n",
		      port_id);

	if (fnic->in_remove)
		return;

	scsi_host_busy_iter(fnic->lport->host, fnic_rport_abort_io_iter,
			    &iter_data);
	if (iter_data.term_cnt > atomic64_read(&term_stats->max_terminates))
		atomic64_set(&term_stats->max_terminates, iter_data.term_cnt);
}
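
/*
 * fnic_rport_exch_reset() is the workhorse behind fnic_terminate_rport_io()
 * below: it terminates, in firmware, every exchange still pending for the
 * remote port that went away.
 */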
void fnic_terminate_rport_io(struct fc_rport *rport)
{
	struct fc_rport_libfc_priv *rdata;
	struct fc_lport *lport;
	struct fnic *fnic;

	if (!rport) {
		printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n");
		return;
	}
	rdata = rport->dd_data;

	if (!rdata) {
		printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n");
		return;
	}
	lport = rdata->local_port;

	if (!lport) {
		printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n");
		return;
	}
	fnic = lport_priv(lport);
	FNIC_SCSI_DBG(KERN_DEBUG,
		      fnic->lport->host, "fnic_terminate_rport_io called"
		      " wwpn 0x%llx, wwnn 0x%llx, rport 0x%p, portid 0x%06x\n",
		      rport->port_name, rport->node_name, rport,
		      rport->port_id);

	if (fnic->in_remove)
		return;

	fnic_rport_exch_reset(fnic, rport->port_id);
}
/*
 * This function is exported to SCSI for sending abort cmnds.
 * A SCSI IO is represented by a io_req in the driver.
 * The ioreq is linked to the SCSI Cmd, thus a link with the ULP's IO.
 */
int fnic_abort_cmd(struct scsi_cmnd *sc)
{
	struct request *const rq = scsi_cmd_to_rq(sc);
	struct fc_lport *lp;
	struct fnic *fnic;
	struct fnic_io_req *io_req = NULL;
	struct fc_rport *rport;
	spinlock_t *io_lock;
	unsigned long flags;
	unsigned long start_time = 0;
	int ret = SUCCESS;
	u32 task_req = 0;
	struct scsi_lun fc_lun;
	struct fnic_stats *fnic_stats;
	struct abort_stats *abts_stats;
	struct terminate_stats *term_stats;
	enum fnic_ioreq_state old_ioreq_state;
	const int tag = rq->tag;
	unsigned long abt_issued_time;
	DECLARE_COMPLETION_ONSTACK(tm_done);

	/* Wait for rport to unblock */
	fc_block_scsi_eh(sc);

	/* Get local-port, check ready and link up */
	lp = shost_priv(sc->device->host);

	fnic = lport_priv(lp);
	fnic_stats = &fnic->fnic_stats;
	abts_stats = &fnic->fnic_stats.abts_stats;
	term_stats = &fnic->fnic_stats.term_stats;

	rport = starget_to_rport(scsi_target(sc->device));
	FNIC_SCSI_DBG(KERN_DEBUG,
		      fnic->lport->host,
		      "Abort Cmd called FCID 0x%x, LUN 0x%llx TAG %x flags %x\n",
		      rport->port_id, sc->device->lun, tag, fnic_priv(sc)->flags);

	fnic_priv(sc)->flags = FNIC_NO_FLAGS;

	if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
		ret = FAILED;
		goto fnic_abort_cmd_end;
	}

	/*
	 * Avoid a race between SCSI issuing the abort and the device
	 * completing the command.
	 *
	 * If the command is already completed by the fw cmpl code,
	 * we just return SUCCESS from here. This means that the abort
	 * succeeded. In the SCSI ML, since the timeout for command has
	 * happened, the completion wont actually complete the command
	 * and it will be considered as an aborted command
	 *
	 * .io_req will not be cleared except while holding io_req_lock.
	 */
	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);
	io_req = fnic_priv(sc)->io_req;
	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		goto fnic_abort_cmd_end;
	}

	io_req->abts_done = &tm_done;

	if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
		spin_unlock_irqrestore(io_lock, flags);
		goto wait_pending;
	}

	abt_issued_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time);
	if (abt_issued_time <= 6000)
		atomic64_inc(&abts_stats->abort_issued_btw_0_to_6_sec);
	else if (abt_issued_time > 6000 && abt_issued_time <= 20000)
		atomic64_inc(&abts_stats->abort_issued_btw_6_to_20_sec);
	else if (abt_issued_time > 20000 && abt_issued_time <= 30000)
		atomic64_inc(&abts_stats->abort_issued_btw_20_to_30_sec);
	else if (abt_issued_time > 30000 && abt_issued_time <= 40000)
		atomic64_inc(&abts_stats->abort_issued_btw_30_to_40_sec);
	else if (abt_issued_time > 40000 && abt_issued_time <= 50000)
		atomic64_inc(&abts_stats->abort_issued_btw_40_to_50_sec);
	else if (abt_issued_time > 50000 && abt_issued_time <= 60000)
		atomic64_inc(&abts_stats->abort_issued_btw_50_to_60_sec);
	else
		atomic64_inc(&abts_stats->abort_issued_greater_than_60_sec);

	FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
		      "CDB Opcode: %02x Abort issued time: %lu msec\n", sc->cmnd[0], abt_issued_time);
	/*
	 * Command is still pending, need to abort it
	 * If the firmware completes the command after this point,
	 * the completion wont be done till mid-layer, since abort
	 * has already started.
	 */
	old_ioreq_state = fnic_priv(sc)->state;
	fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
	fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE;

	spin_unlock_irqrestore(io_lock, flags);

	/*
	 * Check readiness of the remote port. If the path to remote
	 * port is up, then send abts to the remote port to terminate
	 * the IO. Else, just locally terminate the IO in the firmware
	 */
	if (fc_remote_port_chkready(rport) == 0)
		task_req = FCPIO_ITMF_ABT_TASK;
	else {
		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
		task_req = FCPIO_ITMF_ABT_TASK_TERM;
	}

	/* Now queue the abort command to firmware */
	int_to_scsilun(sc->device->lun, &fc_lun);

	if (fnic_queue_abort_io_req(fnic, tag, task_req, fc_lun.scsi_lun,
				    io_req)) {
		spin_lock_irqsave(io_lock, flags);
		if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
			fnic_priv(sc)->state = old_ioreq_state;
		io_req = fnic_priv(sc)->io_req;
		if (io_req)
			io_req->abts_done = NULL;
		spin_unlock_irqrestore(io_lock, flags);
		ret = FAILED;
		goto fnic_abort_cmd_end;
	}
	if (task_req == FCPIO_ITMF_ABT_TASK) {
		fnic_priv(sc)->flags |= FNIC_IO_ABTS_ISSUED;
		atomic64_inc(&fnic_stats->abts_stats.aborts);
	} else {
		fnic_priv(sc)->flags |= FNIC_IO_TERM_ISSUED;
		atomic64_inc(&fnic_stats->term_stats.terminates);
	}

	/*
	 * We queued an abort IO, wait for its completion.
	 * Once the firmware completes the abort command, it will
	 * wake up this thread.
	 */
 wait_pending:
	wait_for_completion_timeout(&tm_done,
				    msecs_to_jiffies
				    (2 * fnic->config.ra_tov +
				     fnic->config.ed_tov));

	/* Check the abort status */
	spin_lock_irqsave(io_lock, flags);

	io_req = fnic_priv(sc)->io_req;
	if (!io_req) {
		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
		spin_unlock_irqrestore(io_lock, flags);
		fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
		ret = FAILED;
		goto fnic_abort_cmd_end;
	}
	io_req->abts_done = NULL;

	/* fw did not complete abort, timed out */
	if (fnic_priv(sc)->abts_status == FCPIO_INVALID_CODE) {
		spin_unlock_irqrestore(io_lock, flags);
		if (task_req == FCPIO_ITMF_ABT_TASK) {
			atomic64_inc(&abts_stats->abort_drv_timeouts);
		} else {
			atomic64_inc(&term_stats->terminate_drv_timeouts);
		}
		fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_TIMED_OUT;
		ret = FAILED;
		goto fnic_abort_cmd_end;
	}

	/* IO out of order */

	if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
		spin_unlock_irqrestore(io_lock, flags);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Issuing Host reset due to out of order IO\n");

		ret = FAILED;
		goto fnic_abort_cmd_end;
	}

	fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE;

	start_time = io_req->start_time;
	/*
	 * firmware completed the abort, check the status,
	 * free the io_req if successful. If abort fails,
	 * Device reset will clean the I/O.
	 */
	if (fnic_priv(sc)->abts_status == FCPIO_SUCCESS) {
		fnic_priv(sc)->io_req = NULL;
	} else {
		ret = FAILED;
		spin_unlock_irqrestore(io_lock, flags);
		goto fnic_abort_cmd_end;
	}

	spin_unlock_irqrestore(io_lock, flags);

	fnic_release_ioreq_buf(fnic, io_req, sc);
	mempool_free(io_req, fnic->io_req_pool);

	/* Call SCSI completion function to complete the IO */
	sc->result = DID_ABORT << 16;
	scsi_done(sc);
	atomic64_dec(&fnic_stats->io_stats.active_ios);
	if (atomic64_read(&fnic->io_cmpl_skip))
		atomic64_dec(&fnic->io_cmpl_skip);
	else
		atomic64_inc(&fnic_stats->io_stats.io_completions);

fnic_abort_cmd_end:
	FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no, tag, sc,
		   jiffies_to_msecs(jiffies - start_time),
		   0, ((u64)sc->cmnd[0] << 32 |
		       (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
		       (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
		   fnic_flags_and_state(sc));

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "Returning from abort cmd type %x %s\n", task_req,
		      (ret == SUCCESS) ?
		      "SUCCESS" : "FAILED");
	return ret;
}
static inline int fnic_queue_dr_io_req(struct fnic *fnic,
				       struct scsi_cmnd *sc,
				       struct fnic_io_req *io_req)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	struct Scsi_Host *host = fnic->lport->host;
	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
	struct scsi_lun fc_lun;
	int ret = 0;
	unsigned long intr_flags;

	spin_lock_irqsave(host->host_lock, intr_flags);
	if (unlikely(fnic_chk_state_flags_locked(fnic,
						 FNIC_FLAGS_IO_BLOCKED))) {
		spin_unlock_irqrestore(host->host_lock, intr_flags);
		return FAILED;
	} else
		atomic_inc(&fnic->in_flight);
	spin_unlock_irqrestore(host->host_lock, intr_flags);

	spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq)) {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "queue_dr_io_req failure - no descriptors\n");
		atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures);
		ret = -EAGAIN;
		goto lr_io_req_end;
	}

	/* fill in the lun info */
	int_to_scsilun(sc->device->lun, &fc_lun);

	fnic_queue_wq_copy_desc_itmf(wq, scsi_cmd_to_rq(sc)->tag | FNIC_TAG_DEV_RST,
				     0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
				     fc_lun.scsi_lun, io_req->port_id,
				     fnic->config.ra_tov, fnic->config.ed_tov);

	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
	    atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
			     atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

lr_io_req_end:
	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
	atomic_dec(&fnic->in_flight);

	return ret;
}
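
/*
 * Note the tag usage for LUN reset above: the descriptor is queued under
 * the command's tag ORed with FNIC_TAG_DEV_RST, while SCSI_NO_TAG is passed
 * where fnic_queue_abort_io_req() passes the tag of the exchange to abort,
 * since a LUN reset is not tied to one specific exchange.
 */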
1985 struct fnic_pending_aborts_iter_data {
1987 struct scsi_cmnd *lr_sc;
1988 struct scsi_device *lun_dev;
static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data)
{
	struct fnic_pending_aborts_iter_data *iter_data = data;
	struct fnic *fnic = iter_data->fnic;
	struct scsi_device *lun_dev = iter_data->lun_dev;
	int abt_tag = scsi_cmd_to_rq(sc)->tag;
	struct fnic_io_req *io_req;
	spinlock_t *io_lock;
	unsigned long flags;
	struct scsi_lun fc_lun;
	DECLARE_COMPLETION_ONSTACK(tm_done);
	enum fnic_ioreq_state old_ioreq_state;

	if (sc == iter_data->lr_sc || sc->device != lun_dev)
		return true;

	io_lock = fnic_io_lock_tag(fnic, abt_tag);
	spin_lock_irqsave(io_lock, flags);
	io_req = fnic_priv(sc)->io_req;
	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		return true;
	}

	/*
	 * Found IO that is still pending with firmware and
	 * belongs to the LUN that we are resetting
	 */
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "Found IO in %s on lun\n",
		      fnic_ioreq_state_to_str(fnic_priv(sc)->state));

	if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
		spin_unlock_irqrestore(io_lock, flags);
		return true;
	}
	if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
	    (!(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED))) {
		FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
			      "%s dev rst not pending sc 0x%p\n", __func__,
			      sc);
		spin_unlock_irqrestore(io_lock, flags);
		return true;
	}

	if (io_req->abts_done)
		shost_printk(KERN_ERR, fnic->lport->host,
			     "%s: io_req->abts_done is set state is %s\n",
			     __func__, fnic_ioreq_state_to_str(fnic_priv(sc)->state));
	old_ioreq_state = fnic_priv(sc)->state;
	/*
	 * Any pending IO issued prior to reset is expected to be
	 * in abts pending state, if not we need to set
	 * FNIC_IOREQ_ABTS_PENDING to indicate the IO is abort pending.
	 * When IO is completed, the IO will be handed over and
	 * handled in this function.
	 */
	fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;

	BUG_ON(io_req->abts_done);

	if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
		abt_tag |= FNIC_TAG_DEV_RST;
		FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
			      "%s: dev rst sc 0x%p\n", __func__, sc);
	}

	fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE;
	io_req->abts_done = &tm_done;
	spin_unlock_irqrestore(io_lock, flags);

	/* Now queue the abort command to firmware */
	int_to_scsilun(sc->device->lun, &fc_lun);

	if (fnic_queue_abort_io_req(fnic, abt_tag,
				    FCPIO_ITMF_ABT_TASK_TERM,
				    fc_lun.scsi_lun, io_req)) {
		spin_lock_irqsave(io_lock, flags);
		io_req = fnic_priv(sc)->io_req;
		if (io_req)
			io_req->abts_done = NULL;
		if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
			fnic_priv(sc)->state = old_ioreq_state;
		spin_unlock_irqrestore(io_lock, flags);
		iter_data->ret = FAILED;
		return false;
	} else {
		spin_lock_irqsave(io_lock, flags);
		if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET)
			fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED;
		spin_unlock_irqrestore(io_lock, flags);
	}
	fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED;

	wait_for_completion_timeout(&tm_done, msecs_to_jiffies
				    (fnic->config.ed_tov));

	/* Recheck cmd state to check if it is now aborted */
	spin_lock_irqsave(io_lock, flags);
	io_req = fnic_priv(sc)->io_req;
	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
		return true;
	}

	io_req->abts_done = NULL;

	/* if abort is still pending with fw, fail */
	if (fnic_priv(sc)->abts_status == FCPIO_INVALID_CODE) {
		spin_unlock_irqrestore(io_lock, flags);
		fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_DONE;
		iter_data->ret = FAILED;
		return false;
	}
	fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE;

	/* original sc used for lr is handled by dev reset code */
	if (sc != iter_data->lr_sc)
		fnic_priv(sc)->io_req = NULL;
	spin_unlock_irqrestore(io_lock, flags);

	/* original sc used for lr is handled by dev reset code */
	if (sc != iter_data->lr_sc) {
		fnic_release_ioreq_buf(fnic, io_req, sc);
		mempool_free(io_req, fnic->io_req_pool);
	}

	/*
	 * Any IO returned during reset must call scsi_done to hand the
	 * scsi_cmnd back to the upper layer.
	 */
	/* Set result to let upper SCSI layer retry */
	sc->result = DID_RESET << 16;
	scsi_done(sc);

	return true;
}

/*
 * Clean up any pending aborts on the lun.
 * For each outstanding IO on this lun, whose abort is not completed by fw,
 * issue a local abort. Wait for abort to complete. Return 0 if all commands
 * are successfully aborted, non-zero (FAILED) otherwise.
 */
static int fnic_clean_pending_aborts(struct fnic *fnic,
				     struct scsi_cmnd *lr_sc,
				     bool new_sc)
{
	int ret = 0;
	struct fnic_pending_aborts_iter_data iter_data = {
		.fnic = fnic,
		.lun_dev = lr_sc->device,
		.ret = SUCCESS,
	};

	if (new_sc)
		iter_data.lr_sc = lr_sc;

	scsi_host_busy_iter(fnic->lport->host,
			    fnic_pending_aborts_iter, &iter_data);
	if (iter_data.ret == FAILED) {
		ret = iter_data.ret;
		goto clean_pending_aborts_end;
	}
	schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));

	/* walk again to check, if IOs are still pending in fw */
	if (fnic_is_abts_pending(fnic, lr_sc))
		ret = FAILED;

clean_pending_aborts_end:
	FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
		      "%s: exit status: %d\n", __func__, ret);
	return ret;
}

/*
 * fnic_scsi_host_start_tag
 * Allocates tagid from host's tag list
 **/
static inline int
fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
{
	struct request *rq = scsi_cmd_to_rq(sc);
	struct request_queue *q = rq->q;
	struct request *dummy;

	dummy = blk_mq_alloc_request(q, REQ_OP_WRITE, BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(dummy))
		return SCSI_NO_TAG;

	rq->tag = dummy->tag;
	sc->host_scribble = (unsigned char *)dummy;

	return dummy->tag;
}

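/*
 * Note: fnic_scsi_host_start_tag() never queues the dummy request; it
 * only pins a block-layer tag for a reset issued without one (e.g. via
 * ioctl) and must be paired with fnic_scsi_host_end_tag() on exit.
 */
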
/*
 * fnic_scsi_host_end_tag
 * frees tag allocated by fnic_scsi_host_start_tag.
 **/
static inline void
fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
{
	struct request *dummy = (struct request *)sc->host_scribble;

	blk_mq_free_request(dummy);
}

/*
 * SCSI Eh thread issues a Lun Reset when one or more commands on a LUN
 * fail to get aborted. It calls driver's eh_device_reset with a SCSI command
 * on the LUN.
 */
int fnic_device_reset(struct scsi_cmnd *sc)
{
	struct request *rq = scsi_cmd_to_rq(sc);
	struct fc_lport *lp;
	struct fnic *fnic;
	struct fnic_io_req *io_req = NULL;
	struct fc_rport *rport;
	int status;
	int ret = FAILED;
	spinlock_t *io_lock;
	unsigned long flags;
	unsigned long start_time = 0;
	struct scsi_lun fc_lun;
	struct fnic_stats *fnic_stats;
	struct reset_stats *reset_stats;
	int tag = rq->tag;
	DECLARE_COMPLETION_ONSTACK(tm_done);
	int tag_gen_flag = 0;   /* to track tags allocated by fnic driver */
	bool new_sc = 0;
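
	/*
	 * tag_gen_flag/new_sc are set only when the midlayer supplied no
	 * block-layer tag, in which case a dummy tag is allocated below
	 * and released again at fnic_device_reset_end.
	 */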

	/* Wait for rport to unblock */
	fc_block_scsi_eh(sc);

	/* Get local-port, check ready and link up */
	lp = shost_priv(sc->device->host);

	fnic = lport_priv(lp);
	fnic_stats = &fnic->fnic_stats;
	reset_stats = &fnic->fnic_stats.reset_stats;

	atomic64_inc(&reset_stats->device_resets);

	rport = starget_to_rport(scsi_target(sc->device));
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "Device reset called FCID 0x%x, LUN 0x%llx sc 0x%p\n",
		      rport->port_id, sc->device->lun, sc);

	if (lp->state != LPORT_ST_READY || !(lp->link_up))
		goto fnic_device_reset_end;

	/* Check if remote port up */
	if (fc_remote_port_chkready(rport)) {
		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
		goto fnic_device_reset_end;
	}

	fnic_priv(sc)->flags = FNIC_DEVICE_RESET;
	/* Allocate tag if not present */

	if (unlikely(tag < 0)) {
		/*
		 * Really should fix the midlayer to pass in a proper
		 * request for ioctls...
		 */
		tag = fnic_scsi_host_start_tag(fnic, sc);
		if (unlikely(tag == SCSI_NO_TAG))
			goto fnic_device_reset_end;
		tag_gen_flag = 1;
		new_sc = 1;
	}
	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);
	io_req = fnic_priv(sc)->io_req;

	/*
	 * If there is a io_req attached to this command, then use it,
	 * else allocate a new one.
	 */
	if (!io_req) {
		io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
		if (!io_req) {
			spin_unlock_irqrestore(io_lock, flags);
			goto fnic_device_reset_end;
		}
		memset(io_req, 0, sizeof(*io_req));
		io_req->port_id = rport->port_id;
		fnic_priv(sc)->io_req = io_req;
	}
	io_req->dr_done = &tm_done;
	fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING;
	fnic_priv(sc)->lr_status = FCPIO_INVALID_CODE;
	spin_unlock_irqrestore(io_lock, flags);

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag);

	/*
	 * issue the device reset, if enqueue failed, clean up the ioreq
	 * and break assoc with scsi cmd
	 */
	if (fnic_queue_dr_io_req(fnic, sc, io_req)) {
		spin_lock_irqsave(io_lock, flags);
		io_req = fnic_priv(sc)->io_req;
		if (io_req)
			io_req->dr_done = NULL;
		goto fnic_device_reset_clean;
	}
	spin_lock_irqsave(io_lock, flags);
	fnic_priv(sc)->flags |= FNIC_DEV_RST_ISSUED;
	spin_unlock_irqrestore(io_lock, flags);

	/*
	 * Wait on the local completion for LUN reset. The io_req may be
	 * freed while we wait since we hold no lock.
	 */
	wait_for_completion_timeout(&tm_done,
				    msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));

	spin_lock_irqsave(io_lock, flags);
	io_req = fnic_priv(sc)->io_req;
	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "io_req is null tag 0x%x sc 0x%p\n", tag, sc);
		goto fnic_device_reset_end;
	}
	io_req->dr_done = NULL;

	status = fnic_priv(sc)->lr_status;

	/*
	 * If lun reset not completed, bail out with failed. io_req
	 * gets cleaned up during higher levels of EH
	 */
	if (status == FCPIO_INVALID_CODE) {
		atomic64_inc(&reset_stats->device_reset_timeouts);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Device reset timed out\n");
		fnic_priv(sc)->flags |= FNIC_DEV_RST_TIMED_OUT;
		spin_unlock_irqrestore(io_lock, flags);
		int_to_scsilun(sc->device->lun, &fc_lun);
		/*
		 * Issue abort and terminate on device reset request.
		 * If q'ing of terminate fails, retry it after a delay.
		 */
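		/*
		 * Note: at most one terminate is issued; the loop below
		 * exits once FNIC_DEV_RST_TERM_ISSUED is observed or as
		 * soon as the terminate request is queued successfully.
		 */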
		while (1) {
			spin_lock_irqsave(io_lock, flags);
			if (fnic_priv(sc)->flags & FNIC_DEV_RST_TERM_ISSUED) {
				spin_unlock_irqrestore(io_lock, flags);
				break;
			}
			spin_unlock_irqrestore(io_lock, flags);
			if (fnic_queue_abort_io_req(fnic,
				tag | FNIC_TAG_DEV_RST,
				FCPIO_ITMF_ABT_TASK_TERM,
				fc_lun.scsi_lun, io_req)) {
				wait_for_completion_timeout(&tm_done,
				msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT));
			} else {
				spin_lock_irqsave(io_lock, flags);
				fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED;
				fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
				io_req->abts_done = &tm_done;
				spin_unlock_irqrestore(io_lock, flags);
				FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				  "Abort and terminate issued on Device reset "
				  "tag 0x%x sc 0x%p\n", tag, sc);
				break;
			}
		}
		while (1) {
			spin_lock_irqsave(io_lock, flags);
			if (!(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) {
				spin_unlock_irqrestore(io_lock, flags);
				wait_for_completion_timeout(&tm_done,
				msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
				break;
			} else {
				io_req = fnic_priv(sc)->io_req;
				io_req->abts_done = NULL;
				goto fnic_device_reset_clean;
			}
		}
	} else {
		spin_unlock_irqrestore(io_lock, flags);
	}

	/* Completed, but not successful, clean up the io_req, return fail */
	if (status != FCPIO_SUCCESS) {
		spin_lock_irqsave(io_lock, flags);
		FNIC_SCSI_DBG(KERN_DEBUG,
			      fnic->lport->host,
			      "Device reset completed - failed\n");
		io_req = fnic_priv(sc)->io_req;
		goto fnic_device_reset_clean;
	}

	/*
	 * Clean up any aborts on this lun that have still not
	 * completed. If any of these fail, then LUN reset fails.
	 * clean_pending_aborts cleans all cmds on this lun except
	 * the lun reset cmd. If all cmds get cleaned, the lun reset
	 * succeeds.
	 */
	if (fnic_clean_pending_aborts(fnic, sc, new_sc)) {
		spin_lock_irqsave(io_lock, flags);
		io_req = fnic_priv(sc)->io_req;
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Device reset failed"
			      " since could not abort all IOs\n");
		goto fnic_device_reset_clean;
	}

	/* Clean lun reset command */
	spin_lock_irqsave(io_lock, flags);
	io_req = fnic_priv(sc)->io_req;
	if (io_req)
		/* Completed, and successful */
		ret = SUCCESS;

fnic_device_reset_clean:
	if (io_req)
		fnic_priv(sc)->io_req = NULL;

	spin_unlock_irqrestore(io_lock, flags);

	if (io_req) {
		start_time = io_req->start_time;
		fnic_release_ioreq_buf(fnic, io_req, sc);
		mempool_free(io_req, fnic->io_req_pool);
	}

fnic_device_reset_end:
	FNIC_TRACE(fnic_device_reset, sc->device->host->host_no, rq->tag, sc,
		  jiffies_to_msecs(jiffies - start_time),
		  0, ((u64)sc->cmnd[0] << 32 |
		  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
		  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
		  fnic_flags_and_state(sc));

	/* free tag if it is allocated */
	if (unlikely(tag_gen_flag))
		fnic_scsi_host_end_tag(fnic, sc);

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "Returning from device reset %s\n",
		      (ret == SUCCESS) ?
		      "SUCCESS" : "FAILED");

	if (ret == FAILED)
		atomic64_inc(&reset_stats->device_reset_failures);

	return ret;
}

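/*
 * fnic_reset() returns 0 on success and non-zero otherwise; callers such
 * as fnic_host_reset() map that onto the SCSI EH SUCCESS/FAILED codes.
 */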
/* Clean up all IOs, clean up libFC local port */
int fnic_reset(struct Scsi_Host *shost)
{
	struct fc_lport *lp;
	struct fnic *fnic;
	int ret = 0;
	struct reset_stats *reset_stats;

	lp = shost_priv(shost);
	fnic = lport_priv(lp);
	reset_stats = &fnic->fnic_stats.reset_stats;

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "fnic_reset called\n");

	atomic64_inc(&reset_stats->fnic_resets);

	/*
	 * Reset local port, this will clean up libFC exchanges,
	 * reset remote port sessions, and if link is up, begin flogi
	 */
	ret = fc_lport_reset(lp);

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "Returning from fnic reset %s\n",
		      (ret == 0) ?
		      "SUCCESS" : "FAILED");

	if (ret == 0)
		atomic64_inc(&reset_stats->fnic_reset_completions);
	else
		atomic64_inc(&reset_stats->fnic_reset_failures);

	return ret;
}

/*
 * SCSI Error handling calls driver's eh_host_reset if all prior
 * error handling levels return FAILED. If host reset completes
 * successfully, and if link is up, then Fabric login begins.
 *
 * Host Reset is the highest level of error recovery. If this fails, then
 * host is offlined by SCSI.
 */
int fnic_host_reset(struct scsi_cmnd *sc)
{
	int ret;
	unsigned long wait_host_tmo;
	struct Scsi_Host *shost = sc->device->host;
	struct fc_lport *lp = shost_priv(shost);
	struct fnic *fnic = lport_priv(lp);
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (!fnic->internal_reset_inprogress) {
		fnic->internal_reset_inprogress = true;
	} else {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			"host reset in progress skipping another host reset\n");
		return SUCCESS;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	/*
	 * If fnic_reset is successful, wait for fabric login to complete.
	 * scsi-ml tries to send a TUR to every device if host reset is
	 * successful, so before returning to scsi, fabric should be up.
	 */
	ret = (fnic_reset(shost) == 0) ? SUCCESS : FAILED;
	if (ret == SUCCESS) {
		wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
		ret = FAILED;
		while (time_before(jiffies, wait_host_tmo)) {
			if ((lp->state == LPORT_ST_READY) &&
			    (lp->link_up)) {
				ret = SUCCESS;
				break;
			}
			ssleep(1);
		}
	}

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	fnic->internal_reset_inprogress = false;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	return ret;
}

/*
 * This function is called from libFC when the host is removed.
 */
void fnic_scsi_abort_io(struct fc_lport *lp)
{
	int err = 0;
	unsigned long flags;
	enum fnic_state old_state;
	struct fnic *fnic = lport_priv(lp);
	DECLARE_COMPLETION_ONSTACK(remove_wait);

	/* Issue firmware reset for fnic, wait for reset to complete */
retry_fw_reset:
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) &&
		     fnic->link_events) {
		/* fw reset is in progress, poll for its completion */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		schedule_timeout(msecs_to_jiffies(100));
		goto retry_fw_reset;
	}

	fnic->remove_wait = &remove_wait;
	old_state = fnic->state;
	fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
	fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	err = fnic_fw_reset_handler(fnic);
	if (err) {
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
			fnic->state = old_state;
		fnic->remove_wait = NULL;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	/* Wait for firmware reset to complete */
	wait_for_completion_timeout(&remove_wait,
				    msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT));

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	fnic->remove_wait = NULL;
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "fnic_scsi_abort_io %s\n",
		      (fnic->state == FNIC_IN_ETH_MODE) ?
		      "SUCCESS" : "FAILED");
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

/*
 * This function is called from libFC to clean up driver IO state on link down.
 */
void fnic_scsi_cleanup(struct fc_lport *lp)
{
	unsigned long flags;
	enum fnic_state old_state;
	struct fnic *fnic = lport_priv(lp);

	/* issue fw reset */
retry_fw_reset:
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
		/* fw reset is in progress, poll for its completion */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		schedule_timeout(msecs_to_jiffies(100));
		goto retry_fw_reset;
	}
	old_state = fnic->state;
	fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
	fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (fnic_fw_reset_handler(fnic)) {
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
			fnic->state = old_state;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	}
}

void fnic_empty_scsi_cleanup(struct fc_lport *lp)
{
}

void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
{
	struct fnic *fnic = lport_priv(lp);

	/* Non-zero sid, nothing to do */
	if (sid)
		goto call_fc_exch_mgr_reset;

	if (did) {
		fnic_rport_exch_reset(fnic, did);
		goto call_fc_exch_mgr_reset;
	}

	/*
	 * sid = 0, did = 0
	 * link down or device being removed
	 */
	if (!fnic->in_remove)
		fnic_scsi_cleanup(lp);
	else
		fnic_scsi_abort_io(lp);

	/* call libFC exch mgr reset to reset its exchanges */
call_fc_exch_mgr_reset:
	fc_exch_mgr_reset(lp, sid, did);
}

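/*
 * scsi_host_busy_iter() callback used by fnic_is_abts_pending(): flags
 * any IO on the target LUN whose abort is still pending in firmware.
 * Returning false stops the walk once such an IO has been found.
 */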
static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data)
{
	struct fnic_pending_aborts_iter_data *iter_data = data;
	struct fnic *fnic = iter_data->fnic;
	int cmd_state;
	struct fnic_io_req *io_req;
	spinlock_t *io_lock;
	unsigned long flags;

	/*
	 * ignore this lun reset cmd or cmds that do not belong to
	 * this lun
	 */
	if (iter_data->lr_sc && sc == iter_data->lr_sc)
		return true;
	if (iter_data->lun_dev && sc->device != iter_data->lun_dev)
		return true;

	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);

	io_req = fnic_priv(sc)->io_req;
	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		return true;
	}

	/*
	 * Found IO that is still pending with firmware and
	 * belongs to the LUN that we are resetting
	 */
	FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
		      "Found IO in %s on lun\n",
		      fnic_ioreq_state_to_str(fnic_priv(sc)->state));
	cmd_state = fnic_priv(sc)->state;
	spin_unlock_irqrestore(io_lock, flags);
	if (cmd_state == FNIC_IOREQ_ABTS_PENDING)
		iter_data->ret = 1;

	return iter_data->ret ? false : true;
}

/*
 * fnic_is_abts_pending() is a helper function that
 * walks through the tag map to check whether any IOs are pending; if so,
 * it returns 1 (true), otherwise 0 (false).
 * If @lr_sc is non-NULL, it checks only IOs on that particular LUN;
 * otherwise, it checks all IOs.
 */
int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
{
	struct fnic_pending_aborts_iter_data iter_data = {
		.fnic = fnic,
		.lun_dev = NULL,
		.ret = 0,
	};

	if (lr_sc) {
		iter_data.lun_dev = lr_sc->device;
		iter_data.lr_sc = lr_sc;
	}

	/* walk the tag map to check if IOs are still pending in fw */
	scsi_host_busy_iter(fnic->lport->host,
			    fnic_abts_pending_iter, &iter_data);

	return iter_data.ret;
}