// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Broadcom MPI3 Storage Controllers
 *
 * Copyright (C) 2017-2022 Broadcom Inc.
 *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
 */
12 /* global driver scop variables */
13 LIST_HEAD(mrioc_list);
14 DEFINE_SPINLOCK(mrioc_list_lock);
16 static int warn_non_secure_ctlr;
17 atomic64_t event_counter;
19 MODULE_AUTHOR(MPI3MR_DRIVER_AUTHOR);
20 MODULE_DESCRIPTION(MPI3MR_DRIVER_DESC);
21 MODULE_LICENSE(MPI3MR_DRIVER_LICENSE);
22 MODULE_VERSION(MPI3MR_DRIVER_VERSION);
24 /* Module parameters*/
26 module_param(prot_mask, int, 0);
27 MODULE_PARM_DESC(prot_mask, "Host protection capabilities mask, def=0x07");
29 static int prot_guard_mask = 3;
30 module_param(prot_guard_mask, int, 0);
31 MODULE_PARM_DESC(prot_guard_mask, " Host protection guard mask, def=3");
32 static int logging_level;
33 module_param(logging_level, int, 0);
34 MODULE_PARM_DESC(logging_level,
35 " bits for enabling additional logging info (default=0)");
/* Forward declarations */
static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
	struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx);
42 * mpi3mr_host_tag_for_scmd - Get host tag for a scmd
43 * @mrioc: Adapter instance reference
44 * @scmd: SCSI command reference
46 * Calculate the host tag based on block tag for a given scmd.
48 * Return: Valid host tag or MPI3MR_HOSTTAG_INVALID.
50 static u16 mpi3mr_host_tag_for_scmd(struct mpi3mr_ioc *mrioc,
51 struct scsi_cmnd *scmd)
53 struct scmd_priv *priv = NULL;
55 u16 host_tag, hw_queue;
57 unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
59 hw_queue = blk_mq_unique_tag_to_hwq(unique_tag);
60 if (hw_queue >= mrioc->num_op_reply_q)
61 return MPI3MR_HOSTTAG_INVALID;
62 host_tag = blk_mq_unique_tag_to_tag(unique_tag);
64 if (WARN_ON(host_tag >= mrioc->max_host_ios))
65 return MPI3MR_HOSTTAG_INVALID;
67 priv = scsi_cmd_priv(scmd);
68 /*host_tag 0 is invalid hence incrementing by 1*/
69 priv->host_tag = host_tag + 1;
71 priv->in_lld_scope = 1;
72 priv->req_q_idx = hw_queue;
73 priv->meta_chain_idx = -1;
75 priv->meta_sg_valid = 0;
76 return priv->host_tag;
80 * mpi3mr_scmd_from_host_tag - Get SCSI command from host tag
81 * @mrioc: Adapter instance reference
83 * @qidx: Operational queue index
85 * Identify the block tag from the host tag and queue index and
86 * retrieve associated scsi command using scsi_host_find_tag().
88 * Return: SCSI command reference or NULL.
90 static struct scsi_cmnd *mpi3mr_scmd_from_host_tag(
91 struct mpi3mr_ioc *mrioc, u16 host_tag, u16 qidx)
93 struct scsi_cmnd *scmd = NULL;
94 struct scmd_priv *priv = NULL;
95 u32 unique_tag = host_tag - 1;
97 if (WARN_ON(host_tag > mrioc->max_host_ios))
100 unique_tag |= (qidx << BLK_MQ_UNIQUE_TAG_BITS);
102 scmd = scsi_host_find_tag(mrioc->shost, unique_tag);
104 priv = scsi_cmd_priv(scmd);
105 if (!priv->in_lld_scope)
113 * mpi3mr_clear_scmd_priv - Cleanup SCSI command private date
114 * @mrioc: Adapter instance reference
115 * @scmd: SCSI command reference
117 * Invalidate the SCSI command private data to mark the command
118 * is not in LLD scope anymore.
122 static void mpi3mr_clear_scmd_priv(struct mpi3mr_ioc *mrioc,
123 struct scsi_cmnd *scmd)
125 struct scmd_priv *priv = NULL;
127 priv = scsi_cmd_priv(scmd);
129 if (WARN_ON(priv->in_lld_scope == 0))
131 priv->host_tag = MPI3MR_HOSTTAG_INVALID;
132 priv->req_q_idx = 0xFFFF;
134 priv->in_lld_scope = 0;
135 priv->meta_sg_valid = 0;
136 if (priv->chain_idx >= 0) {
137 clear_bit(priv->chain_idx, mrioc->chain_bitmap);
138 priv->chain_idx = -1;
140 if (priv->meta_chain_idx >= 0) {
141 clear_bit(priv->meta_chain_idx, mrioc->chain_bitmap);
142 priv->meta_chain_idx = -1;
/* Forward declarations for routines referenced before their definitions */
static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
	struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc);
static void mpi3mr_fwevt_worker(struct work_struct *work);
151 * mpi3mr_fwevt_free - firmware event memory dealloctor
152 * @r: k reference pointer of the firmware event
154 * Free firmware event memory when no reference.
156 static void mpi3mr_fwevt_free(struct kref *r)
158 kfree(container_of(r, struct mpi3mr_fwevt, ref_count));
162 * mpi3mr_fwevt_get - k reference incrementor
163 * @fwevt: Firmware event reference
165 * Increment firmware event reference count.
167 static void mpi3mr_fwevt_get(struct mpi3mr_fwevt *fwevt)
169 kref_get(&fwevt->ref_count);
173 * mpi3mr_fwevt_put - k reference decrementor
174 * @fwevt: Firmware event reference
176 * decrement firmware event reference count.
178 static void mpi3mr_fwevt_put(struct mpi3mr_fwevt *fwevt)
180 kref_put(&fwevt->ref_count, mpi3mr_fwevt_free);
184 * mpi3mr_alloc_fwevt - Allocate firmware event
185 * @len: length of firmware event data to allocate
187 * Allocate firmware event with required length and initialize
188 * the reference counter.
190 * Return: firmware event reference.
192 static struct mpi3mr_fwevt *mpi3mr_alloc_fwevt(int len)
194 struct mpi3mr_fwevt *fwevt;
196 fwevt = kzalloc(sizeof(*fwevt) + len, GFP_ATOMIC);
200 kref_init(&fwevt->ref_count);
205 * mpi3mr_fwevt_add_to_list - Add firmware event to the list
206 * @mrioc: Adapter instance reference
207 * @fwevt: Firmware event reference
209 * Add the given firmware event to the firmware event list.
213 static void mpi3mr_fwevt_add_to_list(struct mpi3mr_ioc *mrioc,
214 struct mpi3mr_fwevt *fwevt)
218 if (!mrioc->fwevt_worker_thread)
221 spin_lock_irqsave(&mrioc->fwevt_lock, flags);
222 /* get fwevt reference count while adding it to fwevt_list */
223 mpi3mr_fwevt_get(fwevt);
224 INIT_LIST_HEAD(&fwevt->list);
225 list_add_tail(&fwevt->list, &mrioc->fwevt_list);
226 INIT_WORK(&fwevt->work, mpi3mr_fwevt_worker);
227 /* get fwevt reference count while enqueueing it to worker queue */
228 mpi3mr_fwevt_get(fwevt);
229 queue_work(mrioc->fwevt_worker_thread, &fwevt->work);
230 spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
234 * mpi3mr_fwevt_del_from_list - Delete firmware event from list
235 * @mrioc: Adapter instance reference
236 * @fwevt: Firmware event reference
238 * Delete the given firmware event from the firmware event list.
242 static void mpi3mr_fwevt_del_from_list(struct mpi3mr_ioc *mrioc,
243 struct mpi3mr_fwevt *fwevt)
247 spin_lock_irqsave(&mrioc->fwevt_lock, flags);
248 if (!list_empty(&fwevt->list)) {
249 list_del_init(&fwevt->list);
251 * Put fwevt reference count after
252 * removing it from fwevt_list
254 mpi3mr_fwevt_put(fwevt);
256 spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
260 * mpi3mr_dequeue_fwevt - Dequeue firmware event from the list
261 * @mrioc: Adapter instance reference
263 * Dequeue a firmware event from the firmware event list.
265 * Return: firmware event.
267 static struct mpi3mr_fwevt *mpi3mr_dequeue_fwevt(
268 struct mpi3mr_ioc *mrioc)
271 struct mpi3mr_fwevt *fwevt = NULL;
273 spin_lock_irqsave(&mrioc->fwevt_lock, flags);
274 if (!list_empty(&mrioc->fwevt_list)) {
275 fwevt = list_first_entry(&mrioc->fwevt_list,
276 struct mpi3mr_fwevt, list);
277 list_del_init(&fwevt->list);
279 * Put fwevt reference count after
280 * removing it from fwevt_list
282 mpi3mr_fwevt_put(fwevt);
284 spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
290 * mpi3mr_cancel_work - cancel firmware event
291 * @fwevt: fwevt object which needs to be canceled
295 static void mpi3mr_cancel_work(struct mpi3mr_fwevt *fwevt)
298 * Wait on the fwevt to complete. If this returns 1, then
299 * the event was never executed.
301 * If it did execute, we wait for it to finish, and the put will
302 * happen from mpi3mr_process_fwevt()
304 if (cancel_work_sync(&fwevt->work)) {
306 * Put fwevt reference count after
307 * dequeuing it from worker queue
309 mpi3mr_fwevt_put(fwevt);
311 * Put fwevt reference count to neutralize
312 * kref_init increment
314 mpi3mr_fwevt_put(fwevt);
319 * mpi3mr_cleanup_fwevt_list - Cleanup firmware event list
320 * @mrioc: Adapter instance reference
322 * Flush all pending firmware events from the firmware event
327 void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc)
329 struct mpi3mr_fwevt *fwevt = NULL;
331 if ((list_empty(&mrioc->fwevt_list) && !mrioc->current_event) ||
332 !mrioc->fwevt_worker_thread)
335 while ((fwevt = mpi3mr_dequeue_fwevt(mrioc)))
336 mpi3mr_cancel_work(fwevt);
338 if (mrioc->current_event) {
339 fwevt = mrioc->current_event;
341 * Don't call cancel_work_sync() API for the
342 * fwevt work if the controller reset is
343 * get called as part of processing the
344 * same fwevt work (or) when worker thread is
345 * waiting for device add/remove APIs to complete.
346 * Otherwise we will see deadlock.
348 if (current_work() == &fwevt->work || fwevt->pending_at_sml) {
353 mpi3mr_cancel_work(fwevt);
358 * mpi3mr_invalidate_devhandles -Invalidate device handles
359 * @mrioc: Adapter instance reference
361 * Invalidate the device handles in the target device structures
362 * . Called post reset prior to reinitializing the controller.
366 void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
368 struct mpi3mr_tgt_dev *tgtdev;
369 struct mpi3mr_stgt_priv_data *tgt_priv;
371 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
372 tgtdev->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
373 if (tgtdev->starget && tgtdev->starget->hostdata) {
374 tgt_priv = tgtdev->starget->hostdata;
375 tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
381 * mpi3mr_print_scmd - print individual SCSI command
383 * @data: Adapter instance reference
385 * Print the SCSI command details if it is in LLD scope.
387 * Return: true always.
389 static bool mpi3mr_print_scmd(struct request *rq, void *data)
391 struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
392 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
393 struct scmd_priv *priv = NULL;
396 priv = scsi_cmd_priv(scmd);
397 if (!priv->in_lld_scope)
400 ioc_info(mrioc, "%s :Host Tag = %d, qid = %d\n",
401 __func__, priv->host_tag, priv->req_q_idx + 1);
402 scsi_print_command(scmd);
410 * mpi3mr_flush_scmd - Flush individual SCSI command
412 * @data: Adapter instance reference
414 * Return the SCSI command to the upper layers if it is in LLD
417 * Return: true always.
420 static bool mpi3mr_flush_scmd(struct request *rq, void *data)
422 struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
423 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
424 struct scmd_priv *priv = NULL;
427 priv = scsi_cmd_priv(scmd);
428 if (!priv->in_lld_scope)
431 if (priv->meta_sg_valid)
432 dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
433 scsi_prot_sg_count(scmd), scmd->sc_data_direction);
434 mpi3mr_clear_scmd_priv(mrioc, scmd);
435 scsi_dma_unmap(scmd);
436 scmd->result = DID_RESET << 16;
437 scsi_print_command(scmd);
439 mrioc->flush_io_count++;
447 * mpi3mr_count_dev_pending - Count commands pending for a lun
449 * @data: SCSI device reference
451 * This is an iterator function called for each SCSI command in
452 * a host and if the command is pending in the LLD for the
453 * specific device(lun) then device specific pending I/O counter
454 * is updated in the device structure.
456 * Return: true always.
459 static bool mpi3mr_count_dev_pending(struct request *rq, void *data)
461 struct scsi_device *sdev = (struct scsi_device *)data;
462 struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
463 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
464 struct scmd_priv *priv;
467 priv = scsi_cmd_priv(scmd);
468 if (!priv->in_lld_scope)
470 if (scmd->device == sdev)
471 sdev_priv_data->pend_count++;
479 * mpi3mr_count_tgt_pending - Count commands pending for target
481 * @data: SCSI target reference
483 * This is an iterator function called for each SCSI command in
484 * a host and if the command is pending in the LLD for the
485 * specific target then target specific pending I/O counter is
486 * updated in the target structure.
488 * Return: true always.
491 static bool mpi3mr_count_tgt_pending(struct request *rq, void *data)
493 struct scsi_target *starget = (struct scsi_target *)data;
494 struct mpi3mr_stgt_priv_data *stgt_priv_data = starget->hostdata;
495 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
496 struct scmd_priv *priv;
499 priv = scsi_cmd_priv(scmd);
500 if (!priv->in_lld_scope)
502 if (scmd->device && (scsi_target(scmd->device) == starget))
503 stgt_priv_data->pend_count++;
511 * mpi3mr_flush_host_io - Flush host I/Os
512 * @mrioc: Adapter instance reference
514 * Flush all of the pending I/Os by calling
515 * blk_mq_tagset_busy_iter() for each possible tag. This is
516 * executed post controller reset
520 void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc)
522 struct Scsi_Host *shost = mrioc->shost;
524 mrioc->flush_io_count = 0;
525 ioc_info(mrioc, "%s :Flushing Host I/O cmds post reset\n", __func__);
526 blk_mq_tagset_busy_iter(&shost->tag_set,
527 mpi3mr_flush_scmd, (void *)mrioc);
528 ioc_info(mrioc, "%s :Flushed %d Host I/O cmds\n", __func__,
529 mrioc->flush_io_count);
533 * mpi3mr_alloc_tgtdev - target device allocator
535 * Allocate target device instance and initialize the reference
538 * Return: target device instance.
540 static struct mpi3mr_tgt_dev *mpi3mr_alloc_tgtdev(void)
542 struct mpi3mr_tgt_dev *tgtdev;
544 tgtdev = kzalloc(sizeof(*tgtdev), GFP_ATOMIC);
547 kref_init(&tgtdev->ref_count);
552 * mpi3mr_tgtdev_add_to_list -Add tgtdevice to the list
553 * @mrioc: Adapter instance reference
554 * @tgtdev: Target device
556 * Add the target device to the target device list
560 static void mpi3mr_tgtdev_add_to_list(struct mpi3mr_ioc *mrioc,
561 struct mpi3mr_tgt_dev *tgtdev)
565 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
566 mpi3mr_tgtdev_get(tgtdev);
567 INIT_LIST_HEAD(&tgtdev->list);
568 list_add_tail(&tgtdev->list, &mrioc->tgtdev_list);
569 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
573 * mpi3mr_tgtdev_del_from_list -Delete tgtdevice from the list
574 * @mrioc: Adapter instance reference
575 * @tgtdev: Target device
577 * Remove the target device from the target device list
581 static void mpi3mr_tgtdev_del_from_list(struct mpi3mr_ioc *mrioc,
582 struct mpi3mr_tgt_dev *tgtdev)
586 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
587 if (!list_empty(&tgtdev->list)) {
588 list_del_init(&tgtdev->list);
589 mpi3mr_tgtdev_put(tgtdev);
591 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
595 * __mpi3mr_get_tgtdev_by_handle -Get tgtdev from device handle
596 * @mrioc: Adapter instance reference
597 * @handle: Device handle
599 * Accessor to retrieve target device from the device handle.
602 * Return: Target device reference.
604 static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_handle(
605 struct mpi3mr_ioc *mrioc, u16 handle)
607 struct mpi3mr_tgt_dev *tgtdev;
609 assert_spin_locked(&mrioc->tgtdev_lock);
610 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
611 if (tgtdev->dev_handle == handle)
616 mpi3mr_tgtdev_get(tgtdev);
621 * mpi3mr_get_tgtdev_by_handle -Get tgtdev from device handle
622 * @mrioc: Adapter instance reference
623 * @handle: Device handle
625 * Accessor to retrieve target device from the device handle.
628 * Return: Target device reference.
630 struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle(
631 struct mpi3mr_ioc *mrioc, u16 handle)
633 struct mpi3mr_tgt_dev *tgtdev;
636 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
637 tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle);
638 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
643 * __mpi3mr_get_tgtdev_by_perst_id -Get tgtdev from persist ID
644 * @mrioc: Adapter instance reference
645 * @persist_id: Persistent ID
647 * Accessor to retrieve target device from the Persistent ID.
650 * Return: Target device reference.
652 static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_perst_id(
653 struct mpi3mr_ioc *mrioc, u16 persist_id)
655 struct mpi3mr_tgt_dev *tgtdev;
657 assert_spin_locked(&mrioc->tgtdev_lock);
658 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
659 if (tgtdev->perst_id == persist_id)
664 mpi3mr_tgtdev_get(tgtdev);
669 * mpi3mr_get_tgtdev_by_perst_id -Get tgtdev from persistent ID
670 * @mrioc: Adapter instance reference
671 * @persist_id: Persistent ID
673 * Accessor to retrieve target device from the Persistent ID.
676 * Return: Target device reference.
678 static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_perst_id(
679 struct mpi3mr_ioc *mrioc, u16 persist_id)
681 struct mpi3mr_tgt_dev *tgtdev;
684 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
685 tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, persist_id);
686 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
691 * __mpi3mr_get_tgtdev_from_tgtpriv -Get tgtdev from tgt private
692 * @mrioc: Adapter instance reference
693 * @tgt_priv: Target private data
695 * Accessor to return target device from the target private
696 * data. Non Lock version
698 * Return: Target device reference.
700 static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_from_tgtpriv(
701 struct mpi3mr_ioc *mrioc, struct mpi3mr_stgt_priv_data *tgt_priv)
703 struct mpi3mr_tgt_dev *tgtdev;
705 assert_spin_locked(&mrioc->tgtdev_lock);
706 tgtdev = tgt_priv->tgt_dev;
708 mpi3mr_tgtdev_get(tgtdev);
713 * mpi3mr_print_device_event_notice - print notice related to post processing of
714 * device event after controller reset.
716 * @mrioc: Adapter instance reference
717 * @device_add: true for device add event and false for device removal event
721 static void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc,
724 ioc_notice(mrioc, "Device %s was in progress before the reset and\n",
725 (device_add ? "addition" : "removal"));
726 ioc_notice(mrioc, "completed after reset, verify whether the exposed devices\n");
727 ioc_notice(mrioc, "are matched with attached devices for correctness\n");
731 * mpi3mr_remove_tgtdev_from_host - Remove dev from upper layers
732 * @mrioc: Adapter instance reference
733 * @tgtdev: Target device structure
735 * Checks whether the device is exposed to upper layers and if it
736 * is then remove the device from upper layers by calling
737 * scsi_remove_target().
739 * Return: 0 on success, non zero on failure.
741 static void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc,
742 struct mpi3mr_tgt_dev *tgtdev)
744 struct mpi3mr_stgt_priv_data *tgt_priv;
746 ioc_info(mrioc, "%s :Removing handle(0x%04x), wwid(0x%016llx)\n",
747 __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
748 if (tgtdev->starget && tgtdev->starget->hostdata) {
749 tgt_priv = tgtdev->starget->hostdata;
750 tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
753 if (tgtdev->starget) {
754 if (mrioc->current_event)
755 mrioc->current_event->pending_at_sml = 1;
756 scsi_remove_target(&tgtdev->starget->dev);
757 tgtdev->host_exposed = 0;
758 if (mrioc->current_event) {
759 mrioc->current_event->pending_at_sml = 0;
760 if (mrioc->current_event->discard) {
761 mpi3mr_print_device_event_notice(mrioc, false);
766 ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n",
767 __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
771 * mpi3mr_report_tgtdev_to_host - Expose device to upper layers
772 * @mrioc: Adapter instance reference
773 * @perst_id: Persistent ID of the device
775 * Checks whether the device can be exposed to upper layers and
776 * if it is not then expose the device to upper layers by
777 * calling scsi_scan_target().
779 * Return: 0 on success, non zero on failure.
781 static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc,
785 struct mpi3mr_tgt_dev *tgtdev;
787 tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
792 if (tgtdev->is_hidden) {
796 if (!tgtdev->host_exposed && !mrioc->reset_in_progress) {
797 tgtdev->host_exposed = 1;
798 if (mrioc->current_event)
799 mrioc->current_event->pending_at_sml = 1;
800 scsi_scan_target(&mrioc->shost->shost_gendev, 0,
802 SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
803 if (!tgtdev->starget)
804 tgtdev->host_exposed = 0;
805 if (mrioc->current_event) {
806 mrioc->current_event->pending_at_sml = 0;
807 if (mrioc->current_event->discard) {
808 mpi3mr_print_device_event_notice(mrioc, true);
815 mpi3mr_tgtdev_put(tgtdev);
821 * mpi3mr_change_queue_depth- Change QD callback handler
822 * @sdev: SCSI device reference
823 * @q_depth: Queue depth
825 * Validate and limit QD and call scsi_change_queue_depth.
827 * Return: return value of scsi_change_queue_depth
829 static int mpi3mr_change_queue_depth(struct scsi_device *sdev,
832 struct scsi_target *starget = scsi_target(sdev);
833 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
836 if (!sdev->tagged_supported)
838 if (q_depth > shost->can_queue)
839 q_depth = shost->can_queue;
841 q_depth = MPI3MR_DEFAULT_SDEV_QD;
842 retval = scsi_change_queue_depth(sdev, q_depth);
848 * mpi3mr_update_sdev - Update SCSI device information
849 * @sdev: SCSI device reference
850 * @data: target device reference
852 * This is an iterator function called for each SCSI device in a
853 * target to update the target specific information into each
859 mpi3mr_update_sdev(struct scsi_device *sdev, void *data)
861 struct mpi3mr_tgt_dev *tgtdev;
863 tgtdev = (struct mpi3mr_tgt_dev *)data;
867 mpi3mr_change_queue_depth(sdev, tgtdev->q_depth);
868 switch (tgtdev->dev_type) {
869 case MPI3_DEVICE_DEVFORM_PCIE:
870 /*The block layer hw sector size = 512*/
871 if ((tgtdev->dev_spec.pcie_inf.dev_info &
872 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
873 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) {
874 blk_queue_max_hw_sectors(sdev->request_queue,
875 tgtdev->dev_spec.pcie_inf.mdts / 512);
876 if (tgtdev->dev_spec.pcie_inf.pgsz == 0)
877 blk_queue_virt_boundary(sdev->request_queue,
878 ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1));
880 blk_queue_virt_boundary(sdev->request_queue,
881 ((1 << tgtdev->dev_spec.pcie_inf.pgsz) - 1));
890 * mpi3mr_rfresh_tgtdevs - Refresh target device exposure
891 * @mrioc: Adapter instance reference
893 * This is executed post controller reset to identify any
894 * missing devices during reset and remove from the upper layers
895 * or expose any newly detected device to the upper layers.
900 void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
902 struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
904 list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
906 if (tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
907 dprint_reset(mrioc, "removing target device with perst_id(%d)\n",
909 if (tgtdev->host_exposed)
910 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
911 mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
912 mpi3mr_tgtdev_put(tgtdev);
917 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
918 if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
919 !tgtdev->is_hidden && !tgtdev->host_exposed)
920 mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
925 * mpi3mr_update_tgtdev - DevStatusChange evt bottomhalf
926 * @mrioc: Adapter instance reference
927 * @tgtdev: Target device internal structure
928 * @dev_pg0: New device page0
930 * Update the information from the device page0 into the driver
931 * cached target device structure.
935 static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
936 struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0)
939 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
942 tgtdev->perst_id = le16_to_cpu(dev_pg0->persistent_id);
943 tgtdev->dev_handle = le16_to_cpu(dev_pg0->dev_handle);
944 tgtdev->dev_type = dev_pg0->device_form;
945 tgtdev->encl_handle = le16_to_cpu(dev_pg0->enclosure_handle);
946 tgtdev->parent_handle = le16_to_cpu(dev_pg0->parent_dev_handle);
947 tgtdev->slot = le16_to_cpu(dev_pg0->slot);
948 tgtdev->q_depth = le16_to_cpu(dev_pg0->queue_depth);
949 tgtdev->wwid = le64_to_cpu(dev_pg0->wwid);
951 flags = le16_to_cpu(dev_pg0->flags);
952 tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN);
954 if (tgtdev->starget && tgtdev->starget->hostdata) {
955 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
956 tgtdev->starget->hostdata;
957 scsi_tgt_priv_data->perst_id = tgtdev->perst_id;
958 scsi_tgt_priv_data->dev_handle = tgtdev->dev_handle;
959 scsi_tgt_priv_data->dev_type = tgtdev->dev_type;
962 switch (dev_pg0->access_status) {
963 case MPI3_DEVICE0_ASTATUS_NO_ERRORS:
964 case MPI3_DEVICE0_ASTATUS_PREPARE:
965 case MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION:
966 case MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY:
969 tgtdev->is_hidden = 1;
973 switch (tgtdev->dev_type) {
974 case MPI3_DEVICE_DEVFORM_SAS_SATA:
976 struct mpi3_device0_sas_sata_format *sasinf =
977 &dev_pg0->device_specific.sas_sata_format;
978 u16 dev_info = le16_to_cpu(sasinf->device_info);
980 tgtdev->dev_spec.sas_sata_inf.dev_info = dev_info;
981 tgtdev->dev_spec.sas_sata_inf.sas_address =
982 le64_to_cpu(sasinf->sas_address);
983 if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) !=
984 MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE)
985 tgtdev->is_hidden = 1;
986 else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET |
987 MPI3_SAS_DEVICE_INFO_SSP_TARGET)))
988 tgtdev->is_hidden = 1;
991 case MPI3_DEVICE_DEVFORM_PCIE:
993 struct mpi3_device0_pcie_format *pcieinf =
994 &dev_pg0->device_specific.pcie_format;
995 u16 dev_info = le16_to_cpu(pcieinf->device_info);
997 tgtdev->dev_spec.pcie_inf.dev_info = dev_info;
998 tgtdev->dev_spec.pcie_inf.capb =
999 le32_to_cpu(pcieinf->capabilities);
1000 tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS;
1002 tgtdev->dev_spec.pcie_inf.pgsz = 12;
1003 if (dev_pg0->access_status == MPI3_DEVICE0_ASTATUS_NO_ERRORS) {
1004 tgtdev->dev_spec.pcie_inf.mdts =
1005 le32_to_cpu(pcieinf->maximum_data_transfer_size);
1006 tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->page_size;
1007 tgtdev->dev_spec.pcie_inf.reset_to =
1008 max_t(u8, pcieinf->controller_reset_to,
1009 MPI3MR_INTADMCMD_TIMEOUT);
1010 tgtdev->dev_spec.pcie_inf.abort_to =
1011 max_t(u8, pcieinf->nvme_abort_to,
1012 MPI3MR_INTADMCMD_TIMEOUT);
1014 if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024))
1015 tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024);
1016 if (((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
1017 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
1018 ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
1019 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE))
1020 tgtdev->is_hidden = 1;
1023 prot_mask = scsi_host_get_prot(mrioc->shost);
1024 if (prot_mask & SHOST_DIX_TYPE0_PROTECTION) {
1025 scsi_host_set_prot(mrioc->shost, prot_mask & 0x77);
1027 "%s : Disabling DIX0 prot capability\n", __func__);
1029 "because HBA does not support DIX0 operation on NVME drives\n");
1033 case MPI3_DEVICE_DEVFORM_VD:
1035 struct mpi3_device0_vd_format *vdinf =
1036 &dev_pg0->device_specific.vd_format;
1038 tgtdev->dev_spec.vol_inf.state = vdinf->vd_state;
1039 if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE)
1040 tgtdev->is_hidden = 1;
1049 * mpi3mr_devstatuschg_evt_bh - DevStatusChange evt bottomhalf
1050 * @mrioc: Adapter instance reference
1051 * @fwevt: Firmware event information.
1053 * Process Device status Change event and based on device's new
1054 * information, either expose the device to the upper layers, or
1055 * remove the device from upper layers.
1059 static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc,
1060 struct mpi3mr_fwevt *fwevt)
1063 u8 uhide = 0, delete = 0, cleanup = 0;
1064 struct mpi3mr_tgt_dev *tgtdev = NULL;
1065 struct mpi3_event_data_device_status_change *evtdata =
1066 (struct mpi3_event_data_device_status_change *)fwevt->event_data;
1068 dev_handle = le16_to_cpu(evtdata->dev_handle);
1070 "%s :device status change: handle(0x%04x): reason code(0x%x)\n",
1071 __func__, dev_handle, evtdata->reason_code);
1072 switch (evtdata->reason_code) {
1073 case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
1076 case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
1079 case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
1084 ioc_info(mrioc, "%s :Unhandled reason code(0x%x)\n", __func__,
1085 evtdata->reason_code);
1089 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
1093 tgtdev->is_hidden = 0;
1094 if (!tgtdev->host_exposed)
1095 mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
1097 if (tgtdev->starget && tgtdev->starget->hostdata) {
1099 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
1102 mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
1103 mpi3mr_tgtdev_put(tgtdev);
1108 mpi3mr_tgtdev_put(tgtdev);
1112 * mpi3mr_devinfochg_evt_bh - DeviceInfoChange evt bottomhalf
1113 * @mrioc: Adapter instance reference
1114 * @dev_pg0: New device page0
1116 * Process Device Info Change event and based on device's new
1117 * information, either expose the device to the upper layers, or
1118 * remove the device from upper layers or update the details of
1123 static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc,
1124 struct mpi3_device_page0 *dev_pg0)
1126 struct mpi3mr_tgt_dev *tgtdev = NULL;
1127 u16 dev_handle = 0, perst_id = 0;
1129 perst_id = le16_to_cpu(dev_pg0->persistent_id);
1130 dev_handle = le16_to_cpu(dev_pg0->dev_handle);
1132 "%s :Device info change: handle(0x%04x): persist_id(0x%x)\n",
1133 __func__, dev_handle, perst_id);
1134 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
1137 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0);
1138 if (!tgtdev->is_hidden && !tgtdev->host_exposed)
1139 mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
1140 if (tgtdev->is_hidden && tgtdev->host_exposed)
1141 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
1142 if (!tgtdev->is_hidden && tgtdev->host_exposed && tgtdev->starget)
1143 starget_for_each_device(tgtdev->starget, (void *)tgtdev,
1144 mpi3mr_update_sdev);
1147 mpi3mr_tgtdev_put(tgtdev);
1151 * mpi3mr_sastopochg_evt_debug - SASTopoChange details
1152 * @mrioc: Adapter instance reference
1153 * @event_data: SAS topology change list event data
1155 * Prints information about the SAS topology change event.
1160 mpi3mr_sastopochg_evt_debug(struct mpi3mr_ioc *mrioc,
1161 struct mpi3_event_data_sas_topology_change_list *event_data)
1165 u8 reason_code, phy_number;
1166 char *status_str = NULL;
1167 u8 link_rate, prev_link_rate;
1169 switch (event_data->exp_status) {
1170 case MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
1171 status_str = "remove";
1173 case MPI3_EVENT_SAS_TOPO_ES_RESPONDING:
1174 status_str = "responding";
1176 case MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
1177 status_str = "remove delay";
1179 case MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER:
1180 status_str = "direct attached";
1183 status_str = "unknown status";
1186 ioc_info(mrioc, "%s :sas topology change: (%s)\n",
1187 __func__, status_str);
1189 "%s :\texpander_handle(0x%04x), enclosure_handle(0x%04x) start_phy(%02d), num_entries(%d)\n",
1190 __func__, le16_to_cpu(event_data->expander_dev_handle),
1191 le16_to_cpu(event_data->enclosure_handle),
1192 event_data->start_phy_num, event_data->num_entries);
1193 for (i = 0; i < event_data->num_entries; i++) {
1194 handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
1197 phy_number = event_data->start_phy_num + i;
1198 reason_code = event_data->phy_entry[i].status &
1199 MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
1200 switch (reason_code) {
1201 case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
1202 status_str = "target remove";
1204 case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
1205 status_str = "delay target remove";
1207 case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
1208 status_str = "link status change";
1210 case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
1211 status_str = "link status no change";
1213 case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
1214 status_str = "target responding";
1217 status_str = "unknown";
1220 link_rate = event_data->phy_entry[i].link_rate >> 4;
1221 prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
1223 "%s :\tphy(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
1224 __func__, phy_number, handle, status_str, link_rate,
1230 * mpi3mr_sastopochg_evt_bh - SASTopologyChange evt bottomhalf
1231 * @mrioc: Adapter instance reference
1232 * @fwevt: Firmware event reference
1234 * Prints information about the SAS topology change event and
1235 * for "not responding" event code, removes the device from the
* driver's internal target-device list and from the SCSI host.
* (Trailing kernel-doc lines and some body lines are elided in
* this numbered listing.)
1240 static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc,
1241 struct mpi3mr_fwevt *fwevt)
1243 struct mpi3_event_data_sas_topology_change_list *event_data =
1244 (struct mpi3_event_data_sas_topology_change_list *)fwevt->event_data;
1248 struct mpi3mr_tgt_dev *tgtdev = NULL;
1250 mpi3mr_sastopochg_evt_debug(mrioc, event_data);
/* Walk every PHY entry in the event payload. */
1252 for (i = 0; i < event_data->num_entries; i++) {
1255 handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
/* Look up the device by firmware handle; the get takes a reference
 * that is dropped via mpi3mr_tgtdev_put() below.
 */
1258 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
1262 reason_code = event_data->phy_entry[i].status &
1263 MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
1265 switch (reason_code) {
1266 case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
/* Device is gone: unexpose from the SCSI midlayer (if it was
 * exposed) and remove it from the driver's list; the extra put
 * releases the list's reference.
 */
1267 if (tgtdev->host_exposed)
1268 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
1269 mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
1270 mpi3mr_tgtdev_put(tgtdev);
/* Drop the lookup reference taken at the top of the loop. */
1276 mpi3mr_tgtdev_put(tgtdev);
1281 * mpi3mr_pcietopochg_evt_debug - PCIeTopoChange details
1282 * @mrioc: Adapter instance reference
1283 * @event_data: PCIe topology change list event data
1285 * Prints information about the PCIe topology change event.
1290 mpi3mr_pcietopochg_evt_debug(struct mpi3mr_ioc *mrioc,
1291 struct mpi3_event_data_pcie_topology_change_list *event_data)
1297 char *status_str = NULL;
1298 u8 link_rate, prev_link_rate;
/* Map the switch-level status to a human-readable string. */
1300 switch (event_data->switch_status) {
1301 case MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
1302 status_str = "remove";
1304 case MPI3_EVENT_PCIE_TOPO_SS_RESPONDING:
1305 status_str = "responding";
1307 case MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
1308 status_str = "remove delay";
1310 case MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH:
1311 status_str = "direct attached";
1314 status_str = "unknown status";
1317 ioc_info(mrioc, "%s :pcie topology change: (%s)\n",
1318 __func__, status_str);
1320 "%s :\tswitch_handle(0x%04x), enclosure_handle(0x%04x) start_port(%02d), num_entries(%d)\n",
1321 __func__, le16_to_cpu(event_data->switch_dev_handle),
1322 le16_to_cpu(event_data->enclosure_handle),
1323 event_data->start_port_num, event_data->num_entries);
/* Log each port entry: attached handle, per-port status, and the
 * new/old link rates extracted from the port info fields.
 */
1324 for (i = 0; i < event_data->num_entries; i++) {
1326 le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
1329 port_number = event_data->start_port_num + i;
1330 reason_code = event_data->port_entry[i].port_status;
1331 switch (reason_code) {
1332 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
1333 status_str = "target remove";
1335 case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
1336 status_str = "delay target remove";
1338 case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
1339 status_str = "link status change";
1341 case MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE:
1342 status_str = "link status no change";
1344 case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
1345 status_str = "target responding";
1348 status_str = "unknown";
1351 link_rate = event_data->port_entry[i].current_port_info &
1352 MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
1353 prev_link_rate = event_data->port_entry[i].previous_port_info &
1354 MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
1356 "%s :\tport(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
1357 __func__, port_number, handle, status_str, link_rate,
1363 * mpi3mr_pcietopochg_evt_bh - PCIeTopologyChange evt bottomhalf
1364 * @mrioc: Adapter instance reference
1365 * @fwevt: Firmware event reference
1367 * Prints information about the PCIe topology change event and
1368 * for "not responding" event code, removes the device from the
* driver's internal target-device list and from the SCSI host.
* Mirrors mpi3mr_sastopochg_evt_bh() for PCIe attached devices.
1373 static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc,
1374 struct mpi3mr_fwevt *fwevt)
1376 struct mpi3_event_data_pcie_topology_change_list *event_data =
1377 (struct mpi3_event_data_pcie_topology_change_list *)fwevt->event_data;
1381 struct mpi3mr_tgt_dev *tgtdev = NULL;
1383 mpi3mr_pcietopochg_evt_debug(mrioc, event_data);
/* Walk each port entry in the event payload. */
1385 for (i = 0; i < event_data->num_entries; i++) {
1389 le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
/* Lookup holds a device reference, released below. */
1392 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
1396 reason_code = event_data->port_entry[i].port_status;
1398 switch (reason_code) {
1399 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
/* Device removed: unexpose from SCSI midlayer and drop the
 * driver-list reference.
 */
1400 if (tgtdev->host_exposed)
1401 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
1402 mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
1403 mpi3mr_tgtdev_put(tgtdev);
/* Drop the lookup reference taken at the top of the loop. */
1409 mpi3mr_tgtdev_put(tgtdev);
1414 * mpi3mr_logdata_evt_bh - Log data event bottomhalf
1415 * @mrioc: Adapter instance reference
1416 * @fwevt: Firmware event reference
1418 * Extracts the event data and calls application interfacing
1419 * function to process the event further.
* The raw event payload and its size are handed to the
* application interface layer for buffering.
1423 static void mpi3mr_logdata_evt_bh(struct mpi3mr_ioc *mrioc,
1424 struct mpi3mr_fwevt *fwevt)
1426 mpi3mr_app_save_logdata(mrioc, fwevt->event_data,
1427 fwevt->event_data_size);
1431 * mpi3mr_fwevt_bh - Firmware event bottomhalf handler
1432 * @mrioc: Adapter instance reference
1433 * @fwevt: Firmware event reference
1435 * Identifies the firmware event and calls corresponding bottom
1436 * half handler and sends event acknowledgment if required.
1440 static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
1441 struct mpi3mr_fwevt *fwevt)
/* Detach from the pending-event list and publish as the event
 * currently being processed (cleared again before returning).
 */
1443 mpi3mr_fwevt_del_from_list(mrioc, fwevt);
1444 mrioc->current_event = fwevt;
/* Skip processing when the driver is shutting down or the event
 * was queued only for acknowledgment.
 */
1446 if (mrioc->stop_drv_processing)
1449 if (!fwevt->process_evt)
/* Dispatch to the per-event bottom-half handler. */
1452 switch (fwevt->event_id) {
1453 case MPI3_EVENT_DEVICE_ADDED:
1455 struct mpi3_device_page0 *dev_pg0 =
1456 (struct mpi3_device_page0 *)fwevt->event_data;
1457 mpi3mr_report_tgtdev_to_host(mrioc,
1458 le16_to_cpu(dev_pg0->persistent_id));
1461 case MPI3_EVENT_DEVICE_INFO_CHANGED:
1463 mpi3mr_devinfochg_evt_bh(mrioc,
1464 (struct mpi3_device_page0 *)fwevt->event_data);
1467 case MPI3_EVENT_DEVICE_STATUS_CHANGE:
1469 mpi3mr_devstatuschg_evt_bh(mrioc, fwevt);
1472 case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
1474 mpi3mr_sastopochg_evt_bh(mrioc, fwevt);
1477 case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
1479 mpi3mr_pcietopochg_evt_bh(mrioc, fwevt);
1482 case MPI3_EVENT_LOG_DATA:
1484 mpi3mr_logdata_evt_bh(mrioc, fwevt);
/* Acknowledge the event to the firmware if it requested one. */
1492 if (fwevt->send_ack)
1493 mpi3mr_process_event_ack(mrioc, fwevt->event_id,
1496 /* Put fwevt reference count to neutralize kref_init increment */
1497 mpi3mr_fwevt_put(fwevt);
1498 mrioc->current_event = NULL;
1502 * mpi3mr_fwevt_worker - Firmware event worker
1503 * @work: Work struct containing firmware event
1505 * Extracts the firmware event and calls mpi3mr_fwevt_bh.
* Workqueue callback: recovers the containing mpi3mr_fwevt from
* the embedded work_struct and processes it in the bottom half.
1509 static void mpi3mr_fwevt_worker(struct work_struct *work)
1511 struct mpi3mr_fwevt *fwevt = container_of(work, struct mpi3mr_fwevt,
1513 mpi3mr_fwevt_bh(fwevt->mrioc, fwevt);
1515 * Put fwevt reference count after
1516 * dequeuing it from worker queue
1518 mpi3mr_fwevt_put(fwevt);
1522 * mpi3mr_create_tgtdev - Create and add a target device
1523 * @mrioc: Adapter instance reference
1524 * @dev_pg0: Device Page 0 data
1526 * If the device specified by the device page 0 data is not
1527 * present in the driver's internal list, allocate the memory
1528 * for the device, populate the data and add to the list, else
1529 * update the device data. The key is persistent ID.
1531 * Return: 0 on success, -ENOMEM on memory allocation failure
1533 static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc,
1534 struct mpi3_device_page0 *dev_pg0)
1537 struct mpi3mr_tgt_dev *tgtdev = NULL;
1540 perst_id = le16_to_cpu(dev_pg0->persistent_id);
/* Existing device: refresh its data and drop the lookup ref. */
1541 tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
1543 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0);
1544 mpi3mr_tgtdev_put(tgtdev);
/* New device: allocate, populate from page 0, add to the list.
 * NOTE(review): the allocation-failure return path is elided in
 * this listing.
 */
1546 tgtdev = mpi3mr_alloc_tgtdev();
1549 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0);
1550 mpi3mr_tgtdev_add_to_list(mrioc, tgtdev);
1557 * mpi3mr_flush_delayed_cmd_lists - Flush pending commands
1558 * @mrioc: Adapter instance reference
1560 * Flush pending commands in the delayed lists due to a
1561 * controller reset or driver removal as a cleanup.
* Drains both the delayed device-removal-handshake list and the
* delayed event-ack list, freeing each node.
1565 void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc)
1567 struct delayed_dev_rmhs_node *_rmhs_node;
1568 struct delayed_evt_ack_node *_evtack_node;
1570 dprint_reset(mrioc, "flushing delayed dev_remove_hs commands\n")
1571 while (!list_empty(&mrioc->delayed_rmhs_list)) {
1572 _rmhs_node = list_entry(mrioc->delayed_rmhs_list.next,
1573 struct delayed_dev_rmhs_node, list);
/* NOTE(review): the kfree() of _rmhs_node is elided in this
 * listing; without it this loop would leak each node.
 */
1574 list_del(&_rmhs_node->list);
1577 dprint_reset(mrioc, "flushing delayed event ack commands\n");
1578 while (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
1579 _evtack_node = list_entry(mrioc->delayed_evtack_cmds_list.next,
1580 struct delayed_evt_ack_node, list);
1581 list_del(&_evtack_node->list);
1582 kfree(_evtack_node);
1587 * mpi3mr_dev_rmhs_complete_iou - Device removal IOUC completion
1588 * @mrioc: Adapter instance reference
1589 * @drv_cmd: Internal command tracker
1591 * Issues a target reset TM to the firmware from the device
1592 * removal TM pend list or retry the removal handshake sequence
1593 * based on the IOU control request IOC status.
1597 static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_ioc *mrioc,
1598 struct mpi3mr_drv_cmd *drv_cmd)
/* Recover the dev_rmhs command slot index from the host tag. */
1600 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
1601 struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
/* Command flushed by a controller reset: nothing further to do
 * here (reset cleanup owns the tracker).
 */
1603 if (drv_cmd->state & MPI3MR_CMD_RESET)
1607 "%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n",
1608 __func__, drv_cmd->dev_handle, drv_cmd->ioc_status,
1609 drv_cmd->ioc_loginfo);
/* On failure, retry the full handshake up to
 * MPI3MR_DEV_RMHS_RETRY_COUNT times before giving up.
 */
1610 if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
1611 if (drv_cmd->retry_count < MPI3MR_DEV_RMHS_RETRY_COUNT) {
1612 drv_cmd->retry_count++;
1614 "%s :dev_rmhs_iouctrl_complete: handle(0x%04x)retrying handshake retry=%d\n",
1615 __func__, drv_cmd->dev_handle,
1616 drv_cmd->retry_count);
1617 mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle,
1618 drv_cmd, drv_cmd->iou_rc);
1622 "%s :dev removal handshake failed after all retries: handle(0x%04x)\n",
1623 __func__, drv_cmd->dev_handle);
1626 "%s :dev removal handshake completed successfully: handle(0x%04x)\n",
1627 __func__, drv_cmd->dev_handle);
/* Handshake done for this handle: clear its removal-pending bit. */
1628 clear_bit(drv_cmd->dev_handle, mrioc->removepend_bitmap);
/* Reuse this tracker immediately for the next queued (delayed)
 * removal handshake, if any, instead of freeing the slot.
 */
1631 if (!list_empty(&mrioc->delayed_rmhs_list)) {
1632 delayed_dev_rmhs = list_entry(mrioc->delayed_rmhs_list.next,
1633 struct delayed_dev_rmhs_node, list);
1634 drv_cmd->dev_handle = delayed_dev_rmhs->handle;
1635 drv_cmd->retry_count = 0;
1636 drv_cmd->iou_rc = delayed_dev_rmhs->iou_rc;
1638 "%s :dev_rmhs_iouctrl_complete: processing delayed TM: handle(0x%04x)\n",
1639 __func__, drv_cmd->dev_handle);
1640 mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle, drv_cmd,
1642 list_del(&delayed_dev_rmhs->list);
1643 kfree(delayed_dev_rmhs);
/* No more queued work: reset the tracker and free the slot. */
1648 drv_cmd->state = MPI3MR_CMD_NOTUSED;
1649 drv_cmd->callback = NULL;
1650 drv_cmd->retry_count = 0;
1651 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
1652 clear_bit(cmd_idx, mrioc->devrem_bitmap);
1656 * mpi3mr_dev_rmhs_complete_tm - Device removal TM completion
1657 * @mrioc: Adapter instance reference
1658 * @drv_cmd: Internal command tracker
1660 * Issues a target reset TM to the firmware from the device
1661 * removal TM pend list or issue IO unit control request as
1662 * part of device removal or hidden acknowledgment handshake.
1666 static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_ioc *mrioc,
1667 struct mpi3mr_drv_cmd *drv_cmd)
1669 struct mpi3_iounit_control_request iou_ctrl;
1670 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
1671 struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
/* Tracker flushed by a controller reset: bail out. */
1674 if (drv_cmd->state & MPI3MR_CMD_RESET)
1677 if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
1678 tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;
1682 "dev_rmhs_tr_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x), term_count(%d)\n",
1683 mrioc->name, drv_cmd->dev_handle, drv_cmd->ioc_status,
1684 drv_cmd->ioc_loginfo,
1685 le32_to_cpu(tm_reply->termination_count));
1687 pr_info(IOCNAME "Issuing IOU CTL: handle(0x%04x) dev_rmhs idx(%d)\n",
1688 mrioc->name, drv_cmd->dev_handle, cmd_idx);
/* TM stage done; next step of the handshake is an IO unit
 * control request (remove/hidden-ack), completed by
 * mpi3mr_dev_rmhs_complete_iou().
 */
1690 memset(&iou_ctrl, 0, sizeof(iou_ctrl));
1692 drv_cmd->state = MPI3MR_CMD_PENDING;
1693 drv_cmd->is_waiting = 0;
1694 drv_cmd->callback = mpi3mr_dev_rmhs_complete_iou;
1695 iou_ctrl.operation = drv_cmd->iou_rc;
1696 iou_ctrl.param16[0] = cpu_to_le16(drv_cmd->dev_handle);
1697 iou_ctrl.host_tag = cpu_to_le16(drv_cmd->host_tag);
1698 iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
1700 retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl, sizeof(iou_ctrl),
1703 pr_err(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n",
/* Error path: reset the tracker and release the command slot. */
1710 drv_cmd->state = MPI3MR_CMD_NOTUSED;
1711 drv_cmd->callback = NULL;
1712 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
1713 drv_cmd->retry_count = 0;
1714 clear_bit(cmd_idx, mrioc->devrem_bitmap);
1718 * mpi3mr_dev_rmhs_send_tm - Issue TM for device removal
1719 * @mrioc: Adapter instance reference
1720 * @handle: Device handle
1721 * @cmdparam: Internal command tracker
1722 * @iou_rc: IO unit reason code
1724 * Issues a target reset TM to the firmware or add it to a pend
1725 * list as part of device removal or hidden acknowledgment
* handshake. When @cmdparam is NULL a free dev_rmhs command slot
* is claimed from the bitmap; if none is available the request is
* queued on delayed_rmhs_list for later processing.
1730 static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
1731 struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc)
1733 struct mpi3_scsi_task_mgmt_request tm_req;
1735 u16 cmd_idx = MPI3MR_NUM_DEVRMCMD;
1737 struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
1738 struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
/* Atomically claim a free command slot; retry because another
 * context may set the bit between find and test_and_set.
 */
1743 cmd_idx = find_first_zero_bit(mrioc->devrem_bitmap,
1744 MPI3MR_NUM_DEVRMCMD);
1745 if (cmd_idx < MPI3MR_NUM_DEVRMCMD) {
1746 if (!test_and_set_bit(cmd_idx, mrioc->devrem_bitmap))
1748 cmd_idx = MPI3MR_NUM_DEVRMCMD;
1750 } while (retrycount--);
/* All slots busy: queue the request for later. */
1752 if (cmd_idx >= MPI3MR_NUM_DEVRMCMD) {
1753 delayed_dev_rmhs = kzalloc(sizeof(*delayed_dev_rmhs),
1755 if (!delayed_dev_rmhs)
1757 INIT_LIST_HEAD(&delayed_dev_rmhs->list);
1758 delayed_dev_rmhs->handle = handle;
1759 delayed_dev_rmhs->iou_rc = iou_rc;
1760 list_add_tail(&delayed_dev_rmhs->list,
1761 &mrioc->delayed_rmhs_list);
1762 ioc_info(mrioc, "%s :DevRmHs: tr:handle(0x%04x) is postponed\n",
1766 drv_cmd = &mrioc->dev_rmhs_cmds[cmd_idx];
/* Normalize cmd_idx from the tracker's own tag (also covers the
 * caller-supplied tracker case).
 */
1769 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
1771 "%s :Issuing TR TM: for devhandle 0x%04x with dev_rmhs %d\n",
1772 __func__, handle, cmd_idx);
1774 memset(&tm_req, 0, sizeof(tm_req));
1775 if (drv_cmd->state & MPI3MR_CMD_PENDING) {
1776 ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__);
/* Prepare the tracker; completion continues the handshake in
 * mpi3mr_dev_rmhs_complete_tm().
 */
1779 drv_cmd->state = MPI3MR_CMD_PENDING;
1780 drv_cmd->is_waiting = 0;
1781 drv_cmd->callback = mpi3mr_dev_rmhs_complete_tm;
1782 drv_cmd->dev_handle = handle;
1783 drv_cmd->iou_rc = iou_rc;
1784 tm_req.dev_handle = cpu_to_le16(handle);
1785 tm_req.task_type = MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
1786 tm_req.host_tag = cpu_to_le16(drv_cmd->host_tag);
1787 tm_req.task_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INVALID);
1788 tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;
/* Mark removal pending before posting so completion paths see it. */
1790 set_bit(handle, mrioc->removepend_bitmap);
1791 retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1);
1793 ioc_err(mrioc, "%s :Issue DevRmHsTM: Admin Post failed\n",
/* Error path: reset the tracker and release the command slot. */
1800 drv_cmd->state = MPI3MR_CMD_NOTUSED;
1801 drv_cmd->callback = NULL;
1802 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
1803 drv_cmd->retry_count = 0;
1804 clear_bit(cmd_idx, mrioc->devrem_bitmap);
1808 * mpi3mr_complete_evt_ack - event ack request completion
1809 * @mrioc: Adapter instance reference
1810 * @drv_cmd: Internal command tracker
1812 * This is the completion handler for non blocking event
1813 * acknowledgment sent to the firmware and this will issue any
1814 * pending event acknowledgment request.
1818 static void mpi3mr_complete_evt_ack(struct mpi3mr_ioc *mrioc,
1819 struct mpi3mr_drv_cmd *drv_cmd)
/* Recover the evtack command slot index from the host tag. */
1821 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
1822 struct delayed_evt_ack_node *delayed_evtack = NULL;
/* Tracker flushed by a controller reset: bail out. */
1824 if (drv_cmd->state & MPI3MR_CMD_RESET)
/* A failed ack is only logged; no retry is attempted here. */
1827 if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
1828 dprint_event_th(mrioc,
1829 "immediate event ack failed with ioc_status(0x%04x) log_info(0x%08x)\n",
1830 (drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
1831 drv_cmd->ioc_loginfo);
/* Reuse this tracker for the next delayed ack, if one is queued. */
1834 if (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
1836 list_entry(mrioc->delayed_evtack_cmds_list.next,
1837 struct delayed_evt_ack_node, list);
1838 mpi3mr_send_event_ack(mrioc, delayed_evtack->event, drv_cmd,
1839 delayed_evtack->event_ctx);
1840 list_del(&delayed_evtack->list);
1841 kfree(delayed_evtack);
/* No queued work: reset the tracker and free the slot. */
1845 drv_cmd->state = MPI3MR_CMD_NOTUSED;
1846 drv_cmd->callback = NULL;
1847 clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);
1851 * mpi3mr_send_event_ack - Issue event acknowledgment request
1852 * @mrioc: Adapter instance reference
1853 * @event: MPI3 event id
1854 * @cmdparam: Internal command tracker
1855 * @event_ctx: event context
1857 * Issues event acknowledgment request to the firmware if there
1858 * is a free command to send the event ack, else it is added to a
1859 * pend list so that it will be processed on a completion of a
1860 * prior event acknowledgment.
1864 static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
1865 struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx)
1867 struct mpi3_event_ack_request evtack_req;
1870 u16 cmd_idx = MPI3MR_NUM_EVTACKCMD;
1871 struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
1872 struct delayed_evt_ack_node *delayed_evtack = NULL;
/* cmdparam != NULL means we are re-using a tracker to send a
 * previously delayed ack; otherwise a fresh slot is claimed.
 */
1875 dprint_event_th(mrioc,
1876 "sending delayed event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
1880 dprint_event_th(mrioc,
1881 "sending event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
/* Atomically claim a free evtack slot, retrying on races. */
1884 cmd_idx = find_first_zero_bit(mrioc->evtack_cmds_bitmap,
1885 MPI3MR_NUM_EVTACKCMD);
1886 if (cmd_idx < MPI3MR_NUM_EVTACKCMD) {
1887 if (!test_and_set_bit(cmd_idx,
1888 mrioc->evtack_cmds_bitmap))
1890 cmd_idx = MPI3MR_NUM_EVTACKCMD;
1892 } while (retrycount--);
/* All slots busy: queue the ack for later processing. */
1894 if (cmd_idx >= MPI3MR_NUM_EVTACKCMD) {
1895 delayed_evtack = kzalloc(sizeof(*delayed_evtack),
1897 if (!delayed_evtack)
1899 INIT_LIST_HEAD(&delayed_evtack->list);
1900 delayed_evtack->event = event;
1901 delayed_evtack->event_ctx = event_ctx;
1902 list_add_tail(&delayed_evtack->list,
1903 &mrioc->delayed_evtack_cmds_list);
1904 dprint_event_th(mrioc,
1905 "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is postponed\n",
1909 drv_cmd = &mrioc->evtack_cmds[cmd_idx];
/* Normalize cmd_idx from the tracker's own tag. */
1912 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
1914 memset(&evtack_req, 0, sizeof(evtack_req));
1915 if (drv_cmd->state & MPI3MR_CMD_PENDING) {
1916 dprint_event_th(mrioc,
1917 "sending event ack failed due to command in use\n");
/* Build and post the non-blocking event ack; completion is
 * handled by mpi3mr_complete_evt_ack().
 */
1920 drv_cmd->state = MPI3MR_CMD_PENDING;
1921 drv_cmd->is_waiting = 0;
1922 drv_cmd->callback = mpi3mr_complete_evt_ack;
1923 evtack_req.host_tag = cpu_to_le16(drv_cmd->host_tag);
1924 evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
1925 evtack_req.event = event;
1926 evtack_req.event_context = cpu_to_le32(event_ctx);
1927 retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
1928 sizeof(evtack_req), 1);
1930 dprint_event_th(mrioc,
1931 "posting event ack request is failed\n");
1935 dprint_event_th(mrioc,
1936 "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is posted\n",
/* Error path: reset the tracker and release the command slot. */
1941 drv_cmd->state = MPI3MR_CMD_NOTUSED;
1942 drv_cmd->callback = NULL;
1943 clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);
1947 * mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf
1948 * @mrioc: Adapter instance reference
1949 * @event_reply: event data
1951 * Checks for the reason code and based on that either block I/O
1952 * to device, or unblock I/O to the device, or start the device
1953 * removal handshake with reason as remove with the firmware for
* the affected devices.
1958 static void mpi3mr_pcietopochg_evt_th(struct mpi3mr_ioc *mrioc,
1959 struct mpi3_event_notification_reply *event_reply)
1961 struct mpi3_event_data_pcie_topology_change_list *topo_evt =
1962 (struct mpi3_event_data_pcie_topology_change_list *)event_reply->event_data;
1966 struct mpi3mr_tgt_dev *tgtdev = NULL;
1967 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
/* For each port entry, act on the reported port status. */
1969 for (i = 0; i < topo_evt->num_entries; i++) {
1970 handle = le16_to_cpu(topo_evt->port_entry[i].attached_dev_handle);
1973 reason_code = topo_evt->port_entry[i].port_status;
1974 scsi_tgt_priv_data = NULL;
1975 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
1976 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
1977 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
1978 tgtdev->starget->hostdata;
1979 switch (reason_code) {
1980 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
/* Device gone: mark removed, unblock I/O so pending commands
 * fail fast, and start the remove handshake with firmware.
 */
1981 if (scsi_tgt_priv_data) {
1982 scsi_tgt_priv_data->dev_removed = 1;
1983 scsi_tgt_priv_data->dev_removedelay = 0;
1984 atomic_set(&scsi_tgt_priv_data->block_io, 0);
1986 mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL,
1987 MPI3_CTRL_OP_REMOVE_DEVICE);
1989 case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
/* Device may come back: block I/O while in delayed-remove. */
1990 if (scsi_tgt_priv_data) {
1991 scsi_tgt_priv_data->dev_removedelay = 1;
1992 atomic_inc(&scsi_tgt_priv_data->block_io);
1995 case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
/* Device came back: clear the delay flag and unblock I/O. */
1996 if (scsi_tgt_priv_data &&
1997 scsi_tgt_priv_data->dev_removedelay) {
1998 scsi_tgt_priv_data->dev_removedelay = 0;
1999 atomic_dec_if_positive
2000 (&scsi_tgt_priv_data->block_io);
2003 case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
/* Drop the reference taken by the handle lookup above. */
2008 mpi3mr_tgtdev_put(tgtdev);
2013 * mpi3mr_sastopochg_evt_th - SASTopologyChange evt tophalf
2014 * @mrioc: Adapter instance reference
2015 * @event_reply: event data
2017 * Checks for the reason code and based on that either block I/O
2018 * to device, or unblock I/O to the device, or start the device
2019 * removal handshake with reason as remove with the firmware for
* the affected devices. SAS counterpart of
* mpi3mr_pcietopochg_evt_th().
2024 static void mpi3mr_sastopochg_evt_th(struct mpi3mr_ioc *mrioc,
2025 struct mpi3_event_notification_reply *event_reply)
2027 struct mpi3_event_data_sas_topology_change_list *topo_evt =
2028 (struct mpi3_event_data_sas_topology_change_list *)event_reply->event_data;
2032 struct mpi3mr_tgt_dev *tgtdev = NULL;
2033 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
/* For each PHY entry, act on the masked reason code. */
2035 for (i = 0; i < topo_evt->num_entries; i++) {
2036 handle = le16_to_cpu(topo_evt->phy_entry[i].attached_dev_handle);
2039 reason_code = topo_evt->phy_entry[i].status &
2040 MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
2041 scsi_tgt_priv_data = NULL;
2042 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
2043 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
2044 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
2045 tgtdev->starget->hostdata;
2046 switch (reason_code) {
2047 case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
/* Device gone: mark removed, unblock I/O so pending commands
 * fail fast, and start the remove handshake with firmware.
 */
2048 if (scsi_tgt_priv_data) {
2049 scsi_tgt_priv_data->dev_removed = 1;
2050 scsi_tgt_priv_data->dev_removedelay = 0;
2051 atomic_set(&scsi_tgt_priv_data->block_io, 0);
2053 mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL,
2054 MPI3_CTRL_OP_REMOVE_DEVICE);
2056 case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
/* Device may come back: block I/O while in delayed-remove. */
2057 if (scsi_tgt_priv_data) {
2058 scsi_tgt_priv_data->dev_removedelay = 1;
2059 atomic_inc(&scsi_tgt_priv_data->block_io);
2062 case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
/* Device came back: clear the delay flag and unblock I/O. */
2063 if (scsi_tgt_priv_data &&
2064 scsi_tgt_priv_data->dev_removedelay) {
2065 scsi_tgt_priv_data->dev_removedelay = 0;
2066 atomic_dec_if_positive
2067 (&scsi_tgt_priv_data->block_io);
2070 case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
/* Drop the reference taken by the handle lookup above. */
2075 mpi3mr_tgtdev_put(tgtdev);
2080 * mpi3mr_devstatuschg_evt_th - DeviceStatusChange evt tophalf
2081 * @mrioc: Adapter instance reference
2082 * @event_reply: event data
2084 * Checks for the reason code and based on that either block I/O
2085 * to device, or unblock I/O to the device, or start the device
2086 * removal handshake with reason as remove/hide acknowledgment
2087 * with the firmware.
2091 static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc,
2092 struct mpi3_event_notification_reply *event_reply)
/* Action flags derived from the reason code below. */
2095 u8 ublock = 0, block = 0, hide = 0, delete = 0, remove = 0;
2096 struct mpi3mr_tgt_dev *tgtdev = NULL;
2097 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
2098 struct mpi3_event_data_device_status_change *evtdata =
2099 (struct mpi3_event_data_device_status_change *)event_reply->event_data;
/* Ignore events once the driver is shutting down. */
2101 if (mrioc->stop_drv_processing)
2104 dev_handle = le16_to_cpu(evtdata->dev_handle);
/* Translate the reason code into which actions apply; the flag
 * assignments themselves are elided in this listing.
 */
2106 switch (evtdata->reason_code) {
2107 case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT:
2108 case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT:
2111 case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
2115 case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
2119 case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP:
2120 case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP:
2127 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
2131 tgtdev->is_hidden = hide;
2132 if (tgtdev->starget && tgtdev->starget->hostdata) {
2133 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
2134 tgtdev->starget->hostdata;
/* Apply block/delete/unblock to the SCSI target private data. */
2136 atomic_inc(&scsi_tgt_priv_data->block_io);
2138 scsi_tgt_priv_data->dev_removed = 1;
2140 atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
/* Kick off the appropriate firmware handshake: full removal or
 * hidden-device acknowledgment.
 */
2143 mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL,
2144 MPI3_CTRL_OP_REMOVE_DEVICE);
2146 mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL,
2147 MPI3_CTRL_OP_HIDDEN_ACK);
/* Drop the reference taken by the handle lookup above. */
2151 mpi3mr_tgtdev_put(tgtdev);
2155 * mpi3mr_preparereset_evt_th - Prepare for reset event tophalf
2156 * @mrioc: Adapter instance reference
2157 * @event_reply: event data
2159 * Blocks and unblocks host level I/O based on the reason code
* (RC_START sets prepare_for_reset, RC_ABORT clears it) and sends
* an event ack when the firmware requested one.
2163 static void mpi3mr_preparereset_evt_th(struct mpi3mr_ioc *mrioc,
2164 struct mpi3_event_notification_reply *event_reply)
2166 struct mpi3_event_data_prepare_for_reset *evtdata =
2167 (struct mpi3_event_data_prepare_for_reset *)event_reply->event_data;
2169 if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_START) {
2170 dprint_event_th(mrioc,
2171 "prepare for reset event top half with rc=start\n");
/* Already preparing: the early-return for this case is elided
 * in this listing.
 */
2172 if (mrioc->prepare_for_reset)
2174 mrioc->prepare_for_reset = 1;
2175 mrioc->prepare_for_reset_timeout_counter = 0;
2176 } else if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_ABORT) {
2177 dprint_event_th(mrioc,
2178 "prepare for reset top half with rc=abort\n");
2179 mrioc->prepare_for_reset = 0;
2180 mrioc->prepare_for_reset_timeout_counter = 0;
/* Ack immediately from the top half if the firmware asked. */
2182 if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
2183 == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
2184 mpi3mr_send_event_ack(mrioc, event_reply->event, NULL,
2185 le32_to_cpu(event_reply->event_context));
2189 * mpi3mr_energypackchg_evt_th - Energy pack change evt tophalf
2190 * @mrioc: Adapter instance reference
2191 * @event_reply: event data
2193 * Identifies the new shutdown timeout value and update.
* Updates mrioc->facts.shutdown_timeout from the event payload
* after validating it.
2197 static void mpi3mr_energypackchg_evt_th(struct mpi3mr_ioc *mrioc,
2198 struct mpi3_event_notification_reply *event_reply)
2200 struct mpi3_event_data_energy_pack_change *evtdata =
2201 (struct mpi3_event_data_energy_pack_change *)event_reply->event_data;
2202 u16 shutdown_timeout = le16_to_cpu(evtdata->shutdown_timeout);
/* NOTE(review): shutdown_timeout is u16, so "<= 0" only catches
 * zero; the comparison is effectively "== 0".
 */
2204 if (shutdown_timeout <= 0) {
2206 "%s :Invalid Shutdown Timeout received = %d\n",
2207 __func__, shutdown_timeout);
2212 "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n",
2213 __func__, mrioc->facts.shutdown_timeout, shutdown_timeout);
2214 mrioc->facts.shutdown_timeout = shutdown_timeout;
2218 * mpi3mr_cablemgmt_evt_th - Cable management event tophalf
2219 * @mrioc: Adapter instance reference
2220 * @event_reply: event data
2222 * Displays cable management event details.
2226 static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_ioc *mrioc,
2227 struct mpi3_event_notification_reply *event_reply)
2229 struct mpi3_event_data_cable_management *evtdata =
2230 (struct mpi3_event_data_cable_management *)event_reply->event_data;
/* Log-only handler: report power or speed issues per status. */
2232 switch (evtdata->status) {
2233 case MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER:
2235 ioc_info(mrioc, "An active cable with receptacle_id %d cannot be powered.\n"
2236 "Devices connected to this cable are not detected.\n"
2237 "This cable requires %d mW of power.\n",
2238 evtdata->receptacle_id,
2239 le32_to_cpu(evtdata->active_cable_power_requirement));
2242 case MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED:
2244 ioc_info(mrioc, "A cable with receptacle_id %d is not running at optimal speed\n",
2245 evtdata->receptacle_id);
2254 * mpi3mr_os_handle_events - Firmware event handler
2255 * @mrioc: Adapter instance reference
2256 * @event_reply: event data
2258 * Identify whether the event has to be handled and acknowledged
2259 * and either process the event in the tophalf and/or schedule a
2260 * bottom half through mpi3mr_fwevt_worker.
2264 void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
2265 struct mpi3_event_notification_reply *event_reply)
2268 struct mpi3mr_fwevt *fwevt = NULL;
2269 bool ack_req = 0, process_evt_bh = 0;
/* Drop all events once the driver is shutting down. */
2271 if (mrioc->stop_drv_processing)
2274 if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
2275 == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
2278 evt_type = event_reply->event;
/* Top-half dispatch; handlers may also set process_evt_bh so the
 * event is re-processed in the bottom half (flag assignments are
 * elided in this listing).
 */
2281 case MPI3_EVENT_DEVICE_ADDED:
2283 struct mpi3_device_page0 *dev_pg0 =
2284 (struct mpi3_device_page0 *)event_reply->event_data;
2285 if (mpi3mr_create_tgtdev(mrioc, dev_pg0))
2287 "%s :Failed to add device in the device add event\n",
2293 case MPI3_EVENT_DEVICE_STATUS_CHANGE:
2296 mpi3mr_devstatuschg_evt_th(mrioc, event_reply);
2299 case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
2302 mpi3mr_sastopochg_evt_th(mrioc, event_reply);
2305 case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
2308 mpi3mr_pcietopochg_evt_th(mrioc, event_reply);
2311 case MPI3_EVENT_PREPARE_FOR_RESET:
2313 mpi3mr_preparereset_evt_th(mrioc, event_reply);
2317 case MPI3_EVENT_DEVICE_INFO_CHANGED:
2318 case MPI3_EVENT_LOG_DATA:
2323 case MPI3_EVENT_ENERGY_PACK_CHANGE:
2325 mpi3mr_energypackchg_evt_th(mrioc, event_reply);
2328 case MPI3_EVENT_CABLE_MGMT:
2330 mpi3mr_cablemgmt_evt_th(mrioc, event_reply);
2333 case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
2334 case MPI3_EVENT_SAS_DISCOVERY:
2335 case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
2336 case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
2337 case MPI3_EVENT_PCIE_ENUMERATION:
2340 ioc_info(mrioc, "%s :event 0x%02x is not handled\n",
2341 __func__, evt_type);
/* If bottom-half processing or an ack is needed, snapshot the
 * event payload into a freshly allocated fwevt and queue it for
 * the worker. event_data_length is in 4-byte units.
 */
2344 if (process_evt_bh || ack_req) {
2345 sz = event_reply->event_data_length * 4;
2346 fwevt = mpi3mr_alloc_fwevt(sz);
2348 ioc_info(mrioc, "%s :failure at %s:%d/%s()!\n",
2349 __func__, __FILE__, __LINE__, __func__);
2353 memcpy(fwevt->event_data, event_reply->event_data, sz);
2354 fwevt->mrioc = mrioc;
2355 fwevt->event_id = evt_type;
2356 fwevt->send_ack = ack_req;
2357 fwevt->process_evt = process_evt_bh;
2358 fwevt->evt_ctx = le32_to_cpu(event_reply->event_context);
2359 mpi3mr_fwevt_add_to_list(mrioc, fwevt);
2364 * mpi3mr_setup_eedp - Setup EEDP information in MPI3 SCSI IO
2365 * @mrioc: Adapter instance reference
2366 * @scmd: SCSI command reference
2367 * @scsiio_req: MPI3 SCSI IO request
2369 * Identifies the protection information flags from the SCSI
2370 * command and set appropriate flags in the MPI3 SCSI IO
* request (eedp_flags and the EEDP SGL element).
2375 static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc,
2376 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
2379 unsigned char prot_op = scsi_get_prot_op(scmd);
/* Map the SCSI protection operation to the MPI3 EEDP operation;
 * ops that transfer protection data to/from the host also set
 * METASGL_VALID.
 */
2382 case SCSI_PROT_NORMAL:
2384 case SCSI_PROT_READ_STRIP:
2385 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE;
2387 case SCSI_PROT_WRITE_INSERT:
2388 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT;
2390 case SCSI_PROT_READ_INSERT:
2391 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT;
2392 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
2394 case SCSI_PROT_WRITE_STRIP:
2395 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE;
2396 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
2398 case SCSI_PROT_READ_PASS:
2399 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK;
2400 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
2402 case SCSI_PROT_WRITE_PASS:
/* IP checksum guard requires regenerating EEDP on the way out. */
2403 if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) {
2404 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN;
2405 scsiio_req->sgl[0].eedp.application_tag_translation_mask =
2408 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK;
2410 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
/* Fold in guard/reference check flags from the command. */
2416 if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
2417 eedp_flags |= MPI3_EEDPFLAGS_CHK_GUARD;
2419 if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM)
2420 eedp_flags |= MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM;
2422 if (scmd->prot_flags & SCSI_PROT_REF_CHECK) {
2423 eedp_flags |= MPI3_EEDPFLAGS_CHK_REF_TAG |
2424 MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
2425 scsiio_req->cdb.eedp32.primary_reference_tag =
2426 cpu_to_be32(scsi_prot_ref_tag(scmd));
2429 if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT)
2430 eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
2432 eedp_flags |= MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE;
/* Translate the protection interval (sector size) into the MPI3
 * user-data-size encoding; case labels are elided in this listing.
 */
2434 switch (scsi_prot_interval(scmd)) {
2436 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_512;
2439 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_520;
2442 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4080;
2445 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4088;
2448 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4096;
2451 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4104;
2454 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4160;
2460 scsiio_req->sgl[0].eedp.eedp_flags = cpu_to_le16(eedp_flags);
2461 scsiio_req->sgl[0].eedp.flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED;
2465 * mpi3mr_build_sense_buffer - Map sense information
2467 * @buf: Sense buffer to populate
2469 * @asc: Additional sense code
2470 * @ascq: Additional sense code qualifier
2472 * Maps the given sense information into either descriptor or
2473 * fixed format sense data.
2477 static inline void mpi3mr_build_sense_buffer(int desc, u8 *buf, u8 key,
2481 buf[0] = 0x72; /* descriptor, current */
2487 buf[0] = 0x70; /* fixed, current */
2496 * mpi3mr_map_eedp_error - Map EEDP errors from IOC status
2497 * @scmd: SCSI command reference
2498 * @ioc_status: status of MPI3 request
2500 * Maps the EEDP error status of the SCSI IO request to sense
2505 static void mpi3mr_map_eedp_error(struct scsi_cmnd *scmd,
2510 switch (ioc_status) {
2511 case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
2514 case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
2517 case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
2525 mpi3mr_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
2527 scmd->result = (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
2531 * mpi3mr_process_op_reply_desc - reply descriptor handler
2532 * @mrioc: Adapter instance reference
2533 * @reply_desc: Operational reply descriptor
2534 * @reply_dma: place holder for reply DMA address
2535 * @qidx: Operational queue index
2537 * Process the operational reply descriptor and identifies the
2538 * descriptor type. Based on the descriptor map the MPI3 request
2539 * status to a SCSI command status and calls scsi_done call
2544 void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
2545 struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma, u16 qidx)
2547 u16 reply_desc_type, host_tag = 0;
2548 u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
2549 u32 ioc_loginfo = 0;
2550 struct mpi3_status_reply_descriptor *status_desc = NULL;
2551 struct mpi3_address_reply_descriptor *addr_desc = NULL;
2552 struct mpi3_success_reply_descriptor *success_desc = NULL;
2553 struct mpi3_scsi_io_reply *scsi_reply = NULL;
2554 struct scsi_cmnd *scmd = NULL;
2555 struct scmd_priv *priv = NULL;
2556 u8 *sense_buf = NULL;
2557 u8 scsi_state = 0, scsi_status = 0, sense_state = 0;
2558 u32 xfer_count = 0, sense_count = 0, resp_data = 0;
2559 u16 dev_handle = 0xFFFF;
2560 struct scsi_sense_hdr sshdr;
2563 reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
2564 MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
2565 switch (reply_desc_type) {
2566 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
2567 status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
2568 host_tag = le16_to_cpu(status_desc->host_tag);
2569 ioc_status = le16_to_cpu(status_desc->ioc_status);
2571 MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
2572 ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
2573 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
2575 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
2576 addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
2577 *reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
2578 scsi_reply = mpi3mr_get_reply_virt_addr(mrioc,
2581 panic("%s: scsi_reply is NULL, this shouldn't happen\n",
2585 host_tag = le16_to_cpu(scsi_reply->host_tag);
2586 ioc_status = le16_to_cpu(scsi_reply->ioc_status);
2587 scsi_status = scsi_reply->scsi_status;
2588 scsi_state = scsi_reply->scsi_state;
2589 dev_handle = le16_to_cpu(scsi_reply->dev_handle);
2590 sense_state = (scsi_state & MPI3_SCSI_STATE_SENSE_MASK);
2591 xfer_count = le32_to_cpu(scsi_reply->transfer_count);
2592 sense_count = le32_to_cpu(scsi_reply->sense_count);
2593 resp_data = le32_to_cpu(scsi_reply->response_data);
2594 sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
2595 le64_to_cpu(scsi_reply->sense_data_buffer_address));
2597 MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
2598 ioc_loginfo = le32_to_cpu(scsi_reply->ioc_log_info);
2599 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
2600 if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)
2601 panic("%s: Ran out of sense buffers\n", mrioc->name);
2603 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
2604 success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
2605 host_tag = le16_to_cpu(success_desc->host_tag);
2610 scmd = mpi3mr_scmd_from_host_tag(mrioc, host_tag, qidx);
2612 panic("%s: Cannot Identify scmd for host_tag 0x%x\n",
2613 mrioc->name, host_tag);
2616 priv = scsi_cmd_priv(scmd);
2618 scmd->result = DID_OK << 16;
2622 scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_count);
2623 if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN &&
2624 xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY ||
2625 scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT ||
2626 scsi_status == MPI3_SCSI_STATUS_TASK_SET_FULL))
2627 ioc_status = MPI3_IOCSTATUS_SUCCESS;
2629 if ((sense_state == MPI3_SCSI_STATE_SENSE_VALID) && sense_count &&
2631 u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, sense_count);
2633 memcpy(scmd->sense_buffer, sense_buf, sz);
2636 switch (ioc_status) {
2637 case MPI3_IOCSTATUS_BUSY:
2638 case MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES:
2639 scmd->result = SAM_STAT_BUSY;
2641 case MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2642 scmd->result = DID_NO_CONNECT << 16;
2644 case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
2645 scmd->result = DID_SOFT_ERROR << 16;
2647 case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED:
2648 case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED:
2649 scmd->result = DID_RESET << 16;
2651 case MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2652 if ((xfer_count == 0) || (scmd->underflow > xfer_count))
2653 scmd->result = DID_SOFT_ERROR << 16;
2655 scmd->result = (DID_OK << 16) | scsi_status;
2657 case MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN:
2658 scmd->result = (DID_OK << 16) | scsi_status;
2659 if (sense_state == MPI3_SCSI_STATE_SENSE_VALID)
2661 if (xfer_count < scmd->underflow) {
2662 if (scsi_status == SAM_STAT_BUSY)
2663 scmd->result = SAM_STAT_BUSY;
2665 scmd->result = DID_SOFT_ERROR << 16;
2666 } else if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) ||
2667 (sense_state != MPI3_SCSI_STATE_SENSE_NOT_AVAILABLE))
2668 scmd->result = DID_SOFT_ERROR << 16;
2669 else if (scsi_state & MPI3_SCSI_STATE_TERMINATED)
2670 scmd->result = DID_RESET << 16;
2672 case MPI3_IOCSTATUS_SCSI_DATA_OVERRUN:
2673 scsi_set_resid(scmd, 0);
2675 case MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR:
2676 case MPI3_IOCSTATUS_SUCCESS:
2677 scmd->result = (DID_OK << 16) | scsi_status;
2678 if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) ||
2679 (sense_state == MPI3_SCSI_STATE_SENSE_FAILED) ||
2680 (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY))
2681 scmd->result = DID_SOFT_ERROR << 16;
2682 else if (scsi_state & MPI3_SCSI_STATE_TERMINATED)
2683 scmd->result = DID_RESET << 16;
2685 case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
2686 case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
2687 case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
2688 mpi3mr_map_eedp_error(scmd, ioc_status);
2690 case MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2691 case MPI3_IOCSTATUS_INVALID_FUNCTION:
2692 case MPI3_IOCSTATUS_INVALID_SGL:
2693 case MPI3_IOCSTATUS_INTERNAL_ERROR:
2694 case MPI3_IOCSTATUS_INVALID_FIELD:
2695 case MPI3_IOCSTATUS_INVALID_STATE:
2696 case MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR:
2697 case MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2698 case MPI3_IOCSTATUS_INSUFFICIENT_POWER:
2700 scmd->result = DID_SOFT_ERROR << 16;
2704 if (scmd->result != (DID_OK << 16) && (scmd->cmnd[0] != ATA_12) &&
2705 (scmd->cmnd[0] != ATA_16)) {
2706 ioc_info(mrioc, "%s :scmd->result 0x%x\n", __func__,
2708 scsi_print_command(scmd);
2710 "%s :Command issued to handle 0x%02x returned with error 0x%04x loginfo 0x%08x, qid %d\n",
2711 __func__, dev_handle, ioc_status, ioc_loginfo,
2712 priv->req_q_idx + 1);
2714 " host_tag %d scsi_state 0x%02x scsi_status 0x%02x, xfer_cnt %d resp_data 0x%x\n",
2715 host_tag, scsi_state, scsi_status, xfer_count, resp_data);
2717 scsi_normalize_sense(sense_buf, sense_count, &sshdr);
2719 "%s :sense_count 0x%x, sense_key 0x%x ASC 0x%x, ASCQ 0x%x\n",
2720 __func__, sense_count, sshdr.sense_key,
2721 sshdr.asc, sshdr.ascq);
2725 if (priv->meta_sg_valid) {
2726 dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
2727 scsi_prot_sg_count(scmd), scmd->sc_data_direction);
2729 mpi3mr_clear_scmd_priv(mrioc, scmd);
2730 scsi_dma_unmap(scmd);
2734 mpi3mr_repost_sense_buf(mrioc,
2735 le64_to_cpu(scsi_reply->sense_data_buffer_address));
2739 * mpi3mr_get_chain_idx - get free chain buffer index
2740 * @mrioc: Adapter instance reference
2742 * Try to get a free chain buffer index from the free pool.
2744 * Return: -1 on failure or the free chain buffer index
2746 static int mpi3mr_get_chain_idx(struct mpi3mr_ioc *mrioc)
2752 spin_lock(&mrioc->chain_buf_lock);
2753 cmd_idx = find_first_zero_bit(mrioc->chain_bitmap,
2754 mrioc->chain_buf_count);
2755 if (cmd_idx < mrioc->chain_buf_count) {
2756 set_bit(cmd_idx, mrioc->chain_bitmap);
2757 spin_unlock(&mrioc->chain_buf_lock);
2760 spin_unlock(&mrioc->chain_buf_lock);
2762 } while (retry_count--);
2767 * mpi3mr_prepare_sg_scmd - build scatter gather list
2768 * @mrioc: Adapter instance reference
2769 * @scmd: SCSI command reference
2770 * @scsiio_req: MPI3 SCSI IO request
2772 * This function maps SCSI command's data and protection SGEs to
2773 * MPI request SGEs. If required additional 4K chain buffer is
2774 * used to send the SGEs.
2776 * Return: 0 on success, -ENOMEM on dma_map_sg failure
2778 static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc,
2779 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
2781 dma_addr_t chain_dma;
2782 struct scatterlist *sg_scmd;
2783 void *sg_local, *chain;
2785 int sges_left, chain_idx;
2786 u32 sges_in_segment;
2787 u8 simple_sgl_flags;
2788 u8 simple_sgl_flags_last;
2789 u8 last_chain_sgl_flags;
2790 struct chain_element *chain_req;
2791 struct scmd_priv *priv = NULL;
2792 u32 meta_sg = le32_to_cpu(scsiio_req->flags) &
2793 MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI;
2795 priv = scsi_cmd_priv(scmd);
2797 simple_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
2798 MPI3_SGE_FLAGS_DLAS_SYSTEM;
2799 simple_sgl_flags_last = simple_sgl_flags |
2800 MPI3_SGE_FLAGS_END_OF_LIST;
2801 last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN |
2802 MPI3_SGE_FLAGS_DLAS_SYSTEM;
2805 sg_local = &scsiio_req->sgl[MPI3_SCSIIO_METASGL_INDEX];
2807 sg_local = &scsiio_req->sgl;
2809 if (!scsiio_req->data_length && !meta_sg) {
2810 mpi3mr_build_zero_len_sge(sg_local);
2815 sg_scmd = scsi_prot_sglist(scmd);
2816 sges_left = dma_map_sg(&mrioc->pdev->dev,
2817 scsi_prot_sglist(scmd),
2818 scsi_prot_sg_count(scmd),
2819 scmd->sc_data_direction);
2820 priv->meta_sg_valid = 1; /* To unmap meta sg DMA */
2822 sg_scmd = scsi_sglist(scmd);
2823 sges_left = scsi_dma_map(scmd);
2826 if (sges_left < 0) {
2827 sdev_printk(KERN_ERR, scmd->device,
2828 "scsi_dma_map failed: request for %d bytes!\n",
2829 scsi_bufflen(scmd));
2832 if (sges_left > MPI3MR_SG_DEPTH) {
2833 sdev_printk(KERN_ERR, scmd->device,
2834 "scsi_dma_map returned unsupported sge count %d!\n",
2839 sges_in_segment = (mrioc->facts.op_req_sz -
2840 offsetof(struct mpi3_scsi_io_request, sgl)) / sizeof(struct mpi3_sge_common);
2842 if (scsiio_req->sgl[0].eedp.flags ==
2843 MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED && !meta_sg) {
2844 sg_local += sizeof(struct mpi3_sge_common);
2846 /* Reserve 1st segment (scsiio_req->sgl[0]) for eedp */
2849 if (scsiio_req->msg_flags ==
2850 MPI3_SCSIIO_MSGFLAGS_METASGL_VALID && !meta_sg) {
2852 /* Reserve last segment (scsiio_req->sgl[3]) for meta sg */
2856 sges_in_segment = 1;
2858 if (sges_left <= sges_in_segment)
2859 goto fill_in_last_segment;
2861 /* fill in main message segment when there is a chain following */
2862 while (sges_in_segment > 1) {
2863 mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
2864 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2865 sg_scmd = sg_next(sg_scmd);
2866 sg_local += sizeof(struct mpi3_sge_common);
2871 chain_idx = mpi3mr_get_chain_idx(mrioc);
2874 chain_req = &mrioc->chain_sgl_list[chain_idx];
2876 priv->meta_chain_idx = chain_idx;
2878 priv->chain_idx = chain_idx;
2880 chain = chain_req->addr;
2881 chain_dma = chain_req->dma_addr;
2882 sges_in_segment = sges_left;
2883 chain_length = sges_in_segment * sizeof(struct mpi3_sge_common);
2885 mpi3mr_add_sg_single(sg_local, last_chain_sgl_flags,
2886 chain_length, chain_dma);
2890 fill_in_last_segment:
2891 while (sges_left > 0) {
2893 mpi3mr_add_sg_single(sg_local,
2894 simple_sgl_flags_last, sg_dma_len(sg_scmd),
2895 sg_dma_address(sg_scmd));
2897 mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
2898 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2899 sg_scmd = sg_next(sg_scmd);
2900 sg_local += sizeof(struct mpi3_sge_common);
2908 * mpi3mr_build_sg_scmd - build scatter gather list for SCSI IO
2909 * @mrioc: Adapter instance reference
2910 * @scmd: SCSI command reference
2911 * @scsiio_req: MPI3 SCSI IO request
2913 * This function calls mpi3mr_prepare_sg_scmd for constructing
2914 * both data SGEs and protection information SGEs in the MPI
2915 * format from the SCSI Command as appropriate .
2917 * Return: return value of mpi3mr_prepare_sg_scmd.
2919 static int mpi3mr_build_sg_scmd(struct mpi3mr_ioc *mrioc,
2920 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
2924 ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req);
2928 if (scsiio_req->msg_flags == MPI3_SCSIIO_MSGFLAGS_METASGL_VALID) {
2929 /* There is a valid meta sg */
2930 scsiio_req->flags |=
2931 cpu_to_le32(MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI);
2932 ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req);
2939 * mpi3mr_tm_response_name - get TM response as a string
2940 * @resp_code: TM response code
2942 * Convert known task management response code as a readable
2945 * Return: response code string.
2947 static const char *mpi3mr_tm_response_name(u8 resp_code)
2951 switch (resp_code) {
2952 case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
2953 desc = "task management request completed";
2955 case MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME:
2956 desc = "invalid frame";
2958 case MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED:
2959 desc = "task management request not supported";
2961 case MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED:
2962 desc = "task management request failed";
2964 case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
2965 desc = "task management request succeeded";
2967 case MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN:
2968 desc = "invalid LUN";
2970 case MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG:
2971 desc = "overlapped tag attempted";
2973 case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
2974 desc = "task queued, however not sent to target";
2976 case MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED:
2977 desc = "task management request denied by NVMe device";
2987 inline void mpi3mr_poll_pend_io_completions(struct mpi3mr_ioc *mrioc)
2990 int num_of_reply_queues =
2991 mrioc->num_op_reply_q + mrioc->op_reply_q_offset;
2993 for (i = mrioc->op_reply_q_offset; i < num_of_reply_queues; i++)
2994 mpi3mr_process_op_reply_q(mrioc,
2995 mrioc->intr_info[i].op_reply_q);
2999 * mpi3mr_issue_tm - Issue Task Management request
3000 * @mrioc: Adapter instance reference
3001 * @tm_type: Task Management type
3002 * @handle: Device handle
3004 * @htag: Host tag of the TM request
3005 * @timeout: TM timeout value
3006 * @drv_cmd: Internal command tracker
3007 * @resp_code: Response code place holder
3008 * @scmd: SCSI command
3010 * Issues a Task Management Request to the controller for a
3011 * specified target, lun and command and wait for its completion
3012 * and check TM response. Recover the TM if it timed out by
3013 * issuing controller reset.
3015 * Return: 0 on success, non-zero on errors
3017 int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
3018 u16 handle, uint lun, u16 htag, ulong timeout,
3019 struct mpi3mr_drv_cmd *drv_cmd,
3020 u8 *resp_code, struct scsi_cmnd *scmd)
3022 struct mpi3_scsi_task_mgmt_request tm_req;
3023 struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
3025 struct mpi3mr_tgt_dev *tgtdev = NULL;
3026 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
3027 struct scmd_priv *cmd_priv = NULL;
3028 struct scsi_device *sdev = NULL;
3029 struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL;
3031 ioc_info(mrioc, "%s :Issue TM: TM type (0x%x) for devhandle 0x%04x\n",
3032 __func__, tm_type, handle);
3033 if (mrioc->unrecoverable) {
3035 ioc_err(mrioc, "%s :Issue TM: Unrecoverable controller\n",
3040 memset(&tm_req, 0, sizeof(tm_req));
3041 mutex_lock(&drv_cmd->mutex);
3042 if (drv_cmd->state & MPI3MR_CMD_PENDING) {
3044 ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__);
3045 mutex_unlock(&drv_cmd->mutex);
3048 if (mrioc->reset_in_progress) {
3050 ioc_err(mrioc, "%s :Issue TM: Reset in progress\n", __func__);
3051 mutex_unlock(&drv_cmd->mutex);
3055 drv_cmd->state = MPI3MR_CMD_PENDING;
3056 drv_cmd->is_waiting = 1;
3057 drv_cmd->callback = NULL;
3058 tm_req.dev_handle = cpu_to_le16(handle);
3059 tm_req.task_type = tm_type;
3060 tm_req.host_tag = cpu_to_le16(htag);
3062 int_to_scsilun(lun, (struct scsi_lun *)tm_req.lun);
3063 tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;
3065 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
3068 sdev = scmd->device;
3069 sdev_priv_data = sdev->hostdata;
3070 scsi_tgt_priv_data = ((sdev_priv_data) ?
3071 sdev_priv_data->tgt_priv_data : NULL);
3073 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
3074 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
3075 tgtdev->starget->hostdata;
3078 if (scsi_tgt_priv_data)
3079 atomic_inc(&scsi_tgt_priv_data->block_io);
3081 if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) {
3082 if (cmd_priv && tgtdev->dev_spec.pcie_inf.abort_to)
3083 timeout = tgtdev->dev_spec.pcie_inf.abort_to;
3084 else if (!cmd_priv && tgtdev->dev_spec.pcie_inf.reset_to)
3085 timeout = tgtdev->dev_spec.pcie_inf.reset_to;
3088 init_completion(&drv_cmd->done);
3089 retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1);
3091 ioc_err(mrioc, "%s :Issue TM: Admin Post failed\n", __func__);
3094 wait_for_completion_timeout(&drv_cmd->done, (timeout * HZ));
3096 if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) {
3097 drv_cmd->is_waiting = 0;
3099 if (!(drv_cmd->state & MPI3MR_CMD_RESET)) {
3101 "task management request timed out after %ld seconds\n",
3103 if (mrioc->logging_level & MPI3_DEBUG_TM)
3104 dprint_dump_req(&tm_req, sizeof(tm_req)/4);
3105 mpi3mr_soft_reset_handler(mrioc,
3106 MPI3MR_RESET_FROM_TM_TIMEOUT, 1);
3111 if (!(drv_cmd->state & MPI3MR_CMD_REPLY_VALID)) {
3112 dprint_tm(mrioc, "invalid task management reply message\n");
3117 tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;
3119 switch (drv_cmd->ioc_status) {
3120 case MPI3_IOCSTATUS_SUCCESS:
3121 *resp_code = le32_to_cpu(tm_reply->response_data) &
3122 MPI3MR_RI_MASK_RESPCODE;
3124 case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
3125 *resp_code = MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE;
3129 "task management request to handle(0x%04x) is failed with ioc_status(0x%04x) log_info(0x%08x)\n",
3130 handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo);
3135 switch (*resp_code) {
3136 case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
3137 case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
3139 case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
3140 if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
3149 "task management request type(%d) completed for handle(0x%04x) with ioc_status(0x%04x), log_info(0x%08x), termination_count(%d), response:%s(0x%x)\n",
3150 tm_type, handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo,
3151 le32_to_cpu(tm_reply->termination_count),
3152 mpi3mr_tm_response_name(*resp_code), *resp_code);
3155 mpi3mr_ioc_disable_intr(mrioc);
3156 mpi3mr_poll_pend_io_completions(mrioc);
3157 mpi3mr_ioc_enable_intr(mrioc);
3158 mpi3mr_poll_pend_io_completions(mrioc);
3161 case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
3162 if (!scsi_tgt_priv_data)
3164 scsi_tgt_priv_data->pend_count = 0;
3165 blk_mq_tagset_busy_iter(&mrioc->shost->tag_set,
3166 mpi3mr_count_tgt_pending,
3167 (void *)scsi_tgt_priv_data->starget);
3169 case MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
3170 if (!sdev_priv_data)
3172 sdev_priv_data->pend_count = 0;
3173 blk_mq_tagset_busy_iter(&mrioc->shost->tag_set,
3174 mpi3mr_count_dev_pending, (void *)sdev);
3181 drv_cmd->state = MPI3MR_CMD_NOTUSED;
3182 mutex_unlock(&drv_cmd->mutex);
3183 if (scsi_tgt_priv_data)
3184 atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
3186 mpi3mr_tgtdev_put(tgtdev);
3192 * mpi3mr_bios_param - BIOS param callback
3193 * @sdev: SCSI device reference
3194 * @bdev: Block device reference
3195 * @capacity: Capacity in logical sectors
3196 * @params: Parameter array
3198 * Just the parameters with heads/secots/cylinders.
3202 static int mpi3mr_bios_param(struct scsi_device *sdev,
3203 struct block_device *bdev, sector_t capacity, int params[])
3213 dummy = heads * sectors;
3214 cylinders = capacity;
3215 sector_div(cylinders, dummy);
3217 if ((ulong)capacity >= 0x200000) {
3220 dummy = heads * sectors;
3221 cylinders = capacity;
3222 sector_div(cylinders, dummy);
3226 params[1] = sectors;
3227 params[2] = cylinders;
3232 * mpi3mr_map_queues - Map queues callback handler
3233 * @shost: SCSI host reference
3235 * Maps default and poll queues.
3237 * Return: return zero.
3239 static int mpi3mr_map_queues(struct Scsi_Host *shost)
3241 struct mpi3mr_ioc *mrioc = shost_priv(shost);
3242 int i, qoff, offset;
3243 struct blk_mq_queue_map *map = NULL;
3245 offset = mrioc->op_reply_q_offset;
3247 for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
3248 map = &shost->tag_set.map[i];
3252 if (i == HCTX_TYPE_DEFAULT)
3253 map->nr_queues = mrioc->default_qcount;
3254 else if (i == HCTX_TYPE_POLL)
3255 map->nr_queues = mrioc->active_poll_qcount;
3257 if (!map->nr_queues) {
3258 BUG_ON(i == HCTX_TYPE_DEFAULT);
3263 * The poll queue(s) doesn't have an IRQ (and hence IRQ
3264 * affinity), so use the regular blk-mq cpu mapping
3266 map->queue_offset = qoff;
3267 if (i != HCTX_TYPE_POLL)
3268 blk_mq_pci_map_queues(map, mrioc->pdev, offset);
3270 blk_mq_map_queues(map);
3272 qoff += map->nr_queues;
3273 offset += map->nr_queues;
3281 * mpi3mr_get_fw_pending_ios - Calculate pending I/O count
3282 * @mrioc: Adapter instance reference
3284 * Calculate the pending I/Os for the controller and return.
3286 * Return: Number of pending I/Os
3288 static inline int mpi3mr_get_fw_pending_ios(struct mpi3mr_ioc *mrioc)
3293 for (i = 0; i < mrioc->num_op_reply_q; i++)
3294 pend_ios += atomic_read(&mrioc->op_reply_qinfo[i].pend_ios);
3299 * mpi3mr_print_pending_host_io - print pending I/Os
3300 * @mrioc: Adapter instance reference
3302 * Print number of pending I/Os and each I/O details prior to
3303 * reset for debug purpose.
3307 static void mpi3mr_print_pending_host_io(struct mpi3mr_ioc *mrioc)
3309 struct Scsi_Host *shost = mrioc->shost;
3311 ioc_info(mrioc, "%s :Pending commands prior to reset: %d\n",
3312 __func__, mpi3mr_get_fw_pending_ios(mrioc));
3313 blk_mq_tagset_busy_iter(&shost->tag_set,
3314 mpi3mr_print_scmd, (void *)mrioc);
3318 * mpi3mr_wait_for_host_io - block for I/Os to complete
3319 * @mrioc: Adapter instance reference
3320 * @timeout: time out in seconds
3321 * Waits for pending I/Os for the given adapter to complete or
3322 * to hit the timeout.
3326 void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout)
3328 enum mpi3mr_iocstate iocstate;
3331 iocstate = mpi3mr_get_iocstate(mrioc);
3332 if (iocstate != MRIOC_STATE_READY)
3335 if (!mpi3mr_get_fw_pending_ios(mrioc))
3338 "%s :Waiting for %d seconds prior to reset for %d I/O\n",
3339 __func__, timeout, mpi3mr_get_fw_pending_ios(mrioc));
3341 for (i = 0; i < timeout; i++) {
3342 if (!mpi3mr_get_fw_pending_ios(mrioc))
3344 iocstate = mpi3mr_get_iocstate(mrioc);
3345 if (iocstate != MRIOC_STATE_READY)
3350 ioc_info(mrioc, "%s :Pending I/Os after wait is: %d\n", __func__,
3351 mpi3mr_get_fw_pending_ios(mrioc));
3355 * mpi3mr_eh_host_reset - Host reset error handling callback
3356 * @scmd: SCSI command reference
3358 * Issue controller reset if the scmd is for a Physical Device,
3359 * if the scmd is for RAID volume, then wait for
3360 * MPI3MR_RAID_ERRREC_RESET_TIMEOUT and checke whether any
3361 * pending I/Os prior to issuing reset to the controller.
3363 * Return: SUCCESS of successful reset else FAILED
3365 static int mpi3mr_eh_host_reset(struct scsi_cmnd *scmd)
3367 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
3368 struct mpi3mr_stgt_priv_data *stgt_priv_data;
3369 struct mpi3mr_sdev_priv_data *sdev_priv_data;
3370 u8 dev_type = MPI3_DEVICE_DEVFORM_VD;
3371 int retval = FAILED, ret;
3373 sdev_priv_data = scmd->device->hostdata;
3374 if (sdev_priv_data && sdev_priv_data->tgt_priv_data) {
3375 stgt_priv_data = sdev_priv_data->tgt_priv_data;
3376 dev_type = stgt_priv_data->dev_type;
3379 if (dev_type == MPI3_DEVICE_DEVFORM_VD) {
3380 mpi3mr_wait_for_host_io(mrioc,
3381 MPI3MR_RAID_ERRREC_RESET_TIMEOUT);
3382 if (!mpi3mr_get_fw_pending_ios(mrioc)) {
3388 mpi3mr_print_pending_host_io(mrioc);
3389 ret = mpi3mr_soft_reset_handler(mrioc,
3390 MPI3MR_RESET_FROM_EH_HOS, 1);
3396 sdev_printk(KERN_INFO, scmd->device,
3397 "Host reset is %s for scmd(%p)\n",
3398 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3404 * mpi3mr_eh_target_reset - Target reset error handling callback
3405 * @scmd: SCSI command reference
3407 * Issue Target reset Task Management and verify the scmd is
3408 * terminated successfully and return status accordingly.
3410 * Return: SUCCESS of successful termination of the scmd else
3413 static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd)
3415 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
3416 struct mpi3mr_stgt_priv_data *stgt_priv_data;
3417 struct mpi3mr_sdev_priv_data *sdev_priv_data;
3420 int retval = FAILED, ret = 0;
3422 sdev_printk(KERN_INFO, scmd->device,
3423 "Attempting Target Reset! scmd(%p)\n", scmd);
3424 scsi_print_command(scmd);
3426 sdev_priv_data = scmd->device->hostdata;
3427 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
3428 sdev_printk(KERN_INFO, scmd->device,
3429 "SCSI device is not available\n");
3434 stgt_priv_data = sdev_priv_data->tgt_priv_data;
3435 dev_handle = stgt_priv_data->dev_handle;
3436 if (stgt_priv_data->dev_removed) {
3437 sdev_printk(KERN_INFO, scmd->device,
3438 "%s:target(handle = 0x%04x) is removed, target reset is not issued\n",
3439 mrioc->name, dev_handle);
3443 sdev_printk(KERN_INFO, scmd->device,
3444 "Target Reset is issued to handle(0x%04x)\n",
3447 ret = mpi3mr_issue_tm(mrioc,
3448 MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, dev_handle,
3449 sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
3450 MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);
3455 if (stgt_priv_data->pend_count) {
3456 sdev_printk(KERN_INFO, scmd->device,
3457 "%s: target has %d pending commands, target reset is failed\n",
3458 mrioc->name, stgt_priv_data->pend_count);
3464 sdev_printk(KERN_INFO, scmd->device,
3465 "%s: target reset is %s for scmd(%p)\n", mrioc->name,
3466 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3472 * mpi3mr_eh_dev_reset- Device reset error handling callback
3473 * @scmd: SCSI command reference
3475 * Issue lun reset Task Management and verify the scmd is
3476 * terminated successfully and return status accordingly.
3478 * Return: SUCCESS of successful termination of the scmd else
3481 static int mpi3mr_eh_dev_reset(struct scsi_cmnd *scmd)
3483 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
3484 struct mpi3mr_stgt_priv_data *stgt_priv_data;
3485 struct mpi3mr_sdev_priv_data *sdev_priv_data;
3488 int retval = FAILED, ret = 0;
3490 sdev_printk(KERN_INFO, scmd->device,
3491 "Attempting Device(lun) Reset! scmd(%p)\n", scmd);
3492 scsi_print_command(scmd);
3494 sdev_priv_data = scmd->device->hostdata;
3495 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
3496 sdev_printk(KERN_INFO, scmd->device,
3497 "SCSI device is not available\n");
3502 stgt_priv_data = sdev_priv_data->tgt_priv_data;
3503 dev_handle = stgt_priv_data->dev_handle;
3504 if (stgt_priv_data->dev_removed) {
3505 sdev_printk(KERN_INFO, scmd->device,
3506 "%s: device(handle = 0x%04x) is removed, device(LUN) reset is not issued\n",
3507 mrioc->name, dev_handle);
3511 sdev_printk(KERN_INFO, scmd->device,
3512 "Device(lun) Reset is issued to handle(0x%04x)\n", dev_handle);
3514 ret = mpi3mr_issue_tm(mrioc,
3515 MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle,
3516 sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
3517 MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);
3522 if (sdev_priv_data->pend_count) {
3523 sdev_printk(KERN_INFO, scmd->device,
3524 "%s: device has %d pending commands, device(LUN) reset is failed\n",
3525 mrioc->name, sdev_priv_data->pend_count);
3530 sdev_printk(KERN_INFO, scmd->device,
3531 "%s: device(LUN) reset is %s for scmd(%p)\n", mrioc->name,
3532 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3538 * mpi3mr_scan_start - Scan start callback handler
3539 * @shost: SCSI host reference
3541 * Issue port enable request asynchronously.
3545 static void mpi3mr_scan_start(struct Scsi_Host *shost)
3547 struct mpi3mr_ioc *mrioc = shost_priv(shost);
3549 mrioc->scan_started = 1;
3550 ioc_info(mrioc, "%s :Issuing Port Enable\n", __func__);
3551 if (mpi3mr_issue_port_enable(mrioc, 1)) {
3552 ioc_err(mrioc, "%s :Issuing port enable failed\n", __func__);
3553 mrioc->scan_started = 0;
3554 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
3559 * mpi3mr_scan_finished - Scan finished callback handler
3560 * @shost: SCSI host reference
3561 * @time: Jiffies from the scan start
3563 * Checks whether the port enable is completed or timedout or
3564 * failed and set the scan status accordingly after taking any
3565 * recovery if required.
3567 * Return: 1 on scan finished or timed out, 0 for in progress
3569 static int mpi3mr_scan_finished(struct Scsi_Host *shost,
3572 struct mpi3mr_ioc *mrioc = shost_priv(shost);
3573 u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;
/* Read the live IOC status straight from the system interface registers. */
3574 u32 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
/*
 * A fault or a reset-history bit during port enable means the
 * controller cannot complete discovery: fail the scan and return
 * the init_cmds tracker to its idle state.
 */
3576 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
3577 (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
3578 ioc_err(mrioc, "port enable failed due to fault or reset\n");
3579 mpi3mr_print_fault_info(mrioc);
3580 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
3581 mrioc->scan_started = 0;
3582 mrioc->init_cmds.is_waiting = 0;
3583 mrioc->init_cmds.callback = NULL;
3584 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
/* Port enable exceeded its timeout: trigger recovery and fail the scan. */
3587 if (time >= (pe_timeout * HZ)) {
3588 ioc_err(mrioc, "port enable failed due to time out\n");
3589 mpi3mr_check_rh_fault_ioc(mrioc,
3590 MPI3MR_RESET_FROM_PE_TIMEOUT);
3591 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
3592 mrioc->scan_started = 0;
3593 mrioc->init_cmds.is_waiting = 0;
3594 mrioc->init_cmds.callback = NULL;
3595 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
/* Still in progress: tell the midlayer to poll again (return 0). */
3598 if (mrioc->scan_started)
3601 if (mrioc->scan_failed) {
3603 "port enable failed with status=0x%04x\n",
3604 mrioc->scan_failed);
3606 ioc_info(mrioc, "port enable is successfully completed\n");
/* Scan is over (pass or fail): start the watchdog and open up BSG. */
3608 mpi3mr_start_watchdog(mrioc);
3609 mrioc->is_driver_loading = 0;
3610 mrioc->stop_bsgs = 0;
3615 * mpi3mr_slave_destroy - Slave destroy callback handler
3616 * @sdev: SCSI device reference
3618 * Cleanup and free per device(lun) private data.
3622 static void mpi3mr_slave_destroy(struct scsi_device *sdev)
3624 struct Scsi_Host *shost;
3625 struct mpi3mr_ioc *mrioc;
3626 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
3627 struct mpi3mr_tgt_dev *tgt_dev;
3628 unsigned long flags;
3629 struct scsi_target *starget;
/* Nothing to tear down if slave_alloc never attached private data. */
3631 if (!sdev->hostdata)
3634 starget = scsi_target(sdev);
3635 shost = dev_to_shost(&starget->dev);
3636 mrioc = shost_priv(shost);
3637 scsi_tgt_priv_data = starget->hostdata;
/* This LUN is going away; drop it from the target's LUN count. */
3639 scsi_tgt_priv_data->num_luns--;
3641 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
3642 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
/* Clear the starget back-pointer only once the last LUN is gone. */
3643 if (tgt_dev && (!scsi_tgt_priv_data->num_luns))
3644 tgt_dev->starget = NULL;
/* Drop the reference taken by the lookup above. */
3646 mpi3mr_tgtdev_put(tgt_dev);
3647 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
3649 kfree(sdev->hostdata);
3650 sdev->hostdata = NULL;
3654 * mpi3mr_target_destroy - Target destroy callback handler
3655 * @starget: SCSI target reference
3657 * Cleanup and free per target private data.
3661 static void mpi3mr_target_destroy(struct scsi_target *starget)
3663 struct Scsi_Host *shost;
3664 struct mpi3mr_ioc *mrioc;
3665 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
3666 struct mpi3mr_tgt_dev *tgt_dev;
3667 unsigned long flags;
/* Nothing to tear down if target_alloc never attached private data. */
3669 if (!starget->hostdata)
3672 shost = dev_to_shost(&starget->dev);
3673 mrioc = shost_priv(shost);
3674 scsi_tgt_priv_data = starget->hostdata;
3676 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
3677 tgt_dev = __mpi3mr_get_tgtdev_from_tgtpriv(mrioc, scsi_tgt_priv_data);
/* Break the back-pointer only if it still points at this starget. */
3678 if (tgt_dev && (tgt_dev->starget == starget) &&
3679 (tgt_dev->perst_id == starget->id))
3680 tgt_dev->starget = NULL;
3682 scsi_tgt_priv_data->tgt_dev = NULL;
3683 scsi_tgt_priv_data->perst_id = 0;
/*
 * NOTE(review): two consecutive puts — one balances the reference
 * taken by the lookup above; the second presumably drops the
 * long-lived reference that scsi_tgt_priv_data->tgt_dev (cleared
 * just above) was holding. Confirm against the tgtdev refcounting
 * scheme before touching this.
 */
3684 mpi3mr_tgtdev_put(tgt_dev);
3685 mpi3mr_tgtdev_put(tgt_dev);
3687 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
3689 kfree(starget->hostdata);
3690 starget->hostdata = NULL;
3694 * mpi3mr_slave_configure - Slave configure callback handler
3695 * @sdev: SCSI device reference
3697 * Configure queue depth, max hardware sectors and virt boundary
3702 static int mpi3mr_slave_configure(struct scsi_device *sdev)
3704 struct scsi_target *starget;
3705 struct Scsi_Host *shost;
3706 struct mpi3mr_ioc *mrioc;
3707 struct mpi3mr_tgt_dev *tgt_dev;
3708 unsigned long flags;
3711 starget = scsi_target(sdev);
3712 shost = dev_to_shost(&starget->dev);
3713 mrioc = shost_priv(shost);
/* Look up the firmware device for this target under the tgtdev lock. */
3715 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
3716 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
3717 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
/* Apply the firmware-advertised queue depth for this device. */
3721 mpi3mr_change_queue_depth(sdev, tgt_dev->q_depth);
3723 sdev->eh_timeout = MPI3MR_EH_SCMD_TIMEOUT;
3724 blk_queue_rq_timeout(sdev->request_queue, MPI3MR_SCMD_TIMEOUT);
3726 switch (tgt_dev->dev_type) {
3727 case MPI3_DEVICE_DEVFORM_PCIE:
3728 /*The block layer hw sector size = 512*/
3729 if ((tgt_dev->dev_spec.pcie_inf.dev_info &
3730 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
3731 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) {
/* mdts is in bytes; /512 converts it to block-layer sectors. */
3732 blk_queue_max_hw_sectors(sdev->request_queue,
3733 tgt_dev->dev_spec.pcie_inf.mdts / 512);
/*
 * pgsz == 0 means the device did not report a page size exponent;
 * fall back to the driver default. The virt boundary keeps SG
 * elements from straddling the device page size.
 */
3734 if (tgt_dev->dev_spec.pcie_inf.pgsz == 0)
3735 blk_queue_virt_boundary(sdev->request_queue,
3736 ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1));
3738 blk_queue_virt_boundary(sdev->request_queue,
3739 ((1 << tgt_dev->dev_spec.pcie_inf.pgsz) - 1));
/* Drop the reference taken by the lookup above. */
3746 mpi3mr_tgtdev_put(tgt_dev);
3752 * mpi3mr_slave_alloc -Slave alloc callback handler
3753 * @sdev: SCSI device reference
3755 * Allocate per device(lun) private data and initialize it.
3757 * Return: 0 on success -ENOMEM on memory allocation failure.
3759 static int mpi3mr_slave_alloc(struct scsi_device *sdev)
3761 struct Scsi_Host *shost;
3762 struct mpi3mr_ioc *mrioc;
3763 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
3764 struct mpi3mr_tgt_dev *tgt_dev;
3765 struct mpi3mr_sdev_priv_data *scsi_dev_priv_data;
3766 unsigned long flags;
3767 struct scsi_target *starget;
3770 starget = scsi_target(sdev);
3771 shost = dev_to_shost(&starget->dev);
3772 mrioc = shost_priv(shost);
3773 scsi_tgt_priv_data = starget->hostdata;
3775 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
3776 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
/* First LUN on this target wires up the starget back-pointer. */
3779 if (tgt_dev->starget == NULL)
3780 tgt_dev->starget = starget;
/* Drop the reference taken by the lookup above. */
3781 mpi3mr_tgtdev_put(tgt_dev);
3784 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
3788 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
/* Per-LUN private data, freed in mpi3mr_slave_destroy(). */
3790 scsi_dev_priv_data = kzalloc(sizeof(*scsi_dev_priv_data), GFP_KERNEL);
3791 if (!scsi_dev_priv_data)
3794 scsi_dev_priv_data->lun_id = sdev->lun;
3795 scsi_dev_priv_data->tgt_priv_data = scsi_tgt_priv_data;
3796 sdev->hostdata = scsi_dev_priv_data;
/* Track LUN count so slave_destroy knows when the target is empty. */
3798 scsi_tgt_priv_data->num_luns++;
3804 * mpi3mr_target_alloc - Target alloc callback handler
3805 * @starget: SCSI target reference
3807 * Allocate per target private data and initialize it.
3809 * Return: 0 on success -ENOMEM on memory allocation failure.
3811 static int mpi3mr_target_alloc(struct scsi_target *starget)
3813 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
3814 struct mpi3mr_ioc *mrioc = shost_priv(shost);
3815 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
3816 struct mpi3mr_tgt_dev *tgt_dev;
3817 unsigned long flags;
/* Per-target private data, freed in mpi3mr_target_destroy(). */
3820 scsi_tgt_priv_data = kzalloc(sizeof(*scsi_tgt_priv_data), GFP_KERNEL);
3821 if (!scsi_tgt_priv_data)
3824 starget->hostdata = scsi_tgt_priv_data;
3826 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
3827 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
/* Only expose devices that firmware has not marked hidden. */
3828 if (tgt_dev && !tgt_dev->is_hidden) {
3829 scsi_tgt_priv_data->starget = starget;
3830 scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
3831 scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
3832 scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
/* The priv data keeps a tgt_dev reference until target_destroy. */
3833 scsi_tgt_priv_data->tgt_dev = tgt_dev;
3834 tgt_dev->starget = starget;
3835 atomic_set(&scsi_tgt_priv_data->block_io, 0);
3839 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
3845 * mpi3mr_check_return_unmap - Whether an unmap is allowed
3846 * @mrioc: Adapter instance reference
3847 * @scmd: SCSI Command reference
3849 * The controller hardware cannot handle certain unmap commands
3850 * for NVMe drives, this routine checks those and return true
3851 * and completes the SCSI command with proper status and sense
3854 * Return: TRUE for not allowed unmap, FALSE otherwise.
3856 static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc,
3857 struct scsi_cmnd *scmd)
3860 u16 param_len, desc_len, trunc_param_len;
/* UNMAP CDB bytes 7..8 hold the parameter list length. */
3862 trunc_param_len = param_len = get_unaligned_be16(scmd->cmnd + 7);
/*
 * Non-zero PCI revision: newer controller silicon. Presumably it
 * only needs the parameter list trimmed so the descriptor payload
 * (param_len - 8 header bytes) is a multiple of 16 — confirm
 * against the hardware errata for this device.
 */
3864 if (mrioc->pdev->revision) {
3865 if ((param_len > 24) && ((param_len - 8) & 0xF)) {
3866 trunc_param_len -= (param_len - 8) & 0xF;
3867 dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR);
3868 dprint_scsi_err(mrioc,
3869 "truncating param_len from (%d) to (%d)\n",
3870 param_len, trunc_param_len);
/* Rewrite the CDB in place with the truncated length. */
3871 put_unaligned_be16(trunc_param_len, scmd->cmnd + 7);
3872 dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR);
/* Zero parameter length: nothing to unmap, complete with DID_OK. */
3879 "%s: cdb received with zero parameter length\n",
3881 scsi_print_command(scmd);
3882 scmd->result = DID_OK << 16;
/* Less than header (8) + one 16-byte descriptor: reject as illegal. */
3887 if (param_len < 24) {
3889 "%s: cdb received with invalid param_len: %d\n",
3890 __func__, param_len);
3891 scsi_print_command(scmd);
3892 scmd->result = SAM_STAT_CHECK_CONDITION;
3893 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
/* CDB length must agree with the actual data buffer length. */
3898 if (param_len != scsi_bufflen(scmd)) {
3900 "%s: cdb received with param_len: %d bufflen: %d\n",
3901 __func__, param_len, scsi_bufflen(scmd));
3902 scsi_print_command(scmd);
3903 scmd->result = SAM_STAT_CHECK_CONDITION;
3904 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
/* GFP_ATOMIC: called from the queuecommand path — cannot sleep here. */
3909 buf = kzalloc(scsi_bufflen(scmd), GFP_ATOMIC);
3911 scsi_print_command(scmd);
3912 scmd->result = SAM_STAT_CHECK_CONDITION;
3913 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
/* Pull the parameter list out of the SG list to inspect it. */
3918 scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
/* Bytes 2..3 of the parameter list: block descriptor data length. */
3919 desc_len = get_unaligned_be16(&buf[2]);
3921 if (desc_len < 16) {
3923 "%s: Invalid descriptor length in param list: %d\n",
3924 __func__, desc_len);
3925 scsi_print_command(scmd);
3926 scmd->result = SAM_STAT_CHECK_CONDITION;
3927 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
/* Clamp param_len to what the descriptor data actually covers. */
3934 if (param_len > (desc_len + 8)) {
3935 trunc_param_len = desc_len + 8;
3936 scsi_print_command(scmd);
3937 dprint_scsi_err(mrioc,
3938 "truncating param_len(%d) to desc_len+8(%d)\n",
3939 param_len, trunc_param_len);
3940 put_unaligned_be16(trunc_param_len, scmd->cmnd + 7);
3941 scsi_print_command(scmd);
3949 * mpi3mr_allow_scmd_to_fw - Command is allowed during shutdown
3950 * @scmd: SCSI Command reference
3952 * Checks whether a cdb is allowed during shutdown or not.
3954 * Return: TRUE for allowed commands, FALSE otherwise.
3957 inline bool mpi3mr_allow_scmd_to_fw(struct scsi_cmnd *scmd)
3959 switch (scmd->cmnd[0]) {
/* Cache flush must still reach the device so data lands on media. */
3960 case SYNCHRONIZE_CACHE:
3969 * mpi3mr_qcmd - I/O request despatcher
3970 * @shost: SCSI Host reference
3971 * @scmd: SCSI Command reference
3973 * Issues the SCSI Command as an MPI3 request.
3975 * Return: 0 on successful queueing of the request or if the
3976 * request is completed with failure.
3977 * SCSI_MLQUEUE_DEVICE_BUSY when the device is busy.
3978 * SCSI_MLQUEUE_HOST_BUSY when the host queue is full.
3980 static int mpi3mr_qcmd(struct Scsi_Host *shost,
3981 struct scsi_cmnd *scmd)
3983 struct mpi3mr_ioc *mrioc = shost_priv(shost);
3984 struct mpi3mr_stgt_priv_data *stgt_priv_data;
3985 struct mpi3mr_sdev_priv_data *sdev_priv_data;
3986 struct scmd_priv *scmd_priv_data = NULL;
3987 struct mpi3_scsi_io_request *scsiio_req = NULL;
3988 struct op_req_qinfo *op_req_q = NULL;
3992 u32 scsiio_flags = 0;
3993 struct request *rq = scsi_cmd_to_rq(scmd);
/* Dead controller: fail the command back to the midlayer. */
3997 if (mrioc->unrecoverable) {
3998 scmd->result = DID_ERROR << 16;
4003 sdev_priv_data = scmd->device->hostdata;
4004 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
4005 scmd->result = DID_NO_CONNECT << 16;
/* Driver is shutting down: only whitelisted CDBs may go to firmware. */
4010 if (mrioc->stop_drv_processing &&
4011 !(mpi3mr_allow_scmd_to_fw(scmd))) {
4012 scmd->result = DID_NO_CONNECT << 16;
/* Controller reset underway: ask the midlayer to retry later. */
4017 if (mrioc->reset_in_progress) {
4018 retval = SCSI_MLQUEUE_HOST_BUSY;
4022 stgt_priv_data = sdev_priv_data->tgt_priv_data;
4024 dev_handle = stgt_priv_data->dev_handle;
4025 if (dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
4026 scmd->result = DID_NO_CONNECT << 16;
4030 if (stgt_priv_data->dev_removed) {
4031 scmd->result = DID_NO_CONNECT << 16;
/* I/O to this target is temporarily blocked (e.g. device reset). */
4036 if (atomic_read(&stgt_priv_data->block_io)) {
4037 if (mrioc->stop_drv_processing) {
4038 scmd->result = DID_NO_CONNECT << 16;
4042 retval = SCSI_MLQUEUE_DEVICE_BUSY;
4046 if (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE)
/* SAS4116 cannot handle certain NVMe UNMAPs; validate/fix them first. */
4048 if ((scmd->cmnd[0] == UNMAP) && is_pcie_dev &&
4049 (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
4050 mpi3mr_check_return_unmap(mrioc, scmd))
4053 host_tag = mpi3mr_host_tag_for_scmd(mrioc, scmd);
4054 if (host_tag == MPI3MR_HOSTTAG_INVALID) {
4055 scmd->result = DID_ERROR << 16;
/* Translate DMA direction into MPI3 SCSI IO flags. */
4060 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
4061 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_READ;
4062 else if (scmd->sc_data_direction == DMA_TO_DEVICE)
4063 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE;
4065 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER;
4067 scsiio_flags |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ;
/* Map RT I/O priority to the MPI3 command-priority field if enabled. */
4069 if (sdev_priv_data->ncq_prio_enable) {
4070 iprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
4071 if (iprio_class == IOPRIO_CLASS_RT)
4072 scsiio_flags |= 1 << MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT;
4075 if (scmd->cmd_len > 16)
4076 scsiio_flags |= MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16;
/* Build the MPI3 SCSI IO frame in the command's preallocated slot. */
4078 scmd_priv_data = scsi_cmd_priv(scmd);
4079 memset(scmd_priv_data->mpi3mr_scsiio_req, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
4080 scsiio_req = (struct mpi3_scsi_io_request *)scmd_priv_data->mpi3mr_scsiio_req;
4081 scsiio_req->function = MPI3_FUNCTION_SCSI_IO;
4082 scsiio_req->host_tag = cpu_to_le16(host_tag);
4084 mpi3mr_setup_eedp(mrioc, scmd, scsiio_req);
4086 memcpy(scsiio_req->cdb.cdb32, scmd->cmnd, scmd->cmd_len);
4087 scsiio_req->data_length = cpu_to_le32(scsi_bufflen(scmd));
4088 scsiio_req->dev_handle = cpu_to_le16(dev_handle);
4089 scsiio_req->flags = cpu_to_le32(scsiio_flags);
4090 int_to_scsilun(sdev_priv_data->lun_id,
4091 (struct scsi_lun *)scsiio_req->lun);
/* SGL build can fail transiently (e.g. no chain buffer): retry later. */
4093 if (mpi3mr_build_sg_scmd(mrioc, scmd, scsiio_req)) {
4094 mpi3mr_clear_scmd_priv(mrioc, scmd);
4095 retval = SCSI_MLQUEUE_HOST_BUSY;
4098 op_req_q = &mrioc->req_qinfo[scmd_priv_data->req_q_idx];
/* Post to the operational request queue mapped to this hw queue. */
4100 if (mpi3mr_op_request_post(mrioc, op_req_q,
4101 scmd_priv_data->mpi3mr_scsiio_req)) {
4102 mpi3mr_clear_scmd_priv(mrioc, scmd);
4103 retval = SCSI_MLQUEUE_HOST_BUSY;
/* SCSI midlayer host template: wires the callbacks defined above. */
4111 static struct scsi_host_template mpi3mr_driver_template = {
4112 .module = THIS_MODULE,
4113 .name = "MPI3 Storage Controller",
4114 .proc_name = MPI3MR_DRIVER_NAME,
4115 .queuecommand = mpi3mr_qcmd,
4116 .target_alloc = mpi3mr_target_alloc,
4117 .slave_alloc = mpi3mr_slave_alloc,
4118 .slave_configure = mpi3mr_slave_configure,
4119 .target_destroy = mpi3mr_target_destroy,
4120 .slave_destroy = mpi3mr_slave_destroy,
4121 .scan_finished = mpi3mr_scan_finished,
4122 .scan_start = mpi3mr_scan_start,
4123 .change_queue_depth = mpi3mr_change_queue_depth,
4124 .eh_device_reset_handler = mpi3mr_eh_dev_reset,
4125 .eh_target_reset_handler = mpi3mr_eh_target_reset,
4126 .eh_host_reset_handler = mpi3mr_eh_host_reset,
4127 .bios_param = mpi3mr_bios_param,
4128 .map_queues = mpi3mr_map_queues,
/* Polled (blk-mq iopoll) completion support. */
4129 .mq_poll = mpi3mr_blk_mq_poll,
4133 .sg_tablesize = MPI3MR_SG_DEPTH,
4134 /* max xfer supported is 1M (2K in 512 byte sized sectors)
4136 .max_sectors = 2048,
4137 .cmd_per_lun = MPI3MR_MAX_CMDS_LUN,
4138 .max_segment_size = 0xffffffff,
4139 .track_queue_depth = 1,
/* Per-command private area used by mpi3mr_host_tag_for_scmd() et al. */
4140 .cmd_size = sizeof(struct scmd_priv),
4141 .shost_groups = mpi3mr_host_groups,
4142 .sdev_groups = mpi3mr_dev_groups,
4146 * mpi3mr_init_drv_cmd - Initialize internal command tracker
4147 * @cmdptr: Internal command tracker
4148 * @host_tag: Host tag used for the specific command
4150 * Initialize the internal command tracker structure with
4151 * specified host tag.
4155 static inline void mpi3mr_init_drv_cmd(struct mpi3mr_drv_cmd *cmdptr,
4158 mutex_init(&cmdptr->mutex);
4159 cmdptr->reply = NULL;
4160 cmdptr->state = MPI3MR_CMD_NOTUSED;
4161 cmdptr->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
4162 cmdptr->host_tag = host_tag;
4166 * osintfc_mrioc_security_status -Check controller secure status
4167 * @pdev: PCI device instance
4169 * Read the Device Serial Number capability from PCI config
4170 * space and decide whether the controller is secure or not.
4172 * Return: 0 on success, non-zero on failure.
4175 osintfc_mrioc_security_status(struct pci_dev *pdev)
/* Locate the Device Serial Number extended capability. */
4183 base = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
4186 "%s: PCI_EXT_CAP_ID_DSN is not supported\n", __func__);
/*
 * The dword at DSN capability offset +4 carries the vendor's
 * security/debug status for this controller.
 */
4190 pci_read_config_dword(pdev, base + 4, &cap_data);
4192 debug_status = cap_data & MPI3MR_CTLR_SECURE_DBG_STATUS_MASK;
4193 ctlr_status = cap_data & MPI3MR_CTLR_SECURITY_STATUS_MASK;
/* Only config-secure and hard-secure controllers are accepted. */
4195 switch (ctlr_status) {
4196 case MPI3MR_INVALID_DEVICE:
4198 "%s: Non secure ctlr (Invalid) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
4199 __func__, pdev->device, pdev->subsystem_vendor,
4200 pdev->subsystem_device);
4203 case MPI3MR_CONFIG_SECURE_DEVICE:
4205 dev_info(&pdev->dev,
4206 "%s: Config secure ctlr is detected\n",
4209 case MPI3MR_HARD_SECURE_DEVICE:
4211 case MPI3MR_TAMPERED_DEVICE:
4213 "%s: Non secure ctlr (Tampered) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
4214 __func__, pdev->device, pdev->subsystem_vendor,
4215 pdev->subsystem_device);
/* A secure controller with secure-debug enabled is also rejected. */
4223 if (!retval && debug_status) {
4225 "%s: Non secure ctlr (Secure Dbg) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
4226 __func__, pdev->device, pdev->subsystem_vendor,
4227 pdev->subsystem_device);
4235 * mpi3mr_probe - PCI probe callback
4236 * @pdev: PCI device instance
4237 * @id: PCI device ID details
4239 * controller initialization routine. Checks the security status
4240 * of the controller and if it is invalid or tampered return the
4241 * probe without initializing the controller. Otherwise,
4242 * allocate per adapter instance through shost_priv and
4243 * initialize controller specific data structures, initialize
4244 * the controller hardware, add shost to the SCSI subsystem.
4246 * Return: 0 on success, non-zero on failure.
4250 mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
4252 struct mpi3mr_ioc *mrioc = NULL;
4253 struct Scsi_Host *shost = NULL;
/* Refuse to manage invalid/tampered controllers; warn at unload. */
4256 if (osintfc_mrioc_security_status(pdev)) {
4257 warn_non_secure_ctlr = 1;
4258 return 1; /* For Invalid and Tampered device */
4261 shost = scsi_host_alloc(&mpi3mr_driver_template,
4262 sizeof(struct mpi3mr_ioc));
4268 mrioc = shost_priv(shost);
4269 mrioc->id = mrioc_ids++;
4270 sprintf(mrioc->driver_name, "%s", MPI3MR_DRIVER_NAME);
4271 sprintf(mrioc->name, "%s%d", mrioc->driver_name, mrioc->id);
/* Publish this adapter on the module-global adapter list. */
4272 INIT_LIST_HEAD(&mrioc->list);
4273 spin_lock(&mrioc_list_lock);
4274 list_add_tail(&mrioc->list, &mrioc_list);
4275 spin_unlock(&mrioc_list_lock);
4277 spin_lock_init(&mrioc->admin_req_lock);
4278 spin_lock_init(&mrioc->reply_free_queue_lock);
4279 spin_lock_init(&mrioc->sbq_lock);
4280 spin_lock_init(&mrioc->fwevt_lock);
4281 spin_lock_init(&mrioc->tgtdev_lock);
4282 spin_lock_init(&mrioc->watchdog_lock);
4283 spin_lock_init(&mrioc->chain_buf_lock);
4285 INIT_LIST_HEAD(&mrioc->fwevt_list);
4286 INIT_LIST_HEAD(&mrioc->tgtdev_list);
4287 INIT_LIST_HEAD(&mrioc->delayed_rmhs_list);
4288 INIT_LIST_HEAD(&mrioc->delayed_evtack_cmds_list);
/* Pre-assign fixed host tags to the internal command trackers. */
4290 mutex_init(&mrioc->reset_mutex);
4291 mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS);
4292 mpi3mr_init_drv_cmd(&mrioc->host_tm_cmds, MPI3MR_HOSTTAG_BLK_TMS);
4293 mpi3mr_init_drv_cmd(&mrioc->bsg_cmds, MPI3MR_HOSTTAG_BSG_CMDS);
4295 for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
4296 mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i],
4297 MPI3MR_HOSTTAG_DEVRMCMD_MIN + i);
4300 mrioc->enable_segqueue = true;
4302 init_waitqueue_head(&mrioc->reset_waitq);
4303 mrioc->logging_level = logging_level;
4304 mrioc->shost = shost;
/* BSG stays blocked until the scan completes (see scan_finished). */
4306 mrioc->stop_bsgs = 1;
4308 /* init shost parameters */
4309 shost->max_cmd_len = MPI3MR_MAX_CDB_LENGTH;
4310 shost->max_lun = -1;
4311 shost->unique_id = mrioc->id;
4313 shost->max_channel = 0;
4314 shost->max_id = 0xFFFFFFFF;
/* Set up DIF/DIX protection capabilities from the module parameter. */
4317 scsi_host_set_prot(shost, prot_mask);
4319 prot_mask = SHOST_DIF_TYPE1_PROTECTION
4320 | SHOST_DIF_TYPE2_PROTECTION
4321 | SHOST_DIF_TYPE3_PROTECTION;
4322 scsi_host_set_prot(shost, prot_mask);
4326 "%s :host protection capabilities enabled %s%s%s%s%s%s%s\n",
4328 (prot_mask & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
4329 (prot_mask & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
4330 (prot_mask & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
4331 (prot_mask & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
4332 (prot_mask & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
4333 (prot_mask & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
4334 (prot_mask & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
4336 if (prot_guard_mask)
4337 scsi_host_set_guard(shost, (prot_guard_mask & 3));
4339 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
/* Ordered workqueue: firmware events must be processed serially. */
4341 snprintf(mrioc->fwevt_worker_name, sizeof(mrioc->fwevt_worker_name),
4342 "%s%d_fwevt_wrkr", mrioc->driver_name, mrioc->id);
4343 mrioc->fwevt_worker_thread = alloc_ordered_workqueue(
4344 mrioc->fwevt_worker_name, 0);
4345 if (!mrioc->fwevt_worker_thread) {
4346 ioc_err(mrioc, "failure at %s:%d/%s()!\n",
4347 __FILE__, __LINE__, __func__);
4349 goto fwevtthread_failed;
4352 mrioc->is_driver_loading = 1;
4353 mrioc->cpu_count = num_online_cpus();
4354 if (mpi3mr_setup_resources(mrioc)) {
4355 ioc_err(mrioc, "setup resources failed\n");
4357 goto resource_alloc_failed;
4359 if (mpi3mr_init_ioc(mrioc)) {
4360 ioc_err(mrioc, "initializing IOC failed\n");
4362 goto init_ioc_failed;
/* Queue counts are only known after IOC init negotiates them. */
4365 shost->nr_hw_queues = mrioc->num_op_reply_q;
4366 if (mrioc->active_poll_qcount)
4369 shost->can_queue = mrioc->max_host_ios;
4370 shost->sg_tablesize = MPI3MR_SG_DEPTH;
4371 shost->max_id = mrioc->facts.max_perids + 1;
4373 retval = scsi_add_host(shost, &pdev->dev);
4375 ioc_err(mrioc, "failure at %s:%d/%s()!\n",
4376 __FILE__, __LINE__, __func__);
4377 goto addhost_failed;
4380 scsi_scan_host(shost);
4381 mpi3mr_bsg_init(mrioc);
/* Error unwind: undo everything done above, in reverse order. */
4385 mpi3mr_stop_watchdog(mrioc);
4386 mpi3mr_cleanup_ioc(mrioc);
4388 mpi3mr_free_mem(mrioc);
4389 mpi3mr_cleanup_resources(mrioc);
4390 resource_alloc_failed:
4391 destroy_workqueue(mrioc->fwevt_worker_thread);
4393 spin_lock(&mrioc_list_lock);
4394 list_del(&mrioc->list);
4395 spin_unlock(&mrioc_list_lock);
4396 scsi_host_put(shost);
4402 * mpi3mr_remove - PCI remove callback
4403 * @pdev: PCI device instance
4405 * Cleanup the IOC by issuing MUR and shutdown notification.
4406 * Free up all memory and resources associated with the
4407 * controller and target devices, unregister the shost.
4411 static void mpi3mr_remove(struct pci_dev *pdev)
4413 struct Scsi_Host *shost = pci_get_drvdata(pdev);
4414 struct mpi3mr_ioc *mrioc;
4415 struct workqueue_struct *wq;
4416 unsigned long flags;
4417 struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
4422 mrioc = shost_priv(shost);
/* Wait out any in-flight reset or the initial load before tearing down. */
4423 while (mrioc->reset_in_progress || mrioc->is_driver_loading)
4426 mpi3mr_bsg_exit(mrioc);
4427 mrioc->stop_drv_processing = 1;
4428 mpi3mr_cleanup_fwevt_list(mrioc);
/* Detach the event workqueue under the lock so no new work is queued. */
4429 spin_lock_irqsave(&mrioc->fwevt_lock, flags);
4430 wq = mrioc->fwevt_worker_thread;
4431 mrioc->fwevt_worker_thread = NULL;
4432 spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
4434 destroy_workqueue(wq);
4435 scsi_remove_host(shost);
/* Unhook and release every remaining target device. */
4437 list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
4439 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
4440 mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
4441 mpi3mr_tgtdev_put(tgtdev);
4443 mpi3mr_stop_watchdog(mrioc);
4444 mpi3mr_cleanup_ioc(mrioc);
4445 mpi3mr_free_mem(mrioc);
4446 mpi3mr_cleanup_resources(mrioc);
/* Take this adapter off the module-global list and drop the shost. */
4448 spin_lock(&mrioc_list_lock);
4449 list_del(&mrioc->list);
4450 spin_unlock(&mrioc_list_lock);
4452 scsi_host_put(shost);
4456 * mpi3mr_shutdown - PCI shutdown callback
4457 * @pdev: PCI device instance
4459 * Free up all memory and resources associated with the
4464 static void mpi3mr_shutdown(struct pci_dev *pdev)
4466 struct Scsi_Host *shost = pci_get_drvdata(pdev);
4467 struct mpi3mr_ioc *mrioc;
4468 struct workqueue_struct *wq;
4469 unsigned long flags;
4474 mrioc = shost_priv(shost);
/* Wait out any in-flight reset or the initial load before shutdown. */
4475 while (mrioc->reset_in_progress || mrioc->is_driver_loading)
4478 mrioc->stop_drv_processing = 1;
4479 mpi3mr_cleanup_fwevt_list(mrioc);
/* Detach the event workqueue under the lock so no new work is queued. */
4480 spin_lock_irqsave(&mrioc->fwevt_lock, flags);
4481 wq = mrioc->fwevt_worker_thread;
4482 mrioc->fwevt_worker_thread = NULL;
4483 spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
4485 destroy_workqueue(wq);
/* Quiesce the IOC; unlike remove(), the shost is left registered. */
4487 mpi3mr_stop_watchdog(mrioc);
4488 mpi3mr_cleanup_ioc(mrioc);
4489 mpi3mr_cleanup_resources(mrioc);
4494 * mpi3mr_suspend - PCI power management suspend callback
4495 * @pdev: PCI device instance
4496 * @state: New power state
4498 * Change the power state to the given value and cleanup the IOC
4499 * by issuing MUR and shutdown notification
4503 static int mpi3mr_suspend(struct pci_dev *pdev, pm_message_t state)
4505 struct Scsi_Host *shost = pci_get_drvdata(pdev);
4506 struct mpi3mr_ioc *mrioc;
4507 pci_power_t device_state;
4512 mrioc = shost_priv(shost);
/* Let any in-flight reset or the initial load finish first. */
4513 while (mrioc->reset_in_progress || mrioc->is_driver_loading)
4515 mrioc->stop_drv_processing = 1;
4516 mpi3mr_cleanup_fwevt_list(mrioc);
/* Stop new midlayer I/O, then quiesce the controller. */
4517 scsi_block_requests(shost);
4518 mpi3mr_stop_watchdog(mrioc);
4519 mpi3mr_cleanup_ioc(mrioc);
4521 device_state = pci_choose_state(pdev, state);
4522 ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
4523 pdev, pci_name(pdev), device_state);
/* Save config space before resources go away, then drop power state. */
4524 pci_save_state(pdev);
4525 mpi3mr_cleanup_resources(mrioc);
4526 pci_set_power_state(pdev, device_state);
4532 * mpi3mr_resume - PCI power management resume callback
4533 * @pdev: PCI device instance
4535 * Restore the power state to D0 and reinitialize the controller
4536 * and resume I/O operations to the target devices
4538 * Return: 0 on success, non-zero on failure
4540 static int mpi3mr_resume(struct pci_dev *pdev)
4542 struct Scsi_Host *shost = pci_get_drvdata(pdev);
4543 struct mpi3mr_ioc *mrioc;
4544 pci_power_t device_state = pdev->current_state;
4550 mrioc = shost_priv(shost);
4552 ioc_info(mrioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
4553 pdev, pci_name(pdev), device_state);
/* Bring the device back to D0 and restore the saved config space. */
4554 pci_set_power_state(pdev, PCI_D0);
4555 pci_enable_wake(pdev, PCI_D0, 0);
4556 pci_restore_state(pdev);
/* Re-acquire resources that were released during suspend. */
4558 mrioc->cpu_count = num_online_cpus();
4559 r = mpi3mr_setup_resources(mrioc);
4561 ioc_info(mrioc, "%s: Setup resources failed[%d]\n",
/* Re-initialize the IOC (resume path: is_resume = 1). */
4566 mrioc->stop_drv_processing = 0;
4567 mpi3mr_memset_buffers(mrioc);
4568 r = mpi3mr_reinit_ioc(mrioc, 1);
4570 ioc_err(mrioc, "resuming controller failed[%d]\n", r);
/* Re-open midlayer I/O and restart the watchdog. */
4573 scsi_unblock_requests(shost);
4574 mpi3mr_start_watchdog(mrioc);
/* Match every subsystem variant of the Broadcom SAS4116 (MPI3) HBA. */
4580 static const struct pci_device_id mpi3mr_pci_id_table[] = {
4582 PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM,
4583 MPI3_MFGPAGE_DEVID_SAS4116, PCI_ANY_ID, PCI_ANY_ID)
4587 MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table);
4589 static struct pci_driver mpi3mr_pci_driver = {
4590 .name = MPI3MR_DRIVER_NAME,
4591 .id_table = mpi3mr_pci_id_table,
4592 .probe = mpi3mr_probe,
4593 .remove = mpi3mr_remove,
4594 .shutdown = mpi3mr_shutdown,
/* Legacy PCI power-management callbacks. */
4596 .suspend = mpi3mr_suspend,
4597 .resume = mpi3mr_resume,
4601 static ssize_t event_counter_show(struct device_driver *dd, char *buf)
4603 return sprintf(buf, "%llu\n", atomic64_read(&event_counter));
4605 static DRIVER_ATTR_RO(event_counter);
4607 static int __init mpi3mr_init(void)
4611 pr_info("Loading %s version %s\n", MPI3MR_DRIVER_NAME,
4612 MPI3MR_DRIVER_VERSION);
4614 ret_val = pci_register_driver(&mpi3mr_pci_driver);
4616 pr_err("%s failed to load due to pci register driver failure\n",
4617 MPI3MR_DRIVER_NAME);
4621 ret_val = driver_create_file(&mpi3mr_pci_driver.driver,
4622 &driver_attr_event_counter);
4624 pci_unregister_driver(&mpi3mr_pci_driver);
4629 static void __exit mpi3mr_exit(void)
4631 if (warn_non_secure_ctlr)
4633 "Unloading %s version %s while managing a non secure controller\n",
4634 MPI3MR_DRIVER_NAME, MPI3MR_DRIVER_VERSION);
4636 pr_info("Unloading %s version %s\n", MPI3MR_DRIVER_NAME,
4637 MPI3MR_DRIVER_VERSION);
4639 driver_remove_file(&mpi3mr_pci_driver.driver,
4640 &driver_attr_event_counter);
4641 pci_unregister_driver(&mpi3mr_pci_driver);
4644 module_init(mpi3mr_init);
4645 module_exit(mpi3mr_exit);