2 * Linux MegaRAID driver for SAS based RAID controllers
4 * Copyright (c) 2009-2013 LSI Corporation
5 * Copyright (c) 2013-2014 Avago Technologies
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2
10 * of the License, or (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 * FILE: megaraid_sas_fusion.c
22 * Authors: Avago Technologies
25 * Kashyap Desai <kashyap.desai@avagotech.com>
26 * Sumit Saxena <sumit.saxena@avagotech.com>
28 * Send feedback to: megaraidlinux.pdl@avagotech.com
30 * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
31 * San Jose, California 95131
34 #include <linux/kernel.h>
35 #include <linux/types.h>
36 #include <linux/pci.h>
37 #include <linux/list.h>
38 #include <linux/moduleparam.h>
39 #include <linux/module.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/delay.h>
43 #include <linux/uio.h>
44 #include <linux/uaccess.h>
46 #include <linux/compat.h>
47 #include <linux/blkdev.h>
48 #include <linux/mutex.h>
49 #include <linux/poll.h>
51 #include <scsi/scsi.h>
52 #include <scsi/scsi_cmnd.h>
53 #include <scsi/scsi_device.h>
54 #include <scsi/scsi_host.h>
55 #include <scsi/scsi_dbg.h>
56 #include <linux/dmi.h>
58 #include "megaraid_sas_fusion.h"
59 #include "megaraid_sas.h"
62 extern void megasas_free_cmds(struct megasas_instance *instance);
63 extern struct megasas_cmd *megasas_get_cmd(struct megasas_instance
66 megasas_complete_cmd(struct megasas_instance *instance,
67 struct megasas_cmd *cmd, u8 alt_status);
69 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
73 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd);
74 int megasas_alloc_cmds(struct megasas_instance *instance);
76 megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs);
78 megasas_issue_polled(struct megasas_instance *instance,
79 struct megasas_cmd *cmd);
81 megasas_check_and_restore_queue_depth(struct megasas_instance *instance);
83 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
84 void megaraid_sas_kill_hba(struct megasas_instance *instance);
86 extern u32 megasas_dbg_lvl;
87 void megasas_sriov_heartbeat_handler(unsigned long instance_addr);
88 int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
90 void megasas_start_timer(struct megasas_instance *instance,
91 struct timer_list *timer,
92 void *fn, unsigned long interval);
93 extern struct megasas_mgmt_info megasas_mgmt_info;
94 extern unsigned int resetwaittime;
95 extern unsigned int dual_qdepth_disable;
96 static void megasas_free_rdpq_fusion(struct megasas_instance *instance);
97 static void megasas_free_reply_fusion(struct megasas_instance *instance);
102 * megasas_enable_intr_fusion - Enables interrupts
103 * @regs: MFI register set
106 megasas_enable_intr_fusion(struct megasas_instance *instance)
108 struct megasas_register_set __iomem *regs;
109 regs = instance->reg_set;
111 instance->mask_interrupts = 0;
112 /* For Thunderbolt/Invader also clear intr on enable */
113 writel(~0, ®s->outbound_intr_status);
114 readl(®s->outbound_intr_status);
116 writel(~MFI_FUSION_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
118 /* Dummy readl to force pci flush */
119 readl(®s->outbound_intr_mask);
123 * megasas_disable_intr_fusion - Disables interrupt
124 * @regs: MFI register set
127 megasas_disable_intr_fusion(struct megasas_instance *instance)
129 u32 mask = 0xFFFFFFFF;
131 struct megasas_register_set __iomem *regs;
132 regs = instance->reg_set;
133 instance->mask_interrupts = 1;
135 writel(mask, ®s->outbound_intr_mask);
136 /* Dummy readl to force pci flush */
137 status = readl(®s->outbound_intr_mask);
141 megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs)
145 * Check if it is our interrupt
147 status = readl(®s->outbound_intr_status);
150 writel(status, ®s->outbound_intr_status);
151 readl(®s->outbound_intr_status);
154 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
161 * megasas_get_cmd_fusion - Get a command from the free pool
162 * @instance: Adapter soft state
164 * Returns a blk_tag indexed mpt frame
166 inline struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance
167 *instance, u32 blk_tag)
169 struct fusion_context *fusion;
171 fusion = instance->ctrl_context;
172 return fusion->cmd_list[blk_tag];
176 * megasas_return_cmd_fusion - Return a cmd to free command pool
177 * @instance: Adapter soft state
178 * @cmd: Command packet to be returned to free command pool
180 inline void megasas_return_cmd_fusion(struct megasas_instance *instance,
181 struct megasas_cmd_fusion *cmd)
184 memset(cmd->io_request, 0, sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
188 * megasas_fire_cmd_fusion - Sends command to the FW
191 megasas_fire_cmd_fusion(struct megasas_instance *instance,
192 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
194 #if defined(writeq) && defined(CONFIG_64BIT)
195 u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) |
196 le32_to_cpu(req_desc->u.low));
198 writeq(req_data, &instance->reg_set->inbound_low_queue_port);
202 spin_lock_irqsave(&instance->hba_lock, flags);
203 writel(le32_to_cpu(req_desc->u.low),
204 &instance->reg_set->inbound_low_queue_port);
205 writel(le32_to_cpu(req_desc->u.high),
206 &instance->reg_set->inbound_high_queue_port);
208 spin_unlock_irqrestore(&instance->hba_lock, flags);
213 * megasas_fusion_update_can_queue - Do all Adapter Queue depth related calculations here
214 * @instance: Adapter soft state
215 * fw_boot_context: Whether this function called during probe or after OCR
217 * This function is only for fusion controllers.
218 * Update host can queue, if firmware downgrade max supported firmware commands.
 * Firmware upgrade case will be skipped because underlying firmware has
220 * more resource than exposed to the OS.
/* NOTE(review): this extraction appears to have dropped lines (opening brace,
 * some statements); code kept verbatim, only comments added. */
megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_context)
	u16 cur_max_fw_cmds = 0;
	u16 ldio_threshold = 0;
	struct megasas_register_set __iomem *reg_set;

	reg_set = instance->reg_set;

	/* Extended (dual) queue depth is reported in scratch_pad_3 */
	cur_max_fw_cmds = readl(&instance->reg_set->outbound_scratch_pad_3) & 0x00FFFF;

	/* Fall back to the legacy FW status register when dual qd is disabled
	 * by module param or not reported by firmware */
	if (dual_qdepth_disable || !cur_max_fw_cmds)
		cur_max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;

	/* NOTE(review): the LHS of this assignment (presumably
	 * "ldio_threshold =") appears lost in extraction — confirm upstream */
	(instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF) - MEGASAS_FUSION_IOCTL_CMDS;

	dev_info(&instance->pdev->dev,
		"Current firmware maximum commands: %d\t LDIO threshold: %d\n",
		cur_max_fw_cmds, ldio_threshold);

	if (fw_boot_context == OCR_CONTEXT) {
		/* After an online controller reset: only shrink the queue
		 * depth, never grow beyond the probe-time value */
		cur_max_fw_cmds = cur_max_fw_cmds - 1;
		if (cur_max_fw_cmds <= instance->max_fw_cmds) {
			instance->cur_can_queue =
				cur_max_fw_cmds - (MEGASAS_FUSION_INTERNAL_CMDS +
				MEGASAS_FUSION_IOCTL_CMDS);
			instance->host->can_queue = instance->cur_can_queue;
			instance->ldio_threshold = ldio_threshold;

		instance->max_fw_cmds = cur_max_fw_cmds;
		instance->ldio_threshold = ldio_threshold;

		/* Without RDPQ a single contiguous reply queue caps commands */
		if (!instance->is_rdpq)
			instance->max_fw_cmds = min_t(u16, instance->max_fw_cmds, 1024);

		instance->max_fw_cmds = min(instance->max_fw_cmds,
			(u16)MEGASAS_KDUMP_QUEUE_DEPTH);
		/*
		 * Reduce the max supported cmds by 1. This is to ensure that the
		 * reply_q_sz (1 more than the max cmd that driver may send)
		 * does not exceed max cmds that the FW can support
		 */
		instance->max_fw_cmds = instance->max_fw_cmds-1;

	/* Internal and ioctl frames are reserved out of the SCSI budget */
	instance->max_scsi_cmds = instance->max_fw_cmds -
		(MEGASAS_FUSION_INTERNAL_CMDS +
		MEGASAS_FUSION_IOCTL_CMDS);
	instance->cur_can_queue = instance->max_scsi_cmds;
277 * megasas_free_cmds_fusion - Free all the cmds in the free cmd pool
278 * @instance: Adapter soft state
/* NOTE(review): extraction appears to have dropped braces/guards here;
 * code kept verbatim, only comments added. */
megasas_free_cmds_fusion(struct megasas_instance *instance)
	struct fusion_context *fusion = instance->ctrl_context;
	struct megasas_cmd_fusion *cmd;

	/* Return each command's SG chain and sense frames to their DMA pools */
	for (i = 0; i < instance->max_fw_cmds; i++) {
		cmd = fusion->cmd_list[i];
		pci_pool_free(fusion->sg_dma_pool, cmd->sg_frame,
			cmd->sg_frame_phys_addr);
		pci_pool_free(fusion->sense_dma_pool, cmd->sense,
			cmd->sense_phys_addr);

	/* Destroy the (now empty) pools and clear the pointers so a repeat
	 * call cannot double-destroy */
	if (fusion->sg_dma_pool) {
		pci_pool_destroy(fusion->sg_dma_pool);
		fusion->sg_dma_pool = NULL;
	if (fusion->sense_dma_pool) {
		pci_pool_destroy(fusion->sense_dma_pool);
		fusion->sense_dma_pool = NULL;

	/* Reply Frame, Desc*/
	if (instance->is_rdpq)
		megasas_free_rdpq_fusion(instance);
	megasas_free_reply_fusion(instance);

	/* Request Frame, Desc*/
	if (fusion->req_frames_desc)
		dma_free_coherent(&instance->pdev->dev,
			fusion->request_alloc_sz, fusion->req_frames_desc,
			fusion->req_frames_desc_phys);
	if (fusion->io_request_frames)
		pci_pool_free(fusion->io_request_frames_pool,
			fusion->io_request_frames,
			fusion->io_request_frames_phys);
	if (fusion->io_request_frames_pool) {
		pci_pool_destroy(fusion->io_request_frames_pool);
		fusion->io_request_frames_pool = NULL;

	/* Finally release the per-command structs and the pointer array */
	for (i = 0; i < instance->max_fw_cmds; i++)
		kfree(fusion->cmd_list[i]);
	kfree(fusion->cmd_list);
339 * megasas_create_sg_sense_fusion - Creates DMA pool for cmd frames
340 * @instance: Adapter soft state
/* NOTE(review): extraction appears to have dropped braces/returns here;
 * code kept verbatim, only comments added. */
static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
	struct fusion_context *fusion;
	struct megasas_cmd_fusion *cmd;

	fusion = instance->ctrl_context;
	max_cmd = instance->max_fw_cmds;

	/* Pool of SG chain frames, one per command, 4-byte aligned */
	fusion->sg_dma_pool =
		pci_pool_create("mr_sg", instance->pdev,
			instance->max_chain_frame_sz, 4, 0);
	/* SCSI_SENSE_BUFFERSIZE = 96 bytes */
	fusion->sense_dma_pool =
		pci_pool_create("mr_sense", instance->pdev,
			SCSI_SENSE_BUFFERSIZE, 64, 0);

	if (!fusion->sense_dma_pool || !fusion->sg_dma_pool) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n", __func__, __LINE__);

	/*
	 * Allocate and attach a frame to each of the commands in cmd_list
	 */
	for (i = 0; i < max_cmd; i++) {
		cmd = fusion->cmd_list[i];
		cmd->sg_frame = pci_pool_alloc(fusion->sg_dma_pool,
			GFP_KERNEL, &cmd->sg_frame_phys_addr);

		cmd->sense = pci_pool_alloc(fusion->sense_dma_pool,
			GFP_KERNEL, &cmd->sense_phys_addr);
		if (!cmd->sg_frame || !cmd->sense) {
			dev_err(&instance->pdev->dev,
				"Failed from %s %d\n", __func__, __LINE__);
/* NOTE(review): extraction appears to have dropped braces/returns and the
 * kzalloc GFP argument lines; code kept verbatim, only comments added. */
megasas_alloc_cmdlist_fusion(struct megasas_instance *instance)
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;

	max_cmd = instance->max_fw_cmds;

	/*
	 * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * command structures.
	 */
	fusion->cmd_list = kzalloc(sizeof(struct megasas_cmd_fusion *) * max_cmd,
	if (!fusion->cmd_list) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n", __func__, __LINE__);

	for (i = 0; i < max_cmd; i++) {
		fusion->cmd_list[i] = kzalloc(sizeof(struct megasas_cmd_fusion),
		if (!fusion->cmd_list[i]) {
			dev_err(&instance->pdev->dev,
				"Failed from %s %d\n", __func__, __LINE__);
/* NOTE(review): extraction appears to have dropped braces/returns here;
 * code kept verbatim, only comments added. */
megasas_alloc_request_fusion(struct megasas_instance *instance)
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;

	/* Coherent DMA buffer holding the request descriptor ring */
	fusion->req_frames_desc =
		dma_alloc_coherent(&instance->pdev->dev,
			fusion->request_alloc_sz,
			&fusion->req_frames_desc_phys, GFP_KERNEL);
	if (!fusion->req_frames_desc) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n", __func__, __LINE__);

	/* Pool for the MPI2 IO request frames, 16-byte aligned */
	fusion->io_request_frames_pool =
		pci_pool_create("mr_ioreq", instance->pdev,
			fusion->io_frames_alloc_sz, 16, 0);

	if (!fusion->io_request_frames_pool) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n", __func__, __LINE__);

	fusion->io_request_frames =
		pci_pool_alloc(fusion->io_request_frames_pool,
			GFP_KERNEL, &fusion->io_request_frames_phys);
	if (!fusion->io_request_frames) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n", __func__, __LINE__);
/* NOTE(review): extraction appears to have dropped braces/returns here;
 * code kept verbatim, only comments added. */
megasas_alloc_reply_fusion(struct megasas_instance *instance)
	struct fusion_context *fusion;
	union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
	fusion = instance->ctrl_context;

	/* One reply queue per MSI-x vector; at least one */
	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
	/* Non-RDPQ mode: all queues live in one contiguous allocation */
	fusion->reply_frames_desc_pool =
		pci_pool_create("mr_reply", instance->pdev,
			fusion->reply_alloc_sz * count, 16, 0);

	if (!fusion->reply_frames_desc_pool) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n", __func__, __LINE__);

	fusion->reply_frames_desc[0] =
		pci_pool_alloc(fusion->reply_frames_desc_pool,
			GFP_KERNEL, &fusion->reply_frames_desc_phys[0]);
	if (!fusion->reply_frames_desc[0]) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n", __func__, __LINE__);

	/* Pre-fill every reply descriptor with all-ones (unused marker) */
	reply_desc = fusion->reply_frames_desc[0];
	for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++)
		reply_desc->Words = cpu_to_le64(ULLONG_MAX);

	/* This is not a rdpq mode, but driver still populate
	 * reply_frame_desc array to use same msix index in ISR path.
	 */
	for (i = 0; i < (count - 1); i++)
		fusion->reply_frames_desc[i + 1] =
			fusion->reply_frames_desc[i] +
			(fusion->reply_alloc_sz)/sizeof(union MPI2_REPLY_DESCRIPTORS_UNION);
/* NOTE(review): extraction appears to have dropped braces/returns and the
 * pci_alloc_consistent handle argument; code kept verbatim, comments only. */
megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
	struct fusion_context *fusion;
	union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;

	fusion = instance->ctrl_context;

	/* RDPQ array: one MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY per possible queue */
	fusion->rdpq_virt = pci_alloc_consistent(instance->pdev,
		sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION,
	if (!fusion->rdpq_virt) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n", __func__, __LINE__);

	memset(fusion->rdpq_virt, 0,
		sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION);
	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
	/* RDPQ mode: one pool entry per queue instead of one big allocation */
	fusion->reply_frames_desc_pool = pci_pool_create("mr_rdpq",
		instance->pdev, fusion->reply_alloc_sz, 16, 0);

	if (!fusion->reply_frames_desc_pool) {
		dev_err(&instance->pdev->dev,
			"Failed from %s %d\n", __func__, __LINE__);

	for (i = 0; i < count; i++) {
		fusion->reply_frames_desc[i] =
			pci_pool_alloc(fusion->reply_frames_desc_pool,
				GFP_KERNEL, &fusion->reply_frames_desc_phys[i]);
		if (!fusion->reply_frames_desc[i]) {
			dev_err(&instance->pdev->dev,
				"Failed from %s %d\n", __func__, __LINE__);

		/* Point this RDPQ entry at the queue's DMA address */
		fusion->rdpq_virt[i].RDPQBaseAddress =
			fusion->reply_frames_desc_phys[i];

		/* Pre-fill descriptors with all-ones (unused marker) */
		reply_desc = fusion->reply_frames_desc[i];
		for (j = 0; j < fusion->reply_q_depth; j++, reply_desc++)
			reply_desc->Words = cpu_to_le64(ULLONG_MAX);
552 megasas_free_rdpq_fusion(struct megasas_instance *instance) {
555 struct fusion_context *fusion;
557 fusion = instance->ctrl_context;
559 for (i = 0; i < MAX_MSIX_QUEUES_FUSION; i++) {
560 if (fusion->reply_frames_desc[i])
561 pci_pool_free(fusion->reply_frames_desc_pool,
562 fusion->reply_frames_desc[i],
563 fusion->reply_frames_desc_phys[i]);
566 if (fusion->reply_frames_desc_pool)
567 pci_pool_destroy(fusion->reply_frames_desc_pool);
569 if (fusion->rdpq_virt)
570 pci_free_consistent(instance->pdev,
571 sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION,
572 fusion->rdpq_virt, fusion->rdpq_phys);
576 megasas_free_reply_fusion(struct megasas_instance *instance) {
578 struct fusion_context *fusion;
580 fusion = instance->ctrl_context;
582 if (fusion->reply_frames_desc[0])
583 pci_pool_free(fusion->reply_frames_desc_pool,
584 fusion->reply_frames_desc[0],
585 fusion->reply_frames_desc_phys[0]);
587 if (fusion->reply_frames_desc_pool)
588 pci_pool_destroy(fusion->reply_frames_desc_pool);
594 * megasas_alloc_cmds_fusion - Allocates the command packets
595 * @instance: Adapter soft state
598 * Each frame has a 32-bit field called context. This context is used to get
599 * back the megasas_cmd_fusion from the frame when a frame gets completed
600 * In this driver, the 32 bit values are the indices into an array cmd_list.
601 * This array is used only to look up the megasas_cmd_fusion given the context.
602 * The free commands themselves are maintained in a linked list called cmd_pool.
604 * cmds are formed in the io_request and sg_frame members of the
605 * megasas_cmd_fusion. The context field is used to get a request descriptor
606 * and is used as SMID of the cmd.
607 * SMID value range is from 1 to max_fw_cmds.
/* NOTE(review): extraction appears to have dropped braces, goto targets and
 * some declarations; code kept verbatim, only comments added. */
megasas_alloc_cmds_fusion(struct megasas_instance *instance)
	struct fusion_context *fusion;
	struct megasas_cmd_fusion *cmd;
	dma_addr_t io_req_base_phys;

	fusion = instance->ctrl_context;

	if (megasas_alloc_cmdlist_fusion(instance))

	if (megasas_alloc_request_fusion(instance))

	/* RDPQ and non-RDPQ modes use different reply queue layouts */
	if (instance->is_rdpq) {
		if (megasas_alloc_rdpq_fusion(instance))
	if (megasas_alloc_reply_fusion(instance))

	/* The first 256 bytes (SMID 0) is not used. Don't add to the cmd list */
	io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
	io_req_base_phys = fusion->io_request_frames_phys + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;

	/*
	 * Add all the commands to command pool (fusion->cmd_pool)
	 */
	/* SMID 0 is reserved. Set SMID/index from 1 */
	for (i = 0; i < instance->max_fw_cmds; i++) {
		cmd = fusion->cmd_list[i];
		offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
		memset(cmd, 0, sizeof(struct megasas_cmd_fusion));
		/* Commands past max_scsi_cmds are internal/ioctl MFI commands */
		cmd->sync_cmd_idx = (i >= instance->max_scsi_cmds) ?
			(i - instance->max_scsi_cmds) :
			(u32)ULONG_MAX; /* Set to Invalid */
		cmd->instance = instance;
		/* NOTE(review): LHS ("cmd->io_request =") appears lost in
		 * extraction of the next two lines */
			(struct MPI2_RAID_SCSI_IO_REQUEST *)
		  (io_req_base + offset);
		memset(cmd->io_request, 0,
		       sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
		cmd->io_request_phys_addr = io_req_base_phys + offset;

	if (megasas_create_sg_sense_fusion(instance))

	/* error path: undo all allocations made above */
	megasas_free_cmds_fusion(instance);
674 * wait_and_poll - Issues a polling command
675 * @instance: Adapter soft state
676 * @cmd: Command packet to be issued
678 * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
/* NOTE(review): extraction appears to have dropped the remaining parameters,
 * loop body and return statements; code kept verbatim, comments only. */
wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
	struct megasas_header *frame_hdr = &cmd->frame->hdr;
	struct fusion_context *fusion;

	u32 msecs = seconds * 1000;

	fusion = instance->ctrl_context;
	/*
	 * Wait for cmd_status to change from its initial 0xff, polling in
	 * 20 ms steps up to the caller-supplied timeout
	 */
	for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i += 20) {

	/* NOTE(review): MFI_STAT_INVALID_STATUS presumably equals the initial
	 * 0xff, i.e. FW never completed the frame — confirm against header */
	if (frame_hdr->cmd_status == MFI_STAT_INVALID_STATUS)
	else if (frame_hdr->cmd_status == MFI_STAT_OK)
708 * megasas_ioc_init_fusion - Initializes the FW
709 * @instance: Adapter soft state
711 * Issues the IOC Init cmd
/* NOTE(review): this long function is clearly mis-extracted (braces, error
 * gotos and several statements missing); code kept verbatim, comments only. */
megasas_ioc_init_fusion(struct megasas_instance *instance)
	struct megasas_init_frame *init_frame;
	struct MPI2_IOC_INIT_REQUEST *IOCInitMessage = NULL;
	dma_addr_t ioc_init_handle;
	struct megasas_cmd *cmd;
	u8 ret, cur_rdpq_mode;
	struct fusion_context *fusion;
	union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc;
	struct megasas_header *frame_hdr;
	const char *sys_info;
	MFI_CAPABILITIES *drv_ops;

	fusion = instance->ctrl_context;

	cmd = megasas_get_cmd(instance);

		dev_err(&instance->pdev->dev, "Could not allocate cmd for INIT Frame\n");

	/* Query FW for RDPQ capability via scratch pad 2 */
	scratch_pad_2 = readl
		(&instance->reg_set->outbound_scratch_pad_2);

	cur_rdpq_mode = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ? 1 : 0;

	/* Driver was in RDPQ mode but FW no longer supports it — bail out */
	if (instance->is_rdpq && !cur_rdpq_mode) {
		dev_err(&instance->pdev->dev, "Firmware downgrade *NOT SUPPORTED*"
			" from RDPQ mode to non RDPQ mode\n");

	instance->fw_sync_cache_support = (scratch_pad_2 &
		MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
	dev_info(&instance->pdev->dev, "FW supports sync cache\t: %s\n",
		 instance->fw_sync_cache_support ? "Yes" : "No");

	/* Build the MPI2 IOC INIT request in coherent DMA memory */
		dma_alloc_coherent(&instance->pdev->dev,
			sizeof(struct MPI2_IOC_INIT_REQUEST),
			&ioc_init_handle, GFP_KERNEL);

	if (!IOCInitMessage) {
		dev_err(&instance->pdev->dev, "Could not allocate memory for "

	memset(IOCInitMessage, 0, sizeof(struct MPI2_IOC_INIT_REQUEST));

	IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT;
	IOCInitMessage->WhoInit	= MPI2_WHOINIT_HOST_DRIVER;
	IOCInitMessage->MsgVersion = cpu_to_le16(MPI2_VERSION);
	IOCInitMessage->HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
	IOCInitMessage->SystemRequestFrameSize = cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4);

	IOCInitMessage->ReplyDescriptorPostQueueDepth = cpu_to_le16(fusion->reply_q_depth);
	/* In RDPQ mode FW is given the RDPQ array; otherwise queue 0 directly */
	IOCInitMessage->ReplyDescriptorPostQueueAddress = instance->is_rdpq ?
			cpu_to_le64(fusion->rdpq_phys) :
			cpu_to_le64(fusion->reply_frames_desc_phys[0]);
	IOCInitMessage->MsgFlags = instance->is_rdpq ?
			MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE : 0;
	IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
	IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
	init_frame = (struct megasas_init_frame *)cmd->frame;
	memset(init_frame, 0, MEGAMFI_FRAME_SIZE);

	frame_hdr = &cmd->frame->hdr;
	frame_hdr->cmd_status = 0xFF;
	frame_hdr->flags = cpu_to_le16(
		le16_to_cpu(frame_hdr->flags) |
		MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	init_frame->cmd	= MFI_CMD_INIT;
	init_frame->cmd_status = 0xFF;

	/* Advertise driver capabilities to firmware */
	drv_ops = (MFI_CAPABILITIES *) &(init_frame->driver_operations);

	/* driver support Extended MSIX */
	if (fusion->adapter_type == INVADER_SERIES)
		drv_ops->mfi_capabilities.support_additional_msix = 1;
	/* driver supports HA / Remote LUN over Fast Path interface */
	drv_ops->mfi_capabilities.support_fp_remote_lun = 1;

	drv_ops->mfi_capabilities.support_max_255lds = 1;
	drv_ops->mfi_capabilities.support_ndrive_r1_lb = 1;
	drv_ops->mfi_capabilities.security_protocol_cmds_fw = 1;

	if (instance->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
		drv_ops->mfi_capabilities.support_ext_io_size = 1;

	drv_ops->mfi_capabilities.support_fp_rlbypass = 1;
	if (!dual_qdepth_disable)
		drv_ops->mfi_capabilities.support_ext_queue_depth = 1;

	drv_ops->mfi_capabilities.support_qd_throttling = 1;
	/* Convert capability to LE32 */
	cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);

	/* Pass the host system UUID (from DMI) to firmware, capped at 64 bytes */
	sys_info = dmi_get_system_info(DMI_PRODUCT_UUID);
	if (instance->system_info_buf && sys_info) {
		memcpy(instance->system_info_buf->systemId, sys_info,
			strlen(sys_info) > 64 ? 64 : strlen(sys_info));
		instance->system_info_buf->systemIdLength =
			strlen(sys_info) > 64 ? 64 : strlen(sys_info);
		init_frame->system_info_lo = instance->system_info_h;
		init_frame->system_info_hi = 0;

	init_frame->queue_info_new_phys_addr_hi =
		cpu_to_le32(upper_32_bits(ioc_init_handle));
	init_frame->queue_info_new_phys_addr_lo =
		cpu_to_le32(lower_32_bits(ioc_init_handle));
	init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST));

	/* The INIT frame itself is posted as an MFA descriptor */
	req_desc.u.low = cpu_to_le32(lower_32_bits(cmd->frame_phys_addr));
	req_desc.u.high = cpu_to_le32(upper_32_bits(cmd->frame_phys_addr));
	req_desc.MFAIo.RequestFlags =
		(MEGASAS_REQ_DESCRIPT_FLAGS_MFA <<
		MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	/*
	 * disable the intr before firing the init frame
	 */
	instance->instancet->disable_intr(instance);

	/* Wait (up to 10 s, 20 ms steps) for the doorbell to clear */
	for (i = 0; i < (10 * 1000); i += 20) {
		if (readl(&instance->reg_set->doorbell) & 1)

	megasas_fire_cmd_fusion(instance, &req_desc);

	wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS);

	frame_hdr = &cmd->frame->hdr;
	if (frame_hdr->cmd_status != 0) {

	dev_info(&instance->pdev->dev, "Init cmd success\n");

	megasas_return_cmd(instance, cmd);

	if (IOCInitMessage)
		dma_free_coherent(&instance->pdev->dev,
			sizeof(struct MPI2_IOC_INIT_REQUEST),
			IOCInitMessage, ioc_init_handle);
877 * megasas_sync_pd_seq_num - JBOD SEQ MAP
878 * @instance: Adapter soft state
879 * @pend: set to 1, if it is pended jbod map.
881 * Issue Jbod map to the firmware. If it is pended command,
882 * issue command and return. If it is first instance of jbod map
883 * issue and receive command.
/* NOTE(review): extraction appears to have dropped braces, returns and some
 * dcmd field assignments; code kept verbatim, only comments added. */
megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;

	/* Double-buffered: map_id parity selects which sync buffer to use */
	pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id & 1)];
	pd_seq_h = fusion->pd_seq_phys[(instance->pd_seq_map_id & 1)];
	/* Struct already contains one MR_PD_CFG_SEQ, hence the -1 */
	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
			(sizeof(struct MR_PD_CFG_SEQ) *
			(MAX_PHYSICAL_DEVICES - 1));

	cmd = megasas_get_cmd(instance);
		dev_err(&instance->pdev->dev,
			"Could not get mfi cmd. Fail from %s %d\n",

	dcmd = &cmd->frame->dcmd;

	memset(pd_sync, 0, pd_seq_map_sz);
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->data_xfer_len = cpu_to_le32(pd_seq_map_sz);
	dcmd->opcode = cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(pd_seq_h);
	dcmd->sgl.sge32[0].length = cpu_to_le32(pd_seq_map_sz);

		/* Pended variant: fire and return; completion arrives via ISR */
		dcmd->mbox.b[0] = MEGASAS_DCMD_MBOX_PEND_FLAG;
		dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
		instance->jbod_seq_cmd = cmd;
		instance->instancet->issue_dcmd(instance, cmd);

	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);

	/* Below code is only for non pended DCMD */
	if (instance->ctrl_context && !instance->mask_interrupts)
		ret = megasas_issue_blocked_cmd(instance, cmd,
			MFI_IO_TIMEOUT_SECS);
		ret = megasas_issue_polled(instance, cmd);

	if (le32_to_cpu(pd_sync->count) > MAX_PHYSICAL_DEVICES) {
		dev_warn(&instance->pdev->dev,
			"driver supports max %d JBOD, but FW reports %d\n",
			MAX_PHYSICAL_DEVICES, le32_to_cpu(pd_sync->count));

	if (ret == DCMD_TIMEOUT && instance->ctrl_context)
		megaraid_sas_kill_hba(instance);

	if (ret == DCMD_SUCCESS)
		instance->pd_seq_map_id++;

	megasas_return_cmd(instance, cmd);
958 * megasas_get_ld_map_info - Returns FW's ld_map structure
959 * @instance: Adapter soft state
960 * @pend: Pend the command or not
961 * Issues an internal command (DCMD) to get the FW's controller PD
962 * list structure. This information is mainly used to find out SYSTEM
963 * supported by the FW.
964 * dcmd.mbox value setting for MR_DCMD_LD_MAP_GET_INFO
965 * dcmd.mbox.b[0] - number of LDs being sync'd
966 * dcmd.mbox.b[1] - 0 - complete command immediately.
967 * - 1 - pend till config change
968 * dcmd.mbox.b[2] - 0 - supports max 64 lds and uses legacy MR_FW_RAID_MAP
969 * - 1 - supports max MAX_LOGICAL_DRIVES_EXT lds and
970 * uses extended struct MR_FW_RAID_MAP_EXT
/* NOTE(review): extraction appears to have dropped braces, returns and some
 * declarations; code kept verbatim, only comments added. */
megasas_get_ld_map_info(struct megasas_instance *instance)
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	struct fusion_context *fusion;

	cmd = megasas_get_cmd(instance);

		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for map info\n");

	fusion = instance->ctrl_context;

		megasas_return_cmd(instance, cmd);

	dcmd = &cmd->frame->dcmd;

	size_map_info = fusion->current_map_sz;

	/* Double-buffered RAID map: map_id parity selects the buffer */
	ci = (void *) fusion->ld_map[(instance->map_id & 1)];
	ci_h = fusion->ld_map_phys[(instance->map_id & 1)];

		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ld_map_info\n");
		megasas_return_cmd(instance, cmd);

	memset(ci, 0, fusion->max_map_sz);
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
	/* NOTE(review): cpu_to_le32() inside a printk argument looks dubious
	 * (byte-swapped value on BE hosts) — cosmetic only, verify upstream */
	dev_dbg(&instance->pdev->dev,
		"%s sending MR_DCMD_LD_MAP_GET_INFO with size %d\n",
		__func__, cpu_to_le32(size_map_info));

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
	dcmd->data_xfer_len = cpu_to_le32(size_map_info);
	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
	dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);

	/* Blocked (interrupt-driven) when possible, otherwise polled */
	if (instance->ctrl_context && !instance->mask_interrupts)
		ret = megasas_issue_blocked_cmd(instance, cmd,
			MFI_IO_TIMEOUT_SECS);
		ret = megasas_issue_polled(instance, cmd);

	if (ret == DCMD_TIMEOUT && instance->ctrl_context)
		megaraid_sas_kill_hba(instance);

	megasas_return_cmd(instance, cmd);
/* Fetch the LD RAID map and enable fast-path IO only if it validates.
 * NOTE(review): return statements appear lost in extraction. */
megasas_get_map_info(struct megasas_instance *instance)
	struct fusion_context *fusion = instance->ctrl_context;

	/* Pessimistic default; upgraded below on a valid map */
	fusion->fast_path_io = 0;
	if (!megasas_get_ld_map_info(instance)) {
		if (MR_ValidateMapInfo(instance)) {
			fusion->fast_path_io = 1;
1058 * megasas_sync_map_info - Returns FW's ld_map structure
1059 * @instance: Adapter soft state
1061 * Issues an internal command (DCMD) to get the FW's controller PD
1062 * list structure. This information is mainly used to find out SYSTEM
1063 * supported by the FW.
/* NOTE(review): extraction appears to have dropped braces, returns and some
 * dcmd field assignments; code kept verbatim, only comments added. */
megasas_sync_map_info(struct megasas_instance *instance)
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	u32 size_sync_info, num_lds;
	struct fusion_context *fusion;
	struct MR_LD_TARGET_SYNC *ci = NULL;
	struct MR_DRV_RAID_MAP_ALL *map;
	struct MR_LD_RAID *raid;
	struct MR_LD_TARGET_SYNC *ld_sync;
	dma_addr_t ci_h = 0;

	cmd = megasas_get_cmd(instance);

		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for sync info\n");

	fusion = instance->ctrl_context;

		megasas_return_cmd(instance, cmd);

	map = fusion->ld_drv_map[instance->map_id & 1];

	num_lds = le16_to_cpu(map->raidMap.ldCount);

	dcmd = &cmd->frame->dcmd;

	size_sync_info = sizeof(struct MR_LD_TARGET_SYNC) *num_lds;

	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* Build the sync payload in the *other* (inactive) map buffer */
	ci = (struct MR_LD_TARGET_SYNC *)
		fusion->ld_map[(instance->map_id - 1) & 1];
	memset(ci, 0, fusion->max_map_sz);

	ci_h = fusion->ld_map_phys[(instance->map_id - 1) & 1];

	ld_sync = (struct MR_LD_TARGET_SYNC *)ci;

	/* One target-id/sequence-number pair per logical drive */
	for (i = 0; i < num_lds; i++, ld_sync++) {
		raid = MR_LdRaidGet(i, map);
		ld_sync->targetId = MR_GetLDTgtId(i, map);
		ld_sync->seqNum = raid->seqNum;

	size_map_info = fusion->current_map_sz;

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
	dcmd->data_xfer_len = cpu_to_le32(size_map_info);
	dcmd->mbox.b[0] = num_lds;
	/* Pended: FW completes this DCMD only on a config change */
	dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG;
	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
	dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);

	instance->map_update_cmd = cmd;

	instance->instancet->issue_dcmd(instance, cmd);
 * megasas_display_intel_branding - Display branding string
1142 * @instance: per adapter object
/* Print an Intel OEM branding string when the adapter's PCI subsystem IDs
 * match a known Intel SKU. NOTE(review): `break` statements between the
 * cases appear lost in this extraction; code kept verbatim, comments only. */
megasas_display_intel_branding(struct megasas_instance *instance)
	/* Only Intel-subsystem boards carry OEM branding */
	if (instance->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)

	switch (instance->pdev->device) {
	case PCI_DEVICE_ID_LSI_INVADER:
		switch (instance->pdev->subsystem_device) {
		case MEGARAID_INTEL_RS3DC080_SSDID:
			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
				instance->host->host_no,
				MEGARAID_INTEL_RS3DC080_BRANDING);
		case MEGARAID_INTEL_RS3DC040_SSDID:
			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
				instance->host->host_no,
				MEGARAID_INTEL_RS3DC040_BRANDING);
		case MEGARAID_INTEL_RS3SC008_SSDID:
			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
				instance->host->host_no,
				MEGARAID_INTEL_RS3SC008_BRANDING);
		case MEGARAID_INTEL_RS3MC044_SSDID:
			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
				instance->host->host_no,
				MEGARAID_INTEL_RS3MC044_BRANDING);
	case PCI_DEVICE_ID_LSI_FURY:
		switch (instance->pdev->subsystem_device) {
		case MEGARAID_INTEL_RS3WC080_SSDID:
			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
				instance->host->host_no,
				MEGARAID_INTEL_RS3WC080_BRANDING);
		case MEGARAID_INTEL_RS3WC040_SSDID:
			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
				instance->host->host_no,
				MEGARAID_INTEL_RS3WC040_BRANDING);
	case PCI_DEVICE_ID_LSI_CUTLASS_52:
	case PCI_DEVICE_ID_LSI_CUTLASS_53:
		switch (instance->pdev->subsystem_device) {
		case MEGARAID_INTEL_RMS3BC160_SSDID:
			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
				instance->host->host_no,
				MEGARAID_INTEL_RMS3BC160_BRANDING);
1213 * megasas_init_adapter_fusion - Initializes the FW
1214 * @instance: Adapter soft state
1216 * This is the main function for initializing firmware.
/*
 * Sizes the request/reply queues from the firmware-reported command count,
 * derives chain-frame geometry from scratch-pad register 2, allocates the
 * MFI and fusion command pools, runs IOC INIT, and pulls the initial RAID
 * map. On failure it unwinds via the fail_* labels below.
 */
1219 megasas_init_adapter_fusion(struct megasas_instance *instance)
1221 struct megasas_register_set __iomem *reg_set;
1222 struct fusion_context *fusion;
1223 u32 max_cmd, scratch_pad_2;
1226 fusion = instance->ctrl_context;
1228 reg_set = instance->reg_set;
1230 megasas_fusion_update_can_queue(instance, PROBE_CONTEXT);
1233 * Reduce the max supported cmds by 1. This is to ensure that the
1234 * reply_q_sz (1 more than the max cmd that driver may send)
1235 * does not exceed max cmds that the FW can support
1237 instance->max_fw_cmds = instance->max_fw_cmds-1;
1240 * Only Driver's internal DCMDs and IOCTL DCMDs needs to have MFI frames
1242 instance->max_mfi_cmds =
1243 MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS;
1245 max_cmd = instance->max_fw_cmds;
/* Reply queue depth: double (max_cmd + 1) rounded up to a multiple of 16. */
1247 fusion->reply_q_depth = 2 * (((max_cmd + 1 + 15)/16)*16);
1249 fusion->request_alloc_sz =
1250 sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *max_cmd;
1251 fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)
1252 *(fusion->reply_q_depth);
1253 fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
1254 (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE *
1255 (max_cmd + 1)); /* Extra 1 for SMID 0 */
1257 scratch_pad_2 = readl(&instance->reg_set->outbound_scratch_pad_2);
1258 /* If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
1259 * Firmware support extended IO chain frame which is 4 times more than
1261 * Legacy Firmware - Frame size is (8 * 128) = 1K
1262 * 1M IO Firmware - Frame size is (8 * 128 * 4) = 4K
1264 if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
1265 instance->max_chain_frame_sz =
1266 ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
1267 MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_1MB_IO;
1269 instance->max_chain_frame_sz =
1270 ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
1271 MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_256K_IO;
/* Guard against a bogus firmware value; fall back to the legacy minimum. */
1273 if (instance->max_chain_frame_sz < MEGASAS_CHAIN_FRAME_SZ_MIN) {
1274 dev_warn(&instance->pdev->dev, "frame size %d invalid, fall back to legacy max frame size %d\n",
1275 instance->max_chain_frame_sz,
1276 MEGASAS_CHAIN_FRAME_SZ_MIN);
1277 instance->max_chain_frame_sz = MEGASAS_CHAIN_FRAME_SZ_MIN;
/* SGE accounting: /16 because each IEEE SGE is 16 bytes. */
1280 fusion->max_sge_in_main_msg =
1281 (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
1282 - offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL))/16;
1284 fusion->max_sge_in_chain =
1285 instance->max_chain_frame_sz
1286 / sizeof(union MPI2_SGE_IO_UNION);
/* -2 reserves room for the chain element itself; keep a power of two. */
1288 instance->max_num_sge =
1289 rounddown_pow_of_two(fusion->max_sge_in_main_msg
1290 + fusion->max_sge_in_chain - 2);
1292 /* Used for pass thru MFI frame (DCMD) */
1293 fusion->chain_offset_mfi_pthru =
1294 offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL)/16;
1296 fusion->chain_offset_io_request =
1297 (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
1298 sizeof(union MPI2_SGE_IO_UNION))/16;
/* One reply-index slot per MSI-X vector (at least one). */
1300 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
1301 for (i = 0 ; i < count; i++)
1302 fusion->last_reply_idx[i] = 0;
1305 * For fusion adapters, 3 commands for IOCTL and 5 commands
1306 * for driver's internal DCMDs.
1308 instance->max_scsi_cmds = instance->max_fw_cmds -
1309 (MEGASAS_FUSION_INTERNAL_CMDS +
1310 MEGASAS_FUSION_IOCTL_CMDS);
1311 sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS);
1314 * Allocate memory for descriptors
1315 * Create a pool of commands
1317 if (megasas_alloc_cmds(instance))
1318 goto fail_alloc_mfi_cmds;
1319 if (megasas_alloc_cmds_fusion(instance))
1320 goto fail_alloc_cmds;
1322 if (megasas_ioc_init_fusion(instance))
1325 megasas_display_intel_branding(instance);
1326 if (megasas_get_ctrl_info(instance)) {
1327 dev_err(&instance->pdev->dev,
1328 "Could not get controller info. Fail from %s %d\n",
1329 __func__, __LINE__);
1333 instance->flag_ieee = 1;
1334 fusion->fast_path_io = 0;
/* Two driver-local RAID map copies (double buffered by map_id parity). */
1336 fusion->drv_map_pages = get_order(fusion->drv_map_sz);
1337 for (i = 0; i < 2; i++) {
1338 fusion->ld_map[i] = NULL;
1339 fusion->ld_drv_map[i] = (void *)__get_free_pages(GFP_KERNEL,
1340 fusion->drv_map_pages);
1341 if (!fusion->ld_drv_map[i]) {
1342 dev_err(&instance->pdev->dev, "Could not allocate "
1343 "memory for local map info for %d pages\n",
1344 fusion->drv_map_pages);
/* NOTE(review): frees index 0 on failure — assumes i==1 here; confirm. */
1346 free_pages((ulong)fusion->ld_drv_map[0],
1347 fusion->drv_map_pages);
1350 memset(fusion->ld_drv_map[i], 0,
1351 ((1 << PAGE_SHIFT) << fusion->drv_map_pages));
/* Two DMA-coherent firmware RAID map buffers, also double buffered. */
1354 for (i = 0; i < 2; i++) {
1355 fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev,
1357 &fusion->ld_map_phys[i],
1359 if (!fusion->ld_map[i]) {
1360 dev_err(&instance->pdev->dev, "Could not allocate memory "
/* Fetch the map from FW, then push our target-sync view back. */
1366 if (!megasas_get_map_info(instance))
1367 megasas_sync_map_info(instance);
/* Error unwind path. */
1373 dma_free_coherent(&instance->pdev->dev, fusion->max_map_sz,
1374 fusion->ld_map[0], fusion->ld_map_phys[0]);
1376 megasas_free_cmds_fusion(instance);
1378 megasas_free_cmds(instance);
1379 fail_alloc_mfi_cmds:
1384 * map_cmd_status - Maps FW cmd status to OS cmd status
1385 * @cmd : Pointer to cmd
1386 * @status : status of cmd returned by FW
1387 * @ext_status : ext status of cmd returned by FW
/*
 * Translates the firmware MFI status/extended-status pair into the SCSI
 * midlayer result word stored in cmd->scmd->result, copying sense data
 * when the FW reports CHECK CONDITION.
 */
1391 map_cmd_status(struct megasas_cmd_fusion *cmd, u8 status, u8 ext_status)
1397 cmd->scmd->result = DID_OK << 16;
1400 case MFI_STAT_SCSI_IO_FAILED:
1401 case MFI_STAT_LD_INIT_IN_PROGRESS:
1402 cmd->scmd->result = (DID_ERROR << 16) | ext_status;
1405 case MFI_STAT_SCSI_DONE_WITH_ERROR:
/* Device-level error: keep DID_OK, surface ext_status as SCSI status. */
1407 cmd->scmd->result = (DID_OK << 16) | ext_status;
1408 if (ext_status == SAM_STAT_CHECK_CONDITION) {
1409 memset(cmd->scmd->sense_buffer, 0,
1410 SCSI_SENSE_BUFFERSIZE);
1411 memcpy(cmd->scmd->sense_buffer, cmd->sense,
1412 SCSI_SENSE_BUFFERSIZE);
1413 cmd->scmd->result |= DRIVER_SENSE << 24;
1417 case MFI_STAT_LD_OFFLINE:
1418 case MFI_STAT_DEVICE_NOT_FOUND:
1419 cmd->scmd->result = DID_BAD_TARGET << 16;
1421 case MFI_STAT_CONFIG_SEQ_MISMATCH:
/* Stale map sequence: ask the midlayer to retry the command. */
1422 cmd->scmd->result = DID_IMM_RETRY << 16;
/* Unknown FW status: log it and fail the command. */
1425 dev_printk(KERN_DEBUG, &cmd->instance->pdev->dev, "FW status %#x\n", status);
1426 cmd->scmd->result = DID_ERROR << 16;
1432 * megasas_make_sgl_fusion - Prepares 32-bit SGL
1433 * @instance: Adapter soft state
1434 * @scp: SCSI command from the mid-layer
1435 * @sgl_ptr: SGL to be filled in
1436 * @cmd: cmd we are working on
1438 * If successful, this function returns the number of SG elements.
/*
 * DMA-maps the command's scatterlist and fills the IEEE SGEs in the main
 * frame; when there are more entries than fit, it appends a chain element
 * pointing at cmd->sg_frame and continues there.
 */
1441 megasas_make_sgl_fusion(struct megasas_instance *instance,
1442 struct scsi_cmnd *scp,
1443 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr,
1444 struct megasas_cmd_fusion *cmd)
1446 int i, sg_processed, sge_count;
1447 struct scatterlist *os_sgl;
1448 struct fusion_context *fusion;
1450 fusion = instance->ctrl_context;
/* Invader-series HW expects the last main-frame SGE's flags pre-cleared. */
1452 if (fusion->adapter_type == INVADER_SERIES) {
1453 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr;
1454 sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
1455 sgl_ptr_end->Flags = 0;
1458 sge_count = scsi_dma_map(scp);
1460 BUG_ON(sge_count < 0);
1462 if (sge_count > instance->max_num_sge || !sge_count)
1465 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1466 sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl));
1467 sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl));
/* Invader wants an explicit END_OF_LIST flag on the final SGE. */
1469 if (fusion->adapter_type == INVADER_SERIES)
1470 if (i == sge_count - 1)
1471 sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
1474 sg_processed = i + 1;
/* Main frame is full (minus one slot kept for the chain element) and
 * more SGEs remain: switch to the chain frame. */
1476 if ((sg_processed == (fusion->max_sge_in_main_msg - 1)) &&
1477 (sge_count > fusion->max_sge_in_main_msg)) {
1479 struct MPI25_IEEE_SGE_CHAIN64 *sg_chain;
1480 if (fusion->adapter_type == INVADER_SERIES) {
/* Fast-path IOs on Invader carry ChainOffset inside the RAID
 * context instead of the IO request header. */
1481 if ((le16_to_cpu(cmd->io_request->IoFlags) &
1482 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
1483 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
1484 cmd->io_request->ChainOffset =
1486 chain_offset_io_request;
1488 cmd->io_request->ChainOffset = 0;
1490 cmd->io_request->ChainOffset =
1491 fusion->chain_offset_io_request;
1494 /* Prepare chain element */
1495 sg_chain->NextChainOffset = 0;
1496 if (fusion->adapter_type == INVADER_SERIES)
1497 sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
1500 (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1501 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
1502 sg_chain->Length = cpu_to_le32((sizeof(union MPI2_SGE_IO_UNION) * (sge_count - sg_processed)));
1503 sg_chain->Address = cpu_to_le64(cmd->sg_frame_phys_addr);
/* Continue writing SGEs into the (zeroed) chain frame. */
1506 (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame;
1507 memset(sgl_ptr, 0, instance->max_chain_frame_sz);
1515 * megasas_set_pd_lba - Sets PD LBA
1517 * @cdb_len: cdb length
1518 * @start_blk: Start block of IO
1520 * Used to set the PD LBA in CDB for FP IOs
/*
 * Rewrites the CDB for fast-path IO to a physical drive:
 *  - controller-managed T10 PI (DIF) LDs get a 32-byte variable-length
 *    READ(32)/WRITE(32) CDB with EEDP reference tags;
 *  - 12/16-byte CDBs whose LBA fits in 32 bits are downgraded to 10-byte
 *    CDBs (some drives reject 12/16-byte forms);
 *  - short CDBs with a >32-bit LBA are upgraded to 16-byte CDBs;
 *  - otherwise the start LBA is patched into the existing CDB in place.
 */
1523 megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
1524 struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp,
1525 struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag)
1527 struct MR_LD_RAID *raid;
1529 u64 start_blk = io_info->pdBlock;
1530 u8 *cdb = io_request->CDB.CDB32;
1531 u32 num_blocks = io_info->numBlocks;
1532 u8 opcode = 0, flagvals = 0, groupnum = 0, control = 0;
1534 /* Check if T10 PI (DIF) is enabled for this LD */
1535 ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
1536 raid = MR_LdRaidGet(ld, local_map_ptr);
1537 if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) {
1538 memset(cdb, 0, sizeof(io_request->CDB.CDB32));
1539 cdb[0] = MEGASAS_SCSI_VARIABLE_LENGTH_CMD;
1540 cdb[7] = MEGASAS_SCSI_ADDL_CDB_LEN;
1542 if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1543 cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_READ32;
1545 cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_WRITE32;
1546 cdb[10] = MEGASAS_RD_WR_PROTECT_CHECK_ALL;
/* 64-bit LBA, big-endian, in bytes 12-19 of the 32-byte CDB. */
1549 cdb[12] = (u8)((start_blk >> 56) & 0xff);
1550 cdb[13] = (u8)((start_blk >> 48) & 0xff);
1551 cdb[14] = (u8)((start_blk >> 40) & 0xff);
1552 cdb[15] = (u8)((start_blk >> 32) & 0xff);
1553 cdb[16] = (u8)((start_blk >> 24) & 0xff);
1554 cdb[17] = (u8)((start_blk >> 16) & 0xff);
1555 cdb[18] = (u8)((start_blk >> 8) & 0xff);
1556 cdb[19] = (u8)(start_blk & 0xff);
1558 /* Logical block reference tag */
1559 io_request->CDB.EEDP32.PrimaryReferenceTag =
1560 cpu_to_be32(ref_tag);
1561 io_request->CDB.EEDP32.PrimaryApplicationTagMask = cpu_to_be16(0xffff);
1562 io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */
1564 /* Transfer length */
1565 cdb[28] = (u8)((num_blocks >> 24) & 0xff);
1566 cdb[29] = (u8)((num_blocks >> 16) & 0xff);
1567 cdb[30] = (u8)((num_blocks >> 8) & 0xff);
1568 cdb[31] = (u8)(num_blocks & 0xff);
1570 /* set SCSI IO EEDPFlags */
1571 if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) {
/* Reads: verify ref/app/guard tags and strip PI before DMA to host. */
1572 io_request->EEDPFlags = cpu_to_le16(
1573 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1574 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1575 MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
1576 MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
1577 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
/* Writes: controller inserts protection information. */
1579 io_request->EEDPFlags = cpu_to_le16(
1580 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1581 MPI2_SCSIIO_EEDPFLAGS_INSERT_OP);
1583 io_request->Control |= cpu_to_le32((0x4 << 26));
1584 io_request->EEDPBlockSize = cpu_to_le32(scp->device->sector_size);
1586 /* Some drives don't support 16/12 byte CDB's, convert to 10 */
1587 if (((cdb_len == 12) || (cdb_len == 16)) &&
1588 (start_blk <= 0xffffffff)) {
1589 if (cdb_len == 16) {
1590 opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
1595 opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
1601 memset(cdb, 0, sizeof(io_request->CDB.CDB32));
1608 /* Transfer length */
1609 cdb[8] = (u8)(num_blocks & 0xff);
1610 cdb[7] = (u8)((num_blocks >> 8) & 0xff);
1612 io_request->IoFlags = cpu_to_le16(10); /* Specify 10-byte cdb */
1614 } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
1615 /* Convert to 16 byte CDB for large LBA's */
1618 opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
1623 cdb[0] == READ_10 ? READ_16 : WRITE_16;
1630 cdb[0] == READ_12 ? READ_16 : WRITE_16;
1637 memset(cdb, 0, sizeof(io_request->CDB.CDB32));
1644 /* Transfer length */
1645 cdb[13] = (u8)(num_blocks & 0xff);
1646 cdb[12] = (u8)((num_blocks >> 8) & 0xff);
1647 cdb[11] = (u8)((num_blocks >> 16) & 0xff);
1648 cdb[10] = (u8)((num_blocks >> 24) & 0xff);
1650 io_request->IoFlags = cpu_to_le16(16); /* Specify 16-byte cdb */
1654 /* Normal case, just load LBA here */
/* 6-byte CDB: 21-bit LBA spread over bytes 1-3, preserve byte 1 top bits. */
1658 u8 val = cdb[1] & 0xE0;
1659 cdb[3] = (u8)(start_blk & 0xff);
1660 cdb[2] = (u8)((start_blk >> 8) & 0xff);
1661 cdb[1] = val | ((u8)(start_blk >> 16) & 0x1f);
/* 10/12-byte CDB: 32-bit big-endian LBA in bytes 2-5. */
1665 cdb[5] = (u8)(start_blk & 0xff);
1666 cdb[4] = (u8)((start_blk >> 8) & 0xff);
1667 cdb[3] = (u8)((start_blk >> 16) & 0xff);
1668 cdb[2] = (u8)((start_blk >> 24) & 0xff);
1671 cdb[5] = (u8)(start_blk & 0xff);
1672 cdb[4] = (u8)((start_blk >> 8) & 0xff);
1673 cdb[3] = (u8)((start_blk >> 16) & 0xff);
1674 cdb[2] = (u8)((start_blk >> 24) & 0xff);
/* 16-byte CDB: 64-bit big-endian LBA in bytes 2-9. */
1677 cdb[9] = (u8)(start_blk & 0xff);
1678 cdb[8] = (u8)((start_blk >> 8) & 0xff);
1679 cdb[7] = (u8)((start_blk >> 16) & 0xff);
1680 cdb[6] = (u8)((start_blk >> 24) & 0xff);
1681 cdb[5] = (u8)((start_blk >> 32) & 0xff);
1682 cdb[4] = (u8)((start_blk >> 40) & 0xff);
1683 cdb[3] = (u8)((start_blk >> 48) & 0xff);
1684 cdb[2] = (u8)((start_blk >> 56) & 0xff);
1691 * megasas_build_ldio_fusion - Prepares IOs to devices
1692 * @instance: Adapter soft state
1693 * @scp: SCSI command
1694 * @cmd: Command to be prepared
1696 * Prepares the io_request and chain elements (sg_frame) for IO
1697 * The IO can be for PD (Fast Path) or LD
/*
 * Decodes the LBA and block count from the 6/10/12/16-byte READ/WRITE CDB,
 * asks the RAID map (MR_BuildRaidContext) whether fast-path is possible,
 * and fills either a fast-path SCSI IO request aimed at the PD dev handle
 * or a firmware LD IO request.
 */
1700 megasas_build_ldio_fusion(struct megasas_instance *instance,
1701 struct scsi_cmnd *scp,
1702 struct megasas_cmd_fusion *cmd)
1705 u32 start_lba_lo, start_lba_hi, device_id, datalength = 0;
1706 struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
1707 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
1708 struct IO_REQUEST_INFO io_info;
1709 struct fusion_context *fusion;
1710 struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1713 device_id = MEGASAS_DEV_INDEX(scp);
1715 fusion = instance->ctrl_context;
1717 io_request = cmd->io_request;
1718 io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id);
1719 io_request->RaidContext.status = 0;
1720 io_request->RaidContext.exStatus = 0;
1722 req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
1729 * 6-byte READ(0x08) or WRITE(0x0A) cdb
1731 if (scp->cmd_len == 6) {
1732 datalength = (u32) scp->cmnd[4];
1733 start_lba_lo = ((u32) scp->cmnd[1] << 16) |
1734 ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3];
/* 6-byte CDB carries only a 21-bit LBA. */
1736 start_lba_lo &= 0x1FFFFF;
1740 * 10-byte READ(0x28) or WRITE(0x2A) cdb
1742 else if (scp->cmd_len == 10) {
1743 datalength = (u32) scp->cmnd[8] |
1744 ((u32) scp->cmnd[7] << 8);
1745 start_lba_lo = ((u32) scp->cmnd[2] << 24) |
1746 ((u32) scp->cmnd[3] << 16) |
1747 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
1751 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1753 else if (scp->cmd_len == 12) {
1754 datalength = ((u32) scp->cmnd[6] << 24) |
1755 ((u32) scp->cmnd[7] << 16) |
1756 ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
1757 start_lba_lo = ((u32) scp->cmnd[2] << 24) |
1758 ((u32) scp->cmnd[3] << 16) |
1759 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
1763 * 16-byte READ(0x88) or WRITE(0x8A) cdb
1765 else if (scp->cmd_len == 16) {
1766 datalength = ((u32) scp->cmnd[10] << 24) |
1767 ((u32) scp->cmnd[11] << 16) |
1768 ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13];
1769 start_lba_lo = ((u32) scp->cmnd[6] << 24) |
1770 ((u32) scp->cmnd[7] << 16) |
1771 ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
/* Only the 16-byte CDB can carry the upper 32 LBA bits. */
1773 start_lba_hi = ((u32) scp->cmnd[2] << 24) |
1774 ((u32) scp->cmnd[3] << 16) |
1775 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
1778 memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
1779 io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo;
1780 io_info.numBlocks = datalength;
1781 io_info.ldTgtId = device_id;
1782 io_request->DataLength = cpu_to_le32(scsi_bufflen(scp));
1784 if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
/* Pick the current driver RAID map copy by map_id parity. */
1787 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
/* Unknown target or fast path globally disabled: no region lock info. */
1789 if ((MR_TargetIdToLdGet(device_id, local_map_ptr) >=
1790 instance->fw_supported_vd_count) || (!fusion->fast_path_io)) {
1791 io_request->RaidContext.regLockFlags = 0;
1794 if (MR_BuildRaidContext(instance, &io_info,
1795 &io_request->RaidContext,
1796 local_map_ptr, &raidLUN))
1797 fp_possible = io_info.fpOkForIo;
1800 /* Use raw_smp_processor_id() for now until cmd->request->cpu is CPU
1801 id by default, not CPU group id, otherwise all MSI-X queues won't
1803 cmd->request_desc->SCSIIO.MSIxIndex = instance->msix_vectors ?
1804 raw_smp_processor_id() % instance->msix_vectors : 0;
/* ---- Fast path: talk to the PD directly. ---- */
1807 megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp,
1808 local_map_ptr, start_lba_lo);
1809 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1810 cmd->request_desc->SCSIIO.RequestFlags =
1811 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO
1812 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1813 if (fusion->adapter_type == INVADER_SERIES) {
1814 if (io_request->RaidContext.regLockFlags ==
1816 cmd->request_desc->SCSIIO.RequestFlags =
1817 (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1818 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1819 io_request->RaidContext.Type = MPI2_TYPE_CUDA;
1820 io_request->RaidContext.nseg = 0x1;
1821 io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
1822 io_request->RaidContext.regLockFlags |=
1823 (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
1824 MR_RL_FLAGS_SEQ_NUM_ENABLE);
/* RAID1 read load balancing: pick the least-busy arm's dev handle. */
1826 if ((fusion->load_balance_info[device_id].loadBalanceFlag) &&
1829 get_updated_dev_handle(instance,
1830 &fusion->load_balance_info[device_id],
1832 scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
1833 cmd->pd_r1_lb = io_info.pd_after_lb;
1835 scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
/* Dual-path PD: alternate between the two valid device handles. */
1837 if ((raidLUN[0] == 1) &&
1838 (local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].validHandles > 1)) {
1839 instance->dev_handle = !(instance->dev_handle);
1841 local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].devHandle[instance->dev_handle];
1844 cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
1845 io_request->DevHandle = io_info.devHandle;
1846 /* populate the LUN field */
1847 memcpy(io_request->LUN, raidLUN, 8);
/* ---- Firmware path: send the IO to the LD through FW. ---- */
1849 io_request->RaidContext.timeoutValue =
1850 cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec);
1851 cmd->request_desc->SCSIIO.RequestFlags =
1852 (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
1853 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1854 if (fusion->adapter_type == INVADER_SERIES) {
1855 if (io_info.do_fp_rlbypass ||
1856 (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED))
1857 cmd->request_desc->SCSIIO.RequestFlags =
1858 (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1859 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1860 io_request->RaidContext.Type = MPI2_TYPE_CUDA;
1861 io_request->RaidContext.regLockFlags |=
1862 (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
1863 MR_RL_FLAGS_SEQ_NUM_ENABLE);
1864 io_request->RaidContext.nseg = 0x1;
1866 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
1867 io_request->DevHandle = cpu_to_le16(device_id);
1872 * megasas_build_ld_nonrw_fusion - prepares non rw ios for virtual disk
1873 * @instance: Adapter soft state
1874 * @scp: SCSI command
1875 * @cmd: Command to be prepared
1877 * Prepares the io_request frame for non-rw io cmds for vd.
/*
 * Non-read/write commands (e.g. inquiry/mode sense class) for an LD:
 * fast-pathed straight to the underlying PD when the RAID map says the LD
 * is fpNonRWCapable (single-disk RAID0), otherwise routed through FW.
 */
1879 static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
1880 struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd)
1883 struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
1885 struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1886 struct fusion_context *fusion = instance->ctrl_context;
1890 struct MR_LD_RAID *raid;
1891 struct RAID_CONTEXT *pRAID_Context;
1894 io_request = cmd->io_request;
1895 device_id = MEGASAS_DEV_INDEX(scmd);
1896 pd_index = MEGASAS_PD_INDEX(scmd);
/* Current driver RAID map copy, selected by map_id parity. */
1897 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1898 io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
1899 /* get RAID_Context pointer */
1900 pRAID_Context = &io_request->RaidContext;
1901 /* Check with FW team */
1902 pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
1903 pRAID_Context->regLockRowLBA = 0;
1904 pRAID_Context->regLockLength = 0;
1906 if (fusion->fast_path_io && (
1907 device_id < instance->fw_supported_vd_count)) {
1909 ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
1910 if (ld >= instance->fw_supported_vd_count)
1913 raid = MR_LdRaidGet(ld, local_map_ptr);
/* Fast path only allowed when FW marks this LD non-RW capable. */
1914 if (!(raid->capability.fpNonRWCapable))
/* ---- FW path: plain LD IO request addressed by target id. ---- */
1920 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
1921 io_request->DevHandle = cpu_to_le16(device_id);
1922 io_request->LUN[1] = scmd->device->lun;
1923 pRAID_Context->timeoutValue =
1924 cpu_to_le16 (scmd->request->timeout / HZ);
1925 cmd->request_desc->SCSIIO.RequestFlags =
1926 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1927 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
/* ---- Fast path: resolve the PD dev handle from the map. ---- */
1930 /* set RAID context values */
1931 pRAID_Context->configSeqNum = raid->seqNum;
1932 pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ;
1933 pRAID_Context->timeoutValue = cpu_to_le16(raid->fpIoTimeoutForLd);
1935 /* get the DevHandle for the PD (since this is
1936 fpNonRWCapable, this is a single disk RAID0) */
1938 arRef = MR_LdSpanArrayGet(ld, span, local_map_ptr);
1939 pd = MR_ArPdGet(arRef, physArm, local_map_ptr);
1940 devHandle = MR_PdDevHandleGet(pd, local_map_ptr);
1942 /* build request descriptor */
1943 cmd->request_desc->SCSIIO.RequestFlags =
1944 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
1945 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1946 cmd->request_desc->SCSIIO.DevHandle = devHandle;
1948 /* populate the LUN field */
1949 memcpy(io_request->LUN, raid->LUN, 8);
1951 /* build the raidScsiIO structure */
1952 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1953 io_request->DevHandle = devHandle;
1958 * megasas_build_syspd_fusion - prepares rw/non-rw ios for syspd
1959 * @instance: Adapter soft state
1960 * @scp: SCSI command
1961 * @cmd: Command to be prepared
1962 * @fp_possible: parameter to detect fast path or firmware path io.
1964 * Prepares the io_request frame for rw/non-rw io cmds for syspds
/*
 * System (JBOD) physical drive IO: picks the device handle either from the
 * PD sequence-number sync table (when FW supports JBOD sequence map) or
 * from the RAID map, then builds a fast-path or FW-path request per
 * @fp_possible, clamping the fast-path timeout.
 */
1967 megasas_build_syspd_fusion(struct megasas_instance *instance,
1968 struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd, u8 fp_possible)
1971 struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
1973 u16 os_timeout_value;
1975 struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1976 struct RAID_CONTEXT *pRAID_Context;
1977 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
1978 struct fusion_context *fusion = instance->ctrl_context;
/* Previous PD-seq map copy, selected by pd_seq_map_id parity. */
1979 pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id - 1) & 1];
1981 device_id = MEGASAS_DEV_INDEX(scmd);
1982 pd_index = MEGASAS_PD_INDEX(scmd);
1983 os_timeout_value = scmd->request->timeout / HZ;
1985 io_request = cmd->io_request;
1986 /* get RAID_Context pointer */
1987 pRAID_Context = &io_request->RaidContext;
1988 pRAID_Context->regLockFlags = 0;
1989 pRAID_Context->regLockRowLBA = 0;
1990 pRAID_Context->regLockLength = 0;
1991 io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
1992 io_request->LUN[1] = scmd->device->lun;
1993 pRAID_Context->RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
1994 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
1996 /* If FW supports PD sequence number */
1997 if (instance->use_seqnum_jbod_fp &&
1998 instance->pd_list[pd_index].driveType == TYPE_DISK) {
1999 /* TgtId must be incremented by 255 as jbod seq number is index
2002 pRAID_Context->VirtualDiskTgtId =
2003 cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1));
2004 pRAID_Context->configSeqNum = pd_sync->seq[pd_index].seqNum;
2005 io_request->DevHandle = pd_sync->seq[pd_index].devHandle;
2006 pRAID_Context->regLockFlags |=
2007 (MR_RL_FLAGS_SEQ_NUM_ENABLE|MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
2008 pRAID_Context->Type = MPI2_TYPE_CUDA;
2009 pRAID_Context->nseg = 0x1;
2010 } else if (fusion->fast_path_io) {
/* No JBOD seq map: look up the current handle in the RAID map. */
2011 pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
2012 pRAID_Context->configSeqNum = 0;
2013 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
2014 io_request->DevHandle =
2015 local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
2017 /* Want to send all IO via FW path */
2018 pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
2019 pRAID_Context->configSeqNum = 0;
/* 0xFFFF dev handle forces firmware-path delivery. */
2020 io_request->DevHandle = cpu_to_le16(0xFFFF);
2023 cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
2024 cmd->request_desc->SCSIIO.MSIxIndex =
2025 instance->msix_vectors ?
2026 (raw_smp_processor_id() % instance->msix_vectors) : 0;
2030 /* system pd firmware path */
2031 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
2032 cmd->request_desc->SCSIIO.RequestFlags =
2033 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2034 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2035 pRAID_Context->timeoutValue = cpu_to_le16(os_timeout_value);
2036 pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
2038 /* system pd Fast Path */
2039 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
/* Cap the fast-path timeout; limit depends on disk vs non-disk type. */
2040 timeout_limit = (scmd->device->type == TYPE_DISK) ?
2042 pRAID_Context->timeoutValue =
2043 cpu_to_le16((os_timeout_value > timeout_limit) ?
2044 timeout_limit : os_timeout_value);
2045 if (fusion->adapter_type == INVADER_SERIES)
2046 io_request->IoFlags |=
2047 cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
2049 cmd->request_desc->SCSIIO.RequestFlags =
2050 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
2051 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2056 * megasas_build_io_fusion - Prepares IOs to devices
2057 * @instance: Adapter soft state
2058 * @scp: SCSI command
2059 * @cmd: Command to be prepared
2061 * Invokes helper functions to prepare request frames
2062 * and sets flags appropriate for IO/Non-IO cmd
/*
 * Common frame builder: clears the reusable IO-request fields, copies the
 * CDB, dispatches to the LD / non-RW LD / syspd builders by command type,
 * then builds the SGL and sense buffer pointers.
 */
2065 megasas_build_io_fusion(struct megasas_instance *instance,
2066 struct scsi_cmnd *scp,
2067 struct megasas_cmd_fusion *cmd)
2071 struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;
2073 /* Zero out some fields so they don't get reused */
2074 memset(io_request->LUN, 0x0, 8);
2075 io_request->CDB.EEDP32.PrimaryReferenceTag = 0;
2076 io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0;
2077 io_request->EEDPFlags = 0;
2078 io_request->Control = 0;
2079 io_request->EEDPBlockSize = 0;
2080 io_request->ChainOffset = 0;
2081 io_request->RaidContext.RAIDFlags = 0;
2082 io_request->RaidContext.Type = 0;
2083 io_request->RaidContext.nseg = 0;
2085 memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len);
2087 * Just the CDB length,rest of the Flags are zero
2088 * This will be modified for FP in build_ldio_fusion
2090 io_request->IoFlags = cpu_to_le16(scp->cmd_len);
2092 switch (cmd_type = megasas_cmd_type(scp)) {
2093 case READ_WRITE_LDIO:
2094 megasas_build_ldio_fusion(instance, scp, cmd);
2096 case NON_READ_WRITE_LDIO:
2097 megasas_build_ld_nonrw_fusion(instance, scp, cmd);
2099 case READ_WRITE_SYSPDIO:
2100 case NON_READ_WRITE_SYSPDIO:
/* Secure JBOD: non-RW PD commands must not take the fast path. */
2101 if (instance->secure_jbod_support &&
2102 (cmd_type == NON_READ_WRITE_SYSPDIO))
2103 megasas_build_syspd_fusion(instance, scp, cmd, 0);
2105 megasas_build_syspd_fusion(instance, scp, cmd, 1);
2116 megasas_make_sgl_fusion(instance, scp,
2117 (struct MPI25_IEEE_SGE_CHAIN64 *)
2118 &io_request->SGL, cmd);
2120 if (sge_count > instance->max_num_sge) {
2121 dev_err(&instance->pdev->dev, "Error. sge_count (0x%x) exceeds "
2122 "max (0x%x) allowed\n", sge_count,
2123 instance->max_num_sge);
2127 /* numSGE store lower 8 bit of sge_count.
2128 * numSGEExt store higher 8 bit of sge_count
2130 io_request->RaidContext.numSGE = sge_count;
2131 io_request->RaidContext.numSGEExt = (u8)(sge_count >> 8);
2133 io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
2135 if (scp->sc_data_direction == PCI_DMA_TODEVICE)
2136 io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE);
2137 else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
2138 io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ);
/* SGL offset is expressed in 32-bit words. */
2140 io_request->SGLOffset0 =
2141 offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4;
2143 io_request->SenseBufferLowAddress = cpu_to_le32(cmd->sense_phys_addr);
2144 io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
/* Stash the fusion cmd on the scsi_cmnd for completion/abort lookup. */
2147 scp->SCp.ptr = (char *)cmd;
/*
 * megasas_get_request_descriptor - return the request descriptor slot for
 * a SMID-derived index; NULL-logs and rejects indices beyond max_fw_cmds.
 * Descriptors live in the contiguous fusion->req_frames_desc array.
 */
2152 union MEGASAS_REQUEST_DESCRIPTOR_UNION *
2153 megasas_get_request_descriptor(struct megasas_instance *instance, u16 index)
2156 struct fusion_context *fusion;
2158 if (index >= instance->max_fw_cmds) {
2159 dev_err(&instance->pdev->dev, "Invalid SMID (0x%x)request for "
2160 "descriptor for scsi%d\n", index,
2161 instance->host->host_no);
2164 fusion = instance->ctrl_context;
/* Simple array indexing into the preallocated descriptor pool. */
2165 p = fusion->req_frames_desc
2166 +sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *index;
2168 return (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)p;
2172 * megasas_build_and_issue_cmd_fusion -Main routine for building and
2173 * issuing non IOCTL cmd
2174 * @instance: Adapter soft state
2175 * @scmd: pointer to scsi cmd from OS
/*
 * queuecommand back end: throttles LD IO against ldio_threshold, grabs the
 * fusion command matching the block-layer tag, builds the frame via
 * megasas_build_io_fusion(), and fires it at the firmware. Returns 0 or a
 * SCSI_MLQUEUE_* busy code for the midlayer to requeue.
 */
2178 megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
2179 struct scsi_cmnd *scmd)
2181 struct megasas_cmd_fusion *cmd;
2182 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2184 struct fusion_context *fusion;
2186 fusion = instance->ctrl_context;
/* Per-adapter LD IO throttle: undo the inc and push back on overflow. */
2188 if ((megasas_cmd_type(scmd) == READ_WRITE_LDIO) &&
2189 instance->ldio_threshold &&
2190 (atomic_inc_return(&instance->ldio_outstanding) >
2191 instance->ldio_threshold)) {
2192 atomic_dec(&instance->ldio_outstanding);
2193 return SCSI_MLQUEUE_DEVICE_BUSY;
/* Command slot is keyed off the block-layer request tag. */
2196 cmd = megasas_get_cmd_fusion(instance, scmd->request->tag);
2200 req_desc = megasas_get_request_descriptor(instance, index-1);
2202 return SCSI_MLQUEUE_HOST_BUSY;
2204 req_desc->Words = 0;
2205 cmd->request_desc = req_desc;
2207 if (megasas_build_io_fusion(instance, scmd, cmd)) {
2208 megasas_return_cmd_fusion(instance, cmd);
2209 dev_err(&instance->pdev->dev, "Error building command\n");
2210 cmd->request_desc = NULL;
2211 return SCSI_MLQUEUE_HOST_BUSY;
2214 req_desc = cmd->request_desc;
2215 req_desc->SCSIIO.SMID = cpu_to_le16(index);
/* Sanity check: ChainOffset must be 0 (no chain) or the fixed 0xF. */
2217 if (cmd->io_request->ChainOffset != 0 &&
2218 cmd->io_request->ChainOffset != 0xF)
2219 dev_err(&instance->pdev->dev, "The chain offset value is not "
2220 "correct : %x\n", cmd->io_request->ChainOffset);
2223 * Issue the command to the FW
2225 atomic_inc(&instance->fw_outstanding);
2227 megasas_fire_cmd_fusion(instance, req_desc);
2233 * complete_cmd_fusion - Completes command
2234 * @instance: Adapter soft state
2235 * Completes all commands that is in reply descriptor queue
2238 complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
2240 union MPI2_REPLY_DESCRIPTORS_UNION *desc;
2241 struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
2242 struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req;
2243 struct fusion_context *fusion;
2244 struct megasas_cmd *cmd_mfi;
2245 struct megasas_cmd_fusion *cmd_fusion;
2246 u16 smid, num_completed;
2247 u8 reply_descript_type;
2248 u32 status, extStatus, device_id;
2249 union desc_value d_val;
2250 struct LD_LOAD_BALANCE_INFO *lbinfo;
2251 int threshold_reply_count = 0;
2252 struct scsi_cmnd *scmd_local = NULL;
2253 struct MR_TASK_MANAGE_REQUEST *mr_tm_req;
2254 struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req;
2256 fusion = instance->ctrl_context;
2258 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
2261 desc = fusion->reply_frames_desc[MSIxIndex] +
2262 fusion->last_reply_idx[MSIxIndex];
2264 reply_desc = (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2266 d_val.word = desc->Words;
2268 reply_descript_type = reply_desc->ReplyFlags &
2269 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2271 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2276 while (d_val.u.low != cpu_to_le32(UINT_MAX) &&
2277 d_val.u.high != cpu_to_le32(UINT_MAX)) {
2278 smid = le16_to_cpu(reply_desc->SMID);
2280 cmd_fusion = fusion->cmd_list[smid - 1];
2283 (struct MPI2_RAID_SCSI_IO_REQUEST *)
2284 cmd_fusion->io_request;
2286 if (cmd_fusion->scmd)
2287 cmd_fusion->scmd->SCp.ptr = NULL;
2289 scmd_local = cmd_fusion->scmd;
2290 status = scsi_io_req->RaidContext.status;
2291 extStatus = scsi_io_req->RaidContext.exStatus;
2293 switch (scsi_io_req->Function) {
2294 case MPI2_FUNCTION_SCSI_TASK_MGMT:
2295 mr_tm_req = (struct MR_TASK_MANAGE_REQUEST *)
2296 cmd_fusion->io_request;
2297 mpi_tm_req = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *)
2298 &mr_tm_req->TmRequest;
2299 dev_dbg(&instance->pdev->dev, "TM completion:"
2300 "type: 0x%x TaskMID: 0x%x\n",
2301 mpi_tm_req->TaskType, mpi_tm_req->TaskMID);
2302 complete(&cmd_fusion->done);
2304 case MPI2_FUNCTION_SCSI_IO_REQUEST: /*Fast Path IO.*/
2305 /* Update load balancing info */
2306 device_id = MEGASAS_DEV_INDEX(scmd_local);
2307 lbinfo = &fusion->load_balance_info[device_id];
2308 if (cmd_fusion->scmd->SCp.Status &
2309 MEGASAS_LOAD_BALANCE_FLAG) {
2310 atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]);
2311 cmd_fusion->scmd->SCp.Status &=
2312 ~MEGASAS_LOAD_BALANCE_FLAG;
2314 if (reply_descript_type ==
2315 MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
2316 if (megasas_dbg_lvl == 5)
2317 dev_err(&instance->pdev->dev, "\nFAST Path "
2320 /* Fall thru and complete IO */
2321 case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */
2322 /* Map the FW Cmd Status */
2323 map_cmd_status(cmd_fusion, status, extStatus);
2324 scsi_io_req->RaidContext.status = 0;
2325 scsi_io_req->RaidContext.exStatus = 0;
2326 if (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
2327 atomic_dec(&instance->ldio_outstanding);
2328 megasas_return_cmd_fusion(instance, cmd_fusion);
2329 scsi_dma_unmap(scmd_local);
2330 scmd_local->scsi_done(scmd_local);
2331 atomic_dec(&instance->fw_outstanding);
2334 case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
2335 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
2337 /* Poll mode. Dummy free.
2338 * In case of Interrupt mode, caller has reverse check.
2340 if (cmd_mfi->flags & DRV_DCMD_POLLED_MODE) {
2341 cmd_mfi->flags &= ~DRV_DCMD_POLLED_MODE;
2342 megasas_return_cmd(instance, cmd_mfi);
2344 megasas_complete_cmd(instance, cmd_mfi, DID_OK);
2348 fusion->last_reply_idx[MSIxIndex]++;
2349 if (fusion->last_reply_idx[MSIxIndex] >=
2350 fusion->reply_q_depth)
2351 fusion->last_reply_idx[MSIxIndex] = 0;
2353 desc->Words = cpu_to_le64(ULLONG_MAX);
2355 threshold_reply_count++;
2357 /* Get the next reply descriptor */
2358 if (!fusion->last_reply_idx[MSIxIndex])
2359 desc = fusion->reply_frames_desc[MSIxIndex];
2364 (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2366 d_val.word = desc->Words;
2368 reply_descript_type = reply_desc->ReplyFlags &
2369 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2371 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2374 * Write to reply post host index register after completing threshold
2375 * number of reply counts and still there are more replies in reply queue
2376 * pending to be completed
2378 if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
2379 if (fusion->adapter_type == INVADER_SERIES)
2380 writel(((MSIxIndex & 0x7) << 24) |
2381 fusion->last_reply_idx[MSIxIndex],
2382 instance->reply_post_host_index_addr[MSIxIndex/8]);
2384 writel((MSIxIndex << 24) |
2385 fusion->last_reply_idx[MSIxIndex],
2386 instance->reply_post_host_index_addr[0]);
2387 threshold_reply_count = 0;
2395 if (fusion->adapter_type == INVADER_SERIES)
2396 writel(((MSIxIndex & 0x7) << 24) |
2397 fusion->last_reply_idx[MSIxIndex],
2398 instance->reply_post_host_index_addr[MSIxIndex/8]);
2400 writel((MSIxIndex << 24) |
2401 fusion->last_reply_idx[MSIxIndex],
2402 instance->reply_post_host_index_addr[0]);
2403 megasas_check_and_restore_queue_depth(instance);
2408 * megasas_complete_cmd_dpc_fusion - Completes command
2409 * @instance: Adapter soft state
2411 * Tasklet to complete cmds
/* Tasklet body: drains every reply queue via complete_cmd_fusion(). */
2414 megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
2416 struct megasas_instance *instance =
2417 (struct megasas_instance *)instance_addr;
2418 unsigned long flags;
2419 u32 count, MSIxIndex;
/* At least one queue exists even when MSI-X is not in use. */
2421 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
2423 /* If we have already declared adapter dead, donot complete cmds */
2424 spin_lock_irqsave(&instance->hba_lock, flags);
2425 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2426 spin_unlock_irqrestore(&instance->hba_lock, flags);
2429 spin_unlock_irqrestore(&instance->hba_lock, flags);
/* Drain each reply queue in turn. */
2431 for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++)
2432 complete_cmd_fusion(instance, MSIxIndex);
2436 * megasas_isr_fusion - isr entry point
/*
 * Interrupt handler for fusion adapters: drains the caller's reply queue
 * and, if nothing completed, checks the firmware status for a fault.
 * NOTE(review): some return statements/braces are elided in this extract.
 */
2438 irqreturn_t megasas_isr_fusion(int irq, void *devp)
2440 struct megasas_irq_context *irq_context = devp;
2441 struct megasas_instance *instance = irq_context->instance;
2442 u32 mfiStatus, fw_state, dma_state;
/* Interrupts masked (e.g. during init/teardown): not ours. */
2444 if (instance->mask_interrupts)
/* Legacy (non-MSI-X) interrupts must be acknowledged explicitly. */
2447 if (!instance->msix_vectors) {
2448 mfiStatus = instance->instancet->clear_intr(instance->reg_set);
2453 /* If we are resetting, bail */
2454 if (test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) {
2455 instance->instancet->clear_intr(instance->reg_set);
2459 if (!complete_cmd_fusion(instance, irq_context->MSIxIndex)) {
2460 instance->instancet->clear_intr(instance->reg_set);
2461 /* If we didn't complete any commands, check for FW fault */
2462 fw_state = instance->instancet->read_fw_status_reg(
2463 instance->reg_set) & MFI_STATE_MASK;
2464 dma_state = instance->instancet->read_fw_status_reg
2465 (instance->reg_set) & MFI_STATE_DMADONE;
/* Crash-dump capable setups collect a dump before recovering. */
2466 if (instance->crash_dump_drv_support &&
2467 instance->crash_dump_app_support) {
2468 /* Start collecting crash, if DMA bit is done */
2469 if ((fw_state == MFI_STATE_FAULT) && dma_state)
2470 schedule_work(&instance->crash_init);
2471 else if (fw_state == MFI_STATE_FAULT) {
2472 if (instance->unload == 0)
2473 schedule_work(&instance->work_init);
2475 } else if (fw_state == MFI_STATE_FAULT) {
2476 dev_warn(&instance->pdev->dev, "Iop2SysDoorbellInt"
2477 "for scsi%d\n", instance->host->host_no);
/* Only trigger OCR work if the driver is not unloading. */
2478 if (instance->unload == 0)
2479 schedule_work(&instance->work_init);
2487 * build_mpt_mfi_pass_thru - builds a cmd fo MFI Pass thru
2488 * @instance: Adapter soft state
2489 * mfi_cmd: megasas_cmd pointer
/*
 * Wraps a legacy MFI frame inside an MPT pass-through request whose chain
 * element points at the MFI frame's physical address.
 */
2493 build_mpt_mfi_pass_thru(struct megasas_instance *instance,
2494 struct megasas_cmd *mfi_cmd)
2496 struct MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
2497 struct MPI2_RAID_SCSI_IO_REQUEST *io_req;
2498 struct megasas_cmd_fusion *cmd;
2499 struct fusion_context *fusion;
2500 struct megasas_header *frame_hdr = &mfi_cmd->frame->hdr;
2502 fusion = instance->ctrl_context;
/* MFI frames use the fusion frames reserved past max_scsi_cmds. */
2504 cmd = megasas_get_cmd_fusion(instance,
2505 instance->max_scsi_cmds + mfi_cmd->index);
2507 /* Save the smid. To be used for returning the cmd */
2508 mfi_cmd->context.smid = cmd->index;
2511 * For cmds where the flag is set, store the flag and check
2512 * on completion. For cmds with this flag, don't call
2513 * megasas_complete_cmd
2516 if (frame_hdr->flags & cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE))
2517 mfi_cmd->flags |= DRV_DCMD_POLLED_MODE;
2519 io_req = cmd->io_request;
/* Invader family requires the last main-message SGE flags cleared. */
2521 if (fusion->adapter_type == INVADER_SERIES) {
2522 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end =
2523 (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL;
2524 sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
2525 sgl_ptr_end->Flags = 0;
2529 (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;
2531 io_req->Function = MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
2532 io_req->SGLOffset0 = offsetof(struct MPI2_RAID_SCSI_IO_REQUEST,
2534 io_req->ChainOffset = fusion->chain_offset_mfi_pthru;
/* Chain element points at the legacy MFI frame in DMA memory. */
2536 mpi25_ieee_chain->Address = cpu_to_le64(mfi_cmd->frame_phys_addr);
2538 mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2539 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
2541 mpi25_ieee_chain->Length = cpu_to_le32(instance->max_chain_frame_sz);
2547 * build_mpt_cmd - Calls helper function to build a cmd MFI Pass thru cmd
2548 * @instance: Adapter soft state
2549 * @cmd: mfi cmd to build
/* Returns the request descriptor to fire, or NULL-ish on failure
 * (NOTE(review): the failure return is elided in this extract). */
2552 union MEGASAS_REQUEST_DESCRIPTOR_UNION *
2553 build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
2555 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2558 if (build_mpt_mfi_pass_thru(instance, cmd)) {
2559 dev_err(&instance->pdev->dev, "Couldn't build MFI pass thru cmd\n");
/* SMID saved by build_mpt_mfi_pass_thru(); descriptors are 0-based. */
2563 index = cmd->context.smid;
2565 req_desc = megasas_get_request_descriptor(instance, index - 1);
2570 req_desc->Words = 0;
2571 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2572 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2574 req_desc->SCSIIO.SMID = cpu_to_le16(index);
2580 * megasas_issue_dcmd_fusion - Issues a MFI Pass thru cmd
2581 * @instance: Adapter soft state
2582 * @cmd: mfi cmd pointer
/* Returns DCMD_SUCCESS once fired, DCMD_NOT_FIRED if the descriptor
 * could not be built. */
2586 megasas_issue_dcmd_fusion(struct megasas_instance *instance,
2587 struct megasas_cmd *cmd)
2589 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2591 req_desc = build_mpt_cmd(instance, cmd);
/* NOTE(review): the NULL check around this error path is elided here. */
2593 dev_info(&instance->pdev->dev, "Failed from %s %d\n",
2594 __func__, __LINE__);
2595 return DCMD_NOT_FIRED;
2598 megasas_fire_cmd_fusion(instance, req_desc);
2599 return DCMD_SUCCESS;
2603 * megasas_release_fusion - Reverses the FW initialization
2604 * @instance: Adapter soft state
/* Frees both MFI and fusion command pools, then unmaps registers and
 * releases the PCI BAR region. */
2607 megasas_release_fusion(struct megasas_instance *instance)
2609 megasas_free_cmds(instance);
2610 megasas_free_cmds_fusion(instance);
2612 iounmap(instance->reg_set);
2614 pci_release_selected_regions(instance->pdev, 1<<instance->bar);
2618 * megasas_read_fw_status_reg_fusion - returns the current FW status value
2619 * @regs: MFI register set
/* Reads the outbound scratch pad register, which holds FW state. */
2622 megasas_read_fw_status_reg_fusion(struct megasas_register_set __iomem *regs)
2624 return readl(&(regs)->outbound_scratch_pad);
2628 * megasas_alloc_host_crash_buffer - Host buffers for Crash dump collection from Firmware
2629 * @instance: Controller's soft instance
2630 * return: Number of allocated host crash buffers
/* Allocates up to MAX_CRASH_DUMP_SIZE page-order chunks; stops at the
 * first allocation failure and records how many succeeded. */
2633 megasas_alloc_host_crash_buffer(struct megasas_instance *instance)
2637 instance->crash_buf_pages = get_order(CRASH_DMA_BUF_SIZE);
2638 for (i = 0; i < MAX_CRASH_DUMP_SIZE; i++) {
2639 instance->crash_buf[i] = (void *)__get_free_pages(GFP_KERNEL,
2640 instance->crash_buf_pages);
2641 if (!instance->crash_buf[i]) {
2642 dev_info(&instance->pdev->dev, "Firmware crash dump "
2643 "memory allocation failed at index %d\n", i);
/* Zero each chunk; size is PAGE_SIZE << order. */
2646 memset(instance->crash_buf[i], 0,
2647 ((1 << PAGE_SHIFT) << instance->crash_buf_pages));
/* i is the count of successfully allocated buffers. */
2649 instance->drv_buf_alloc = i;
2653 * megasas_free_host_crash_buffer - Host buffers for Crash dump collection from Firmware
2654 * @instance: Controller's soft instance
/* Frees every allocated crash buffer and resets the crash-dump state. */
2657 megasas_free_host_crash_buffer(struct megasas_instance *instance)
2661 for (i = 0; i < instance->drv_buf_alloc; i++) {
2662 if (instance->crash_buf[i])
2663 free_pages((ulong)instance->crash_buf[i],
2664 instance->crash_buf_pages);
2666 instance->drv_buf_index = 0;
2667 instance->drv_buf_alloc = 0;
2668 instance->fw_crash_state = UNAVAILABLE;
2669 instance->fw_crash_buffer_size = 0;
2673 * megasas_adp_reset_fusion - For controller reset
2674 * @regs: MFI register set
/*
 * Performs the MPI2 diagnostic reset: write-sequence unlock, chip reset,
 * then wait for the reset bit to clear and FW state to leave FW_INIT.
 * NOTE(review): retry initialization/sleep lines are elided in this extract.
 */
2677 megasas_adp_reset_fusion(struct megasas_instance *instance,
2678 struct megasas_register_set __iomem *regs)
2680 u32 host_diag, abs_state, retry;
2682 /* Now try to reset the chip */
/* Magic write sequence unlocks the host diagnostic register. */
2683 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
2684 writel(MPI2_WRSEQ_1ST_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
2685 writel(MPI2_WRSEQ_2ND_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
2686 writel(MPI2_WRSEQ_3RD_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
2687 writel(MPI2_WRSEQ_4TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
2688 writel(MPI2_WRSEQ_5TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
2689 writel(MPI2_WRSEQ_6TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
2691 /* Check that the diag write enable (DRWE) bit is on */
2692 host_diag = readl(&instance->reg_set->fusion_host_diag);
2694 while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
2696 host_diag = readl(&instance->reg_set->fusion_host_diag);
/* Give up after 100 polls of the DRWE bit. */
2697 if (retry++ == 100) {
2698 dev_warn(&instance->pdev->dev,
2699 "Host diag unlock failed from %s %d\n",
2700 __func__, __LINE__);
2704 if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
2707 /* Send chip reset command */
2708 writel(host_diag | HOST_DIAG_RESET_ADAPTER,
2709 &instance->reg_set->fusion_host_diag);
2712 /* Make sure reset adapter bit is cleared */
2713 host_diag = readl(&instance->reg_set->fusion_host_diag);
2715 while (host_diag & HOST_DIAG_RESET_ADAPTER) {
2717 host_diag = readl(&instance->reg_set->fusion_host_diag);
/* Longer timeout here: the reset itself may take a while. */
2718 if (retry++ == 1000) {
2719 dev_warn(&instance->pdev->dev,
2720 "Diag reset adapter never cleared %s %d\n",
2721 __func__, __LINE__);
2725 if (host_diag & HOST_DIAG_RESET_ADAPTER)
/* Wait for firmware to progress past the FW_INIT state. */
2728 abs_state = instance->instancet->read_fw_status_reg(instance->reg_set)
2732 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
2734 abs_state = instance->instancet->
2735 read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
2737 if (abs_state <= MFI_STATE_FW_INIT) {
2738 dev_warn(&instance->pdev->dev,
2739 "fw state < MFI_STATE_FW_INIT, state = 0x%x %s %d\n",
2740 abs_state, __func__, __LINE__);
2748 * megasas_check_reset_fusion - For controller reset check
2749 * @regs: MFI register set
/* Body elided in this extract; visibly a stub for the instancet ops table. */
2752 megasas_check_reset_fusion(struct megasas_instance *instance,
2753 struct megasas_register_set __iomem *regs)
2758 /* This function waits for outstanding commands on fusion to complete */
/*
 * Polls up to 'resetwaittime' iterations for fw_outstanding to drain,
 * bailing early on FW fault, MFI IO timeout OCR, or SR-IOV heartbeat loss.
 * @reason:  why the wait was requested (e.g. MFI_IO_TIMEOUT_OCR)
 * @convert: out-flag used by the caller's reset path
 * Returns 0 when commands drained, non-zero to request adapter reset
 * (NOTE(review): several return/goto lines are elided in this extract).
 */
2759 int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
2760 int reason, int *convert)
2762 int i, outstanding, retval = 0, hb_seconds_missed = 0;
2765 for (i = 0; i < resetwaittime; i++) {
2766 /* Check if firmware is in fault state */
2767 fw_state = instance->instancet->read_fw_status_reg(
2768 instance->reg_set) & MFI_STATE_MASK;
2769 if (fw_state == MFI_STATE_FAULT) {
2770 dev_warn(&instance->pdev->dev, "Found FW in FAULT state,"
2771 " will reset adapter scsi%d.\n",
2772 instance->host->host_no);
/* Flush any replies already posted before resetting. */
2773 megasas_complete_cmd_dpc_fusion((unsigned long)instance);
2778 if (reason == MFI_IO_TIMEOUT_OCR) {
2779 dev_info(&instance->pdev->dev,
2780 "MFI IO is timed out, initiating OCR\n");
2781 megasas_complete_cmd_dpc_fusion((unsigned long)instance);
2786 /* If SR-IOV VF mode & heartbeat timeout, don't wait */
2787 if (instance->requestorId && !reason) {
2792 /* If SR-IOV VF mode & I/O timeout, check for HB timeout */
2793 if (instance->requestorId && reason) {
/* FW heartbeat advanced since last check: reset the miss counter. */
2794 if (instance->hb_host_mem->HB.fwCounter !=
2795 instance->hb_host_mem->HB.driverCounter) {
2796 instance->hb_host_mem->HB.driverCounter =
2797 instance->hb_host_mem->HB.fwCounter;
2798 hb_seconds_missed = 0;
2800 hb_seconds_missed++;
2801 if (hb_seconds_missed ==
2802 (MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF/HZ)) {
2803 dev_warn(&instance->pdev->dev, "SR-IOV:"
2804 " Heartbeat never completed "
2805 " while polling during I/O "
2806 " timeout handling for "
2808 instance->host->host_no);
2816 outstanding = atomic_read(&instance->fw_outstanding);
/* Periodically log progress and drain posted replies while waiting. */
2820 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
2821 dev_notice(&instance->pdev->dev, "[%2d]waiting for %d "
2822 "commands to complete for scsi%d\n", i,
2823 outstanding, instance->host->host_no);
2824 megasas_complete_cmd_dpc_fusion(
2825 (unsigned long)instance);
2830 if (atomic_read(&instance->fw_outstanding)) {
2831 dev_err(&instance->pdev->dev, "pending commands remain after waiting, "
2832 "will reset adapter scsi%d.\n",
2833 instance->host->host_no);
/* Reinitializes every reply queue: resets consumer indices and stamps all
 * descriptors to the all-ones "unused" pattern. Used during reset. */
2841 void megasas_reset_reply_desc(struct megasas_instance *instance)
2844 struct fusion_context *fusion;
2845 union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
2847 fusion = instance->ctrl_context;
2848 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
2849 for (i = 0 ; i < count ; i++) {
2850 fusion->last_reply_idx[i] = 0;
2851 reply_desc = fusion->reply_frames_desc[i];
/* ULLONG_MAX marks a descriptor as never written by FW. */
2852 for (j = 0 ; j < fusion->reply_q_depth; j++, reply_desc++)
2853 reply_desc->Words = cpu_to_le64(ULLONG_MAX);
2858 * megasas_refire_mgmt_cmd : Re-fire management commands
2859 * @instance: Controller's soft instance
/* After a reset, re-issues pending management (MFI) commands except map-get
 * DCMDs and commands flagged DRV_DCMD_SKIP_REFIRE, which are returned. */
2861 void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
2864 struct megasas_cmd_fusion *cmd_fusion;
2865 struct fusion_context *fusion;
2866 struct megasas_cmd *cmd_mfi;
2867 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2869 bool refire_cmd = 0;
2871 fusion = instance->ctrl_context;
2873 /* Re-fire management commands.
2874 * Do not traverse complet MPT frame pool. Start from max_scsi_cmds.
2876 for (j = instance->max_scsi_cmds ; j < instance->max_fw_cmds; j++) {
2877 cmd_fusion = fusion->cmd_list[j];
2878 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
2879 smid = le16_to_cpu(cmd_mfi->context.smid);
2883 req_desc = megasas_get_request_descriptor
2884 (instance, smid - 1);
/* Refire only when a descriptor exists, the DCMD is not a RAID/PD map
 * fetch, and the command is not flagged to skip refire. */
2885 refire_cmd = req_desc && ((cmd_mfi->frame->dcmd.opcode !=
2886 cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) &&
2887 (cmd_mfi->frame->dcmd.opcode !=
2888 cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO)))
2889 && !(cmd_mfi->flags & DRV_DCMD_SKIP_REFIRE);
2891 megasas_fire_cmd_fusion(instance, req_desc);
2893 megasas_return_cmd(instance, cmd_mfi);
2898 * megasas_track_scsiio : Track SCSI IOs outstanding to a SCSI device
2899 * @instance: per adapter struct
2900 * @channel: the channel assigned by the OS
2901 * @id: the id assigned by the OS
2903 * Returns SUCCESS if no IOs pending to SCSI device, else return FAILED
2906 static int megasas_track_scsiio(struct megasas_instance *instance,
2907 int id, int channel)
2910 struct megasas_cmd_fusion *cmd_fusion;
2911 struct fusion_context *fusion;
2912 fusion = instance->ctrl_context;
/* Scan only the SCSI-IO portion of the fusion command pool. */
2914 for (i = 0 ; i < instance->max_scsi_cmds; i++) {
2915 cmd_fusion = fusion->cmd_list[i];
2916 if (cmd_fusion->scmd &&
2917 (cmd_fusion->scmd->device->id == id &&
2918 cmd_fusion->scmd->device->channel == channel)) {
2919 dev_info(&instance->pdev->dev,
2920 "SCSI commands pending to target"
2921 "channel %d id %d \tSMID: 0x%x\n",
2922 channel, id, cmd_fusion->index);
2923 scsi_print_command(cmd_fusion->scmd);
2929 return found ? FAILED : SUCCESS;
2933 * megasas_tm_response_code - translation of device response code
2934 * @ioc: per adapter object
2935 * @mpi_reply: MPI reply returned by firmware
/* Debug-only helper: maps an MPI2 TM response code to a human-readable
 * string and logs the reply details at dbg level. */
2940 megasas_tm_response_code(struct megasas_instance *instance,
2941 struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply)
2945 switch (mpi_reply->ResponseCode) {
2946 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
2947 desc = "task management request completed";
2949 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
2950 desc = "invalid frame";
2952 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
2953 desc = "task management request not supported";
2955 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
2956 desc = "task management request failed";
2958 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
2959 desc = "task management request succeeded";
2961 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
2962 desc = "invalid lun";
2965 desc = "overlapped tag attempted";
2967 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
2968 desc = "task queued, however not sent to target";
2974 dev_dbg(&instance->pdev->dev, "response_code(%01x): %s\n",
2975 mpi_reply->ResponseCode, desc);
2976 dev_dbg(&instance->pdev->dev,
2977 "TerminationCount/DevHandle/Function/TaskType/IOCStat/IOCLoginfo"
2978 " 0x%x/0x%x/0x%x/0x%x/0x%x/0x%x\n",
2979 mpi_reply->TerminationCount, mpi_reply->DevHandle,
2980 mpi_reply->Function, mpi_reply->TaskType,
2981 mpi_reply->IOCStatus, mpi_reply->IOCLogInfo);
2985 * megasas_issue_tm - main routine for sending tm requests
2986 * @instance: per adapter struct
2987 * @device_handle: device handle
2988 * @channel: the channel assigned by the OS
2989 * @id: the id assigned by the OS
2990 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in megaraid_sas_fusion.c)
2991 * @smid_task: smid assigned to the task
2992 * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF
2995 * MegaRaid use MPT interface for Task Magement request.
2996 * A generic API for sending task management requests to firmware.
2998 * Return SUCCESS or FAILED.
/* NOTE(review): called with instance->reset_mutex held (it is dropped
 * and re-taken around the OCR path below) — confirm against callers. */
3001 megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
3002 uint channel, uint id, u16 smid_task, u8 type)
3004 struct MR_TASK_MANAGE_REQUEST *mr_request;
3005 struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_request;
3006 unsigned long timeleft;
3007 struct megasas_cmd_fusion *cmd_fusion;
3008 struct megasas_cmd *cmd_mfi;
3009 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3010 struct fusion_context *fusion;
3011 struct megasas_cmd_fusion *scsi_lookup;
3013 struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply;
3015 fusion = instance->ctrl_context;
3017 cmd_mfi = megasas_get_cmd(instance);
3020 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
3021 __func__, __LINE__);
/* TM uses a fusion frame from the reserved (non-SCSI-IO) range. */
3025 cmd_fusion = megasas_get_cmd_fusion(instance,
3026 instance->max_scsi_cmds + cmd_mfi->index);
3028 /* Save the smid. To be used for returning the cmd */
3029 cmd_mfi->context.smid = cmd_fusion->index;
3031 req_desc = megasas_get_request_descriptor(instance,
3032 (cmd_fusion->index - 1));
3034 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
3035 __func__, __LINE__);
3036 megasas_return_cmd(instance, cmd_mfi);
3040 cmd_fusion->request_desc = req_desc;
3041 req_desc->Words = 0;
/* Command the TM targets, looked up by its SMID (1-based). */
3043 scsi_lookup = fusion->cmd_list[smid_task - 1];
3045 mr_request = (struct MR_TASK_MANAGE_REQUEST *) cmd_fusion->io_request;
3046 memset(mr_request, 0, sizeof(struct MR_TASK_MANAGE_REQUEST));
3047 mpi_request = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *) &mr_request->TmRequest;
3048 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3049 mpi_request->DevHandle = cpu_to_le16(device_handle);
3050 mpi_request->TaskType = type;
3051 mpi_request->TaskMID = cpu_to_le16(smid_task);
3052 mpi_request->LUN[1] = 0;
/* TM requests go out as high-priority descriptors. */
3055 req_desc = cmd_fusion->request_desc;
3056 req_desc->HighPriority.SMID = cpu_to_le16(cmd_fusion->index);
3057 req_desc->HighPriority.RequestFlags =
3058 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
3059 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3060 req_desc->HighPriority.MSIxIndex = 0;
3061 req_desc->HighPriority.LMID = 0;
3062 req_desc->HighPriority.Reserved1 = 0;
/* Channel number distinguishes physical drives from logical drives. */
3064 if (channel < MEGASAS_MAX_PD_CHANNELS)
3065 mr_request->tmReqFlags.isTMForPD = 1;
3067 mr_request->tmReqFlags.isTMForLD = 1;
3069 init_completion(&cmd_fusion->done);
3070 megasas_fire_cmd_fusion(instance, req_desc);
/* Completed by complete_cmd_fusion() on the TM reply; 50s timeout. */
3072 timeleft = wait_for_completion_timeout(&cmd_fusion->done, 50 * HZ);
3075 dev_err(&instance->pdev->dev,
3076 "task mgmt type 0x%x timed out\n", type);
/* Don't re-issue this MFI frame after the reset below. */
3077 cmd_mfi->flags |= DRV_DCMD_SKIP_REFIRE;
3078 mutex_unlock(&instance->reset_mutex);
3079 rc = megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR);
3080 mutex_lock(&instance->reset_mutex);
3084 mpi_reply = (struct MPI2_SCSI_TASK_MANAGE_REPLY *) &mr_request->TMReply;
3085 megasas_tm_response_code(instance, mpi_reply);
3087 megasas_return_cmd(instance, cmd_mfi);
/* Post-TM verification, per task type. */
3090 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
3091 if (scsi_lookup->scmd == NULL)
/* Drain replies with interrupts off, then re-check the aborted cmd. */
3094 instance->instancet->disable_intr(instance);
3096 megasas_complete_cmd_dpc_fusion
3097 ((unsigned long)instance);
3098 instance->instancet->enable_intr(instance);
3099 if (scsi_lookup->scmd == NULL)
3105 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
3106 if ((channel == 0xFFFFFFFF) && (id == 0xFFFFFFFF))
3108 instance->instancet->disable_intr(instance);
3110 megasas_complete_cmd_dpc_fusion
3111 ((unsigned long)instance);
/* FAILED if IOs are still pending to the reset target. */
3112 rc = megasas_track_scsiio(instance, id, channel);
3113 instance->instancet->enable_intr(instance);
3116 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
3117 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
3129 * megasas_fusion_smid_lookup : Look for fusion command correpspodning to SCSI
3130 * @instance: per adapter struct
3132 * Return Non Zero index, if SMID found in outstanding commands
/* Linear scan of the SCSI-IO command pool for the frame owning @scmd. */
3134 static u16 megasas_fusion_smid_lookup(struct scsi_cmnd *scmd)
3137 struct megasas_instance *instance;
3138 struct megasas_cmd_fusion *cmd_fusion;
3139 struct fusion_context *fusion;
3141 instance = (struct megasas_instance *)scmd->device->host->hostdata;
3143 fusion = instance->ctrl_context;
3145 for (i = 0; i < instance->max_scsi_cmds; i++) {
3146 cmd_fusion = fusion->cmd_list[i];
3147 if (cmd_fusion->scmd && (cmd_fusion->scmd == scmd)) {
3148 scmd_printk(KERN_NOTICE, scmd, "Abort request is for"
3149 " SMID: %d\n", cmd_fusion->index);
3150 ret = cmd_fusion->index;
3159 * megasas_get_tm_devhandle - Get devhandle for TM request
3160 * @sdev- OS provided scsi device
3162 * Returns- devhandle/targetID of SCSI device
/* PD channels use the JBOD sequence map's devHandle (when supported);
 * LD channels use the computed device_id as the handle. (u16)ULONG_MAX
 * signals an invalid handle to callers. */
3164 static u16 megasas_get_tm_devhandle(struct scsi_device *sdev)
3168 struct megasas_instance *instance;
3169 struct fusion_context *fusion;
3170 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
3171 u16 devhandle = (u16)ULONG_MAX;
3173 instance = (struct megasas_instance *)sdev->host->hostdata;
3174 fusion = instance->ctrl_context;
3176 if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
3177 if (instance->use_seqnum_jbod_fp) {
3178 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
/* Double-buffered map: pick the one matching pd_seq_map_id - 1. */
3180 pd_sync = (void *)fusion->pd_seq_sync
3181 [(instance->pd_seq_map_id - 1) & 1];
3182 devhandle = pd_sync->seq[pd_index].devHandle;
3184 sdev_printk(KERN_ERR, sdev, "Firmware expose tmCapable"
3185 " without JBOD MAP support from %s %d\n", __func__, __LINE__);
3187 device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
3189 devhandle = device_id;
3196 * megasas_task_abort_fusion : SCSI task abort function for fusion adapters
3197 * @scmd : pointer to scsi command object
3199 * Return SUCCESS, if command aborted else FAILED
/* SCSI EH abort handler: validates adapter/device state, finds the SMID
 * owning @scmd, then issues an ABORT_TASK TM under reset_mutex. */
3202 int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
3204 struct megasas_instance *instance;
3205 u16 smid, devhandle;
3206 struct fusion_context *fusion;
3208 struct MR_PRIV_DEVICE *mr_device_priv_data;
3209 mr_device_priv_data = scmd->device->hostdata;
3212 instance = (struct megasas_instance *)scmd->device->host->hostdata;
3213 fusion = instance->ctrl_context;
3215 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
3216 dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL,"
3217 "SCSI host:%d\n", instance->host->host_no);
/* Device already removed: complete with DID_NO_CONNECT. */
3222 if (!mr_device_priv_data) {
3223 sdev_printk(KERN_INFO, scmd->device, "device been deleted! "
3224 "scmd(%p)\n", scmd);
3225 scmd->result = DID_NO_CONNECT << 16;
/* Devices without TM capability cannot be aborted this way. */
3231 if (!mr_device_priv_data->is_tm_capable) {
3236 mutex_lock(&instance->reset_mutex);
3238 smid = megasas_fusion_smid_lookup(scmd);
/* Zero SMID means the command already completed — nothing to abort. */
3242 scmd_printk(KERN_NOTICE, scmd, "Command for which abort is"
3243 " issued is not found in oustanding commands\n");
3244 mutex_unlock(&instance->reset_mutex);
3248 devhandle = megasas_get_tm_devhandle(scmd->device);
3250 if (devhandle == (u16)ULONG_MAX) {
3252 sdev_printk(KERN_INFO, scmd->device,
3253 "task abort issued for invalid devhandle\n");
3254 mutex_unlock(&instance->reset_mutex);
3257 sdev_printk(KERN_INFO, scmd->device,
3258 "attempting task abort! scmd(%p) tm_dev_handle 0x%x\n",
/* tm_busy blocks new IO to this device while the TM is in flight. */
3261 mr_device_priv_data->tm_busy = 1;
3262 ret = megasas_issue_tm(instance, devhandle,
3263 scmd->device->channel, scmd->device->id, smid,
3264 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK);
3265 mr_device_priv_data->tm_busy = 0;
3267 mutex_unlock(&instance->reset_mutex);
3269 sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
3270 ((ret == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3276 * megasas_reset_target_fusion : target reset function for fusion adapters
3277 * scmd: SCSI command pointer
3279 * Returns SUCCESS if all commands associated with target aborted else FAILED
/* SCSI EH target-reset handler: mirrors the abort path but issues a
 * TARGET_RESET TM (smid_task = 0) for the whole device. */
3282 int megasas_reset_target_fusion(struct scsi_cmnd *scmd)
3285 struct megasas_instance *instance;
3288 struct fusion_context *fusion;
3289 struct MR_PRIV_DEVICE *mr_device_priv_data;
3290 mr_device_priv_data = scmd->device->hostdata;
3292 instance = (struct megasas_instance *)scmd->device->host->hostdata;
3293 fusion = instance->ctrl_context;
3295 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
3296 dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL,"
3297 "SCSI host:%d\n", instance->host->host_no);
/* Device already removed: complete with DID_NO_CONNECT. */
3302 if (!mr_device_priv_data) {
3303 sdev_printk(KERN_INFO, scmd->device, "device been deleted! "
3304 "scmd(%p)\n", scmd);
3305 scmd->result = DID_NO_CONNECT << 16;
3311 if (!mr_device_priv_data->is_tm_capable) {
3316 mutex_lock(&instance->reset_mutex);
3317 devhandle = megasas_get_tm_devhandle(scmd->device);
3319 if (devhandle == (u16)ULONG_MAX) {
3321 sdev_printk(KERN_INFO, scmd->device,
3322 "target reset issued for invalid devhandle\n");
3323 mutex_unlock(&instance->reset_mutex);
3327 sdev_printk(KERN_INFO, scmd->device,
3328 "attempting target reset! scmd(%p) tm_dev_handle 0x%x\n",
3330 mr_device_priv_data->tm_busy = 1;
/* smid_task 0: reset targets the device, not a specific task. */
3331 ret = megasas_issue_tm(instance, devhandle,
3332 scmd->device->channel, scmd->device->id, 0,
3333 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
3334 mr_device_priv_data->tm_busy = 0;
3335 mutex_unlock(&instance->reset_mutex);
3337 scmd_printk(KERN_NOTICE, scmd, "megasas: target reset %s!!\n",
3338 (ret == SUCCESS) ? "SUCCESS" : "FAILED");
3343 /*SRIOV get other instance in cluster if any*/
/* Scans the global adapter table for a different SR-IOV instance sharing
 * this instance's cluster ID; returns it, or NULL-equivalent (return line
 * elided in this extract) when none matches. */
3344 struct megasas_instance *megasas_get_peer_instance(struct megasas_instance *instance)
3348 for (i = 0; i < MAX_MGMT_ADAPTERS; i++) {
3349 if (megasas_mgmt_info.instance[i] &&
3350 (megasas_mgmt_info.instance[i] != instance) &&
3351 megasas_mgmt_info.instance[i]->requestorId &&
3352 megasas_mgmt_info.instance[i]->peerIsPresent &&
3353 (memcmp((megasas_mgmt_info.instance[i]->clusterId),
3354 instance->clusterId, MEGASAS_CLUSTER_ID_SIZE) == 0))
3355 return megasas_mgmt_info.instance[i];
3360 /* Check for a second path that is currently UP */
/* Returns DID_NO_CONNECT when an operational peer path exists (so the IO
 * can fail over), otherwise DID_RESET for retry on this path. */
3361 int megasas_check_mpio_paths(struct megasas_instance *instance,
3362 struct scsi_cmnd *scmd)
3364 struct megasas_instance *peer_instance = NULL;
3365 int retval = (DID_RESET << 16);
3367 if (instance->peerIsPresent) {
3368 peer_instance = megasas_get_peer_instance(instance);
3369 if ((peer_instance) &&
3370 (atomic_read(&peer_instance->adprecovery) ==
3371 MEGASAS_HBA_OPERATIONAL))
3372 retval = (DID_NO_CONNECT << 16);
3377 /* Core fusion reset function */
/*
 * megasas_reset_fusion - Online Controller Reset (OCR) entry point for
 * fusion-class adapters.
 * @shost:  Scsi_Host of the adapter to reset
 * @reason: non-zero when called for an IO/DCMD timeout; 0 when called
 *          internally (OCR work queue, crash-dump path, SR-IOV
 *          heartbeat failure)
 *
 * Serialized via instance->reset_mutex.  Returns SUCCESS on a
 * completed reset, FAILED when the adapter had to be killed.
 */
3378 int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
3380 int retval = SUCCESS, i, convert = 0;
3381 struct megasas_instance *instance;
3382 struct megasas_cmd_fusion *cmd_fusion;
3383 struct fusion_context *fusion;
3384 u32 abs_state, status_reg, reset_adapter;
3385 u32 io_timeout_in_crash_mode = 0;
3386 struct scsi_cmnd *scmd_local = NULL;
3387 struct scsi_device *sdev;
3389 instance = (struct megasas_instance *)shost->hostdata;
3390 fusion = instance->ctrl_context;
/* Only one reset may be in flight per adapter at a time. */
3392 mutex_lock(&instance->reset_mutex);
/* HW critical error is unrecoverable - bail out immediately. */
3394 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
3395 dev_warn(&instance->pdev->dev, "Hardware critical error, "
3396 "returning FAILED for scsi%d.\n",
3397 instance->host->host_no);
3398 mutex_unlock(&instance->reset_mutex);
3401 status_reg = instance->instancet->read_fw_status_reg(instance->reg_set);
3402 abs_state = status_reg & MFI_STATE_MASK;
3404 /* IO timeout detected, forcibly put FW in FAULT state */
3405 if (abs_state != MFI_STATE_FAULT && instance->crash_dump_buf &&
3406 instance->crash_dump_app_support && reason) {
3407 dev_info(&instance->pdev->dev, "IO/DCMD timeout is detected, "
3408 "forcibly FAULT Firmware\n");
3409 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
/* Faulting FW triggers crash-dump collection before the actual OCR. */
3410 status_reg = readl(&instance->reg_set->doorbell);
3411 writel(status_reg | MFI_STATE_FORCE_OCR,
3412 &instance->reg_set->doorbell);
/* Dummy readl flushes the posted doorbell write over PCI. */
3413 readl(&instance->reg_set->doorbell);
3414 mutex_unlock(&instance->reset_mutex);
/*
 * NOTE(review): this counter/message pair sits inside a polling loop
 * (the opening do { is not visible in this chunk) that waits up to
 * 80 * 3 = 240 seconds for crash dump + OCR to finish.
 */
3417 io_timeout_in_crash_mode++;
3418 dev_dbg(&instance->pdev->dev, "waiting for [%d] "
3419 "seconds for crash dump collection and OCR "
3420 "to be done\n", (io_timeout_in_crash_mode * 3));
3421 } while ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
3422 (io_timeout_in_crash_mode < 80));
3424 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
3425 dev_info(&instance->pdev->dev, "OCR done for IO "
/* Crash-dump OCR did not complete in time; recurse with reason=0
 * so the plain OCR/kill-adapter path is taken. */
3429 dev_info(&instance->pdev->dev, "Controller is not "
3430 "operational after 240 seconds wait for IO "
3431 "timeout case in FW crash dump mode\n do "
3432 "OCR/kill adapter\n");
3433 retval = megasas_reset_fusion(shost, 0);
/* Stop the SR-IOV heartbeat timer while the reset is in progress. */
3438 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
3439 del_timer_sync(&instance->sriov_heartbeat_timer);
3440 set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
3441 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_POLLING);
3442 instance->instancet->disable_intr(instance);
3445 /* First try waiting for commands to complete */
3446 if (megasas_wait_for_outstanding_fusion(instance, reason,
3448 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
3449 dev_warn(&instance->pdev->dev, "resetting fusion "
3450 "adapter scsi%d.\n", instance->host->host_no);
3454 /* Now return commands back to the OS */
3455 for (i = 0 ; i < instance->max_scsi_cmds; i++) {
3456 cmd_fusion = fusion->cmd_list[i];
3457 scmd_local = cmd_fusion->scmd;
3458 if (cmd_fusion->scmd) {
/* Result depends on peer-path state (see MPIO check earlier
 * in this file) so multipath can fail over cleanly. */
3459 scmd_local->result =
3460 megasas_check_mpio_paths(instance,
3462 if (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
3463 atomic_dec(&instance->ldio_outstanding);
3464 megasas_return_cmd_fusion(instance, cmd_fusion);
3465 scsi_dma_unmap(scmd_local);
3466 scmd_local->scsi_done(scmd_local);
3467 atomic_dec(&instance->fw_outstanding);
/* Re-read FW state to decide whether an OCR is even possible. */
3471 status_reg = instance->instancet->read_fw_status_reg(
3473 abs_state = status_reg & MFI_STATE_MASK;
3474 reset_adapter = status_reg & MFI_RESET_ADAPTER;
3475 if (instance->disableOnlineCtrlReset ||
3476 (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
3477 /* Reset not supported, kill adapter */
3478 dev_warn(&instance->pdev->dev, "Reset not supported"
3479 ", killing adapter scsi%d.\n",
3480 instance->host->host_no);
3481 megaraid_sas_kill_hba(instance);
3482 instance->skip_heartbeat_timer_del = 1;
3487 /* Let SR-IOV VF & PF sync up if there was a HB failure */
3488 if (instance->requestorId && !reason) {
3489 msleep(MEGASAS_OCR_SETTLE_TIME_VF);
/* VFs skip the chip reset; the PF performs it. */
3490 goto transition_to_ready;
3493 /* Now try to reset the chip */
3494 for (i = 0; i < MEGASAS_FUSION_MAX_RESET_TRIES; i++) {
3496 if (instance->instancet->adp_reset
3497 (instance, instance->reg_set))
3499 transition_to_ready:
3500 /* Wait for FW to become ready */
3501 if (megasas_transition_to_ready(instance, 1)) {
3502 dev_warn(&instance->pdev->dev,
3503 "Failed to transition controller to ready for "
3504 "scsi%d.\n", instance->host->host_no);
/* On a VF heartbeat-OCR, a ready-transition failure is fatal. */
3505 if (instance->requestorId && !reason)
3506 goto fail_kill_adapter;
3510 megasas_reset_reply_desc(instance);
3511 megasas_fusion_update_can_queue(instance, OCR_CONTEXT);
/* Re-run IOC INIT so FW and driver agree on queues again. */
3513 if (megasas_ioc_init_fusion(instance)) {
3514 dev_warn(&instance->pdev->dev,
3515 "megasas_ioc_init_fusion() failed! for "
3516 "scsi%d\n", instance->host->host_no);
3517 if (instance->requestorId && !reason)
3518 goto fail_kill_adapter;
/* Re-issue MFI management commands outstanding at reset time. */
3523 megasas_refire_mgmt_cmd(instance);
3525 if (megasas_get_ctrl_info(instance)) {
3526 dev_info(&instance->pdev->dev,
3527 "Failed from %s %d\n",
3528 __func__, __LINE__);
3529 megaraid_sas_kill_hba(instance);
3532 /* Reset load balance info */
3533 memset(fusion->load_balance_info, 0,
3534 sizeof(struct LD_LOAD_BALANCE_INFO)
3535 *MAX_LOGICAL_DRIVES_EXT);
/* Refresh RAID map; fall back to a sync when the get fails. */
3537 if (!megasas_get_map_info(instance))
3538 megasas_sync_map_info(instance);
3540 megasas_setup_jbod_map(instance);
3542 shost_for_each_device(sdev, shost)
3543 megasas_update_sdev_properties(sdev);
/* Reset finished: re-enable interrupts and resume normal IO. */
3545 clear_bit(MEGASAS_FUSION_IN_RESET,
3546 &instance->reset_flags);
3547 instance->instancet->enable_intr(instance);
3548 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
3550 /* Restart SR-IOV heartbeat */
3551 if (instance->requestorId) {
3552 if (!megasas_sriov_start_heartbeat(instance, 0))
3553 megasas_start_timer(instance,
3554 &instance->sriov_heartbeat_timer,
3555 megasas_sriov_heartbeat_handler,
3556 MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
3558 instance->skip_heartbeat_timer_del = 1;
3561 /* Adapter reset completed successfully */
3562 dev_warn(&instance->pdev->dev, "Reset "
3563 "successful for scsi%d.\n",
3564 instance->host->host_no);
/* Re-arm FW crash dump only if both driver and app support it. */
3566 if (instance->crash_dump_drv_support &&
3567 instance->crash_dump_app_support)
3568 megasas_set_crash_dump_params(instance,
3569 MR_CRASH_BUF_TURN_ON);
3571 megasas_set_crash_dump_params(instance,
3572 MR_CRASH_BUF_TURN_OFF);
3578 /* Reset failed, kill the adapter */
3579 dev_warn(&instance->pdev->dev, "Reset failed, killing "
3580 "adapter scsi%d.\n", instance->host->host_no);
3581 megaraid_sas_kill_hba(instance);
3582 instance->skip_heartbeat_timer_del = 1;
3585 /* For VF: Restart HB timer if we didn't OCR */
3586 if (instance->requestorId) {
3587 megasas_start_timer(instance,
3588 &instance->sriov_heartbeat_timer,
3589 megasas_sriov_heartbeat_handler,
3590 MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
3592 clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
3593 instance->instancet->enable_intr(instance);
3594 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
3597 clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
3598 mutex_unlock(&instance->reset_mutex);
3602 /* Fusion Crash dump collection work queue */
/*
 * megasas_fusion_crash_dump_wq - work handler that drains the FW crash
 * dump from the 1 MB DMA buffer into host-side buffers, one chunk per
 * invocation, then kicks an OCR once the dump is complete.
 * @work: embedded in megas_instance as crash_init
 *
 * Handshake with FW is via the outbound scratch pad register
 * (MFI_STATE_DMADONE / MFI_STATE_CRASH_DUMP_DONE bits).
 */
3603 void megasas_fusion_crash_dump_wq(struct work_struct *work)
3605 struct megasas_instance *instance =
3606 container_of(work, struct megasas_instance, crash_init);
3608 u8 partial_copy = 0;
3611 status_reg = instance->instancet->read_fw_status_reg(instance->reg_set);
3614 * Allocate host crash buffers to copy data from 1 MB DMA crash buffer
3615 * to host crash buffers
3617 if (instance->drv_buf_index == 0) {
3618 /* Buffer is already allocated for old Crash dump.
3619 * Do OCR and do not wait for crash dump collection
3621 if (instance->drv_buf_alloc) {
3622 dev_info(&instance->pdev->dev, "earlier crash dump is "
3623 "not yet copied by application, ignoring this "
3624 "crash dump and initiating OCR\n");
/* Tell FW we are done so it stops producing dump data. */
3625 status_reg |= MFI_STATE_CRASH_DUMP_DONE;
3627 &instance->reg_set->outbound_scratch_pad);
/* Dummy readl flushes the posted register write. */
3628 readl(&instance->reg_set->outbound_scratch_pad);
3631 megasas_alloc_host_crash_buffer(instance);
3632 dev_info(&instance->pdev->dev, "Number of host crash buffers "
3633 "allocated: %d\n", instance->drv_buf_alloc);
3637 * Driver has allocated max buffers, which can be allocated
3638 * and FW has more crash dump data, then driver will
3641 if (instance->drv_buf_index >= (instance->drv_buf_alloc)) {
3642 dev_info(&instance->pdev->dev, "Driver is done copying "
3643 "the buffer: %d\n", instance->drv_buf_alloc);
3644 status_reg |= MFI_STATE_CRASH_DUMP_DONE;
/* Copy the current 1 MB DMA chunk and ack it by clearing DMADONE. */
3647 memcpy(instance->crash_buf[instance->drv_buf_index],
3648 instance->crash_dump_buf, CRASH_DMA_BUF_SIZE);
3649 instance->drv_buf_index++;
3650 status_reg &= ~MFI_STATE_DMADONE;
3653 if (status_reg & MFI_STATE_CRASH_DUMP_DONE) {
3654 dev_info(&instance->pdev->dev, "Crash Dump is available,number "
3655 "of copied buffers: %d\n", instance->drv_buf_index);
/* Expose the dump to the application via sysfs state/size. */
3656 instance->fw_crash_buffer_size = instance->drv_buf_index;
3657 instance->fw_crash_state = AVAILABLE;
3658 instance->drv_buf_index = 0;
3659 writel(status_reg, &instance->reg_set->outbound_scratch_pad);
3660 readl(&instance->reg_set->outbound_scratch_pad);
/* Dump collection finished - now perform the deferred OCR. */
3662 megasas_reset_fusion(instance->host, 0);
3664 writel(status_reg, &instance->reg_set->outbound_scratch_pad);
3665 readl(&instance->reg_set->outbound_scratch_pad);
3670 /* Fusion OCR work queue */
/*
 * megasas_fusion_ocr_wq - deferred-work thunk that performs an online
 * controller reset outside interrupt context.
 * @work: embedded in megasas_instance as work_init
 */
3671 void megasas_fusion_ocr_wq(struct work_struct *work)
3673 struct megasas_instance *instance =
3674 container_of(work, struct megasas_instance, work_init);
/* reason=0: internally initiated OCR, not an IO/DCMD timeout. */
3676 megasas_reset_fusion(instance->host, 0);
3679 struct megasas_instance_template megasas_instance_template_fusion = {
3680 .enable_intr = megasas_enable_intr_fusion,
3681 .disable_intr = megasas_disable_intr_fusion,
3682 .clear_intr = megasas_clear_intr_fusion,
3683 .read_fw_status_reg = megasas_read_fw_status_reg_fusion,
3684 .adp_reset = megasas_adp_reset_fusion,
3685 .check_reset = megasas_check_reset_fusion,
3686 .service_isr = megasas_isr_fusion,
3687 .tasklet = megasas_complete_cmd_dpc_fusion,
3688 .init_adapter = megasas_init_adapter_fusion,
3689 .build_and_issue_cmd = megasas_build_and_issue_cmd_fusion,
3690 .issue_dcmd = megasas_issue_dcmd_fusion,