/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/sched/clock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
static enum cpuhp_state lpfc_cpuhp_state;
/* Used when mapping IRQ vectors in a driver centric manner */
static uint32_t lpfc_present_cpu;
static bool lpfc_pldv_detect;

static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_add(struct lpfc_hba *phba);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *);
static void lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
#define LPFC_NVMET_BUF_POST 254
static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport);
/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
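	/*
	 * For LightPulse (LC) HBAs the driver writes the license-key text
	 * above into the READ_NVPARM reserved words; the key bytes are
	 * converted to big-endian once (cpu_to_be32) before being used.
	 */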
	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof(uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char *)mb->un.varRDnvp.rsvd3, 0,
		       sizeof(mb->un.varRDnvp.rsvd3));
		memcpy((char *)mb->un.varRDnvp.rsvd3, licensed,
		       sizeof(licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	/*
	 * Clear all option bits except LPFC_SLI3_BG_ENABLED,
	 * which was already set in lpfc_get_cfgparam()
	 */
	phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;
	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}
	/* Save information as VPD data */
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char *)mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *)mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
		       sizeof(phba->RandomData));
	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
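	/*
	 * The VPD region is fetched in chunks: each DUMP_MEMORY mailbox
	 * pass copies word_cnt more data into lpfc_vpd_data, and the loop
	 * repeats until the firmware reports a zero count or the
	 * DMP_VPD_SIZE buffer is full.
	 */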
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;

		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}
/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's configuring asynchronous
 * event mailbox command to the device. If the mailbox command returns
 * successfully, it will set the internal async event support flag to 1;
 * otherwise, it will set the internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
}
/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command for getting
 * wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	dist = dist_char[prg->dist];
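	/*
	 * Illustrative decode: ver = 10, rev = 2, lev = 8, dist = 1 ('a')
	 * and num = 5 would format as "10.28a5"; when dist == 3 and
	 * num == 0 the dist/num suffix is omitted.
	 */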
	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
}
/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 * @vport: pointer to lpfc vport data structure.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;

	/* If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
		       sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
		       sizeof(struct lpfc_name));

	/*
	 * If the port name has changed, then set the Param changes flag
	 */
	if (vport->fc_portname.u.wwn[0] != 0 &&
	    memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
		   sizeof(struct lpfc_name))) {
		vport->vport_flag |= FAWWPN_PARAM_CHG;

		if (phba->sli_rev == LPFC_SLI_REV4 &&
		    vport->port_type == LPFC_PHYSICAL_PORT &&
		    phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_FABRIC) {
			if (!(phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG))
				phba->sli4_hba.fawwpn_flag &=
						~LPFC_FAWWPN_FABRIC;
			lpfc_printf_log(phba, KERN_INFO,
					LOG_SLI | LOG_DISCOVERY | LOG_ELS,
					"2701 FA-PWWN change WWPN from %llx to "
					"%llx: vflag x%x fawwpn_flag x%x\n",
					wwn_to_u64(vport->fc_portname.u.wwn),
					wwn_to_u64
					    (vport->fc_sparam.portName.u.wwn),
					vport->vport_flag,
					phba->sli4_hba.fawwpn_flag);
			memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
			       sizeof(struct lpfc_name));
		}
	}

	if (vport->fc_portname.u.wwn[0] == 0)
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
		       sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
		       sizeof(struct lpfc_name));
}
/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID. */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *)pmb->ctx_buf;

	/* This dmabuf was allocated by lpfc_read_sparam. The dmabuf is no
	 * longer needed. Prevent unintended ctx_buf access as the mbox is
	 * reused.
	 */
	memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->ctx_buf = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;
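		/*
		 * Each WWNN byte yields two serial characters: nibble values
		 * 0-9 map to ASCII '0'-'9' (0x30 + j) and 10-15 map to
		 * 'a'-'f' (0x61 + j - 10), giving a 12-character serial.
		 */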
		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t)0x30 + (uint8_t)j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t)0x61 + (uint8_t)(j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t)0x30 + (uint8_t)j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t)0x61 + (uint8_t)(j - 10));
		}
	}
	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3359 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth,
				mb->un.varRdConfig.max_xri);
		phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
	}

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);
	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}
	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;
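	/*
	 * HC_MBINT/HC_ERINT/HC_LAINT enable mailbox, error and link
	 * attention interrupts; each SLI-3 ring also has its own
	 * HC_RxINT_ENA bit, set below only for rings that are present.
	 */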
	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
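	/* i.e. the ELS ring timer runs at twice the resource allocation
	 * timeout value (R_A_TOV)
	 */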
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2599 Adapter failed to issue DOWN_LINK"
					" mbox command rc 0x%x\n", rc);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}
	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}
/**
 * lpfc_sli4_refresh_params - update driver copy of params.
 * @phba: Pointer to HBA context object.
 *
 * This is called to refresh the driver copy of dynamic fields from the
 * common_get_sli4_parameters descriptor.
 **/
int
lpfc_sli4_refresh_params(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mqe *mqe;
	struct lpfc_sli4_parameters *mbx_sli4_parameters;
	int length, rc;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	mqe = &mboxq->u.mqe;
	/* Read the port's SLI4 Config Parameters */
	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
			 length, LPFC_SLI4_MBX_EMBED);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc) {
		mempool_free(mboxq, phba->mbox_mem_pool);
		return rc;
	}
	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
	phba->sli4_hba.pc_sli4_params.mi_cap =
		bf_get(cfg_mi_ver, mbx_sli4_parameters);

	/* Are we forcing MI off via module parameter? */
	if (phba->cfg_enable_mi)
		phba->sli4_hba.pc_sli4_params.mi_ver =
			bf_get(cfg_mi_ver, mbx_sli4_parameters);
	else
		phba->sli4_hba.pc_sli4_params.mi_ver = 0;

	phba->sli4_hba.pc_sli4_params.cmf =
			bf_get(cfg_cmf, mbx_sli4_parameters);
	phba->sli4_hba.pc_sli4_params.pls =
			bf_get(cfg_pvl, mbx_sli4_parameters);

	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}
/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}
/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;
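	/*
	 * Validate the configured link speed against the link-speed mask
	 * (lmt) reported by READ_CONFIG; any speed the adapter cannot do
	 * is reset to auto-negotiation below.
	 */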
	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
	     !(phba->lmt & LMT_32Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
	     !(phba->lmt & LMT_64Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1302 Invalid speed for this board:%d "
				"Reset link speed to auto.\n",
				phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0498 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}
/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2522 Adapter failed to issue DOWN_LINK"
				" mbox command rc 0x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}
/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				    vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}
/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
 * rspiocb which got deferred
 *
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup completed slow path events after HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *rspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			rspiocbq = container_of(cq_event, struct lpfc_iocbq,
						cq_event);
			lpfc_sli_release_iocbq(phba, rspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
		}
	}
}
/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup posted ELS buffers after the HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(buflist);
	int count;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->sli3_ring[LPFC_ELS_RING];
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->postbufq, &buflist);
		spin_unlock_irq(&phba->hbalock);

		count = 0;
		list_for_each_entry_safe(mp, next_mp, &buflist, list) {
			list_del(&mp->list);
			count++;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}

		spin_lock_irq(&phba->hbalock);
		pring->postbufq_cnt -= count;
		spin_unlock_irq(&phba->hbalock);
	}
}
/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup the txcmplq after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   void
 **/
void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;
	struct lpfc_iocbq *piocb, *next_iocb;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			spin_lock_irq(&phba->hbalock);
			/* At this point in time the HBA is either reset or DOA
			 * Nothing should be on txcmplq as it will
			 * NEVER complete.
			 */
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);

			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
		return;
	}
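	/* SLI4: walk every hardware work queue; each has its own ring
	 * and ring_lock.
	 */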
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}
/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}
/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *psb, *psb_next;
	struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
	struct lpfc_sli4_hdw_queue *qp;
	LIST_HEAD(aborts);
	LIST_HEAD(nvme_aborts);
	LIST_HEAD(nvmet_aborts);
	struct lpfc_sglq *sglq_entry = NULL;
	int cnt, idx;

	lpfc_sli_hbqbuf_free_all(phba);
	lpfc_hba_clean_txcmplq(phba);

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_els_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */

	/* sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry(sglq_entry,
			    &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			 &phba->sli4_hba.lpfc_els_sgl_list);

	spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);

	/* abts_xxxx_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock_irq(&phba->hbalock);
	cnt = 0;
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_io_buf_list_lock);
		list_splice_init(&qp->lpfc_abts_io_buf_list,
				 &aborts);

		list_for_each_entry_safe(psb, psb_next, &aborts, list) {
			psb->pCmd = NULL;
			psb->status = IOSTAT_SUCCESS;
			cnt++;
		}

		spin_lock(&qp->io_buf_list_put_lock);
		list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs += qp->abts_scsi_io_bufs;
		qp->put_io_bufs += qp->abts_nvme_io_bufs;
		qp->abts_scsi_io_bufs = 0;
		qp->abts_nvme_io_bufs = 0;
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock(&qp->abts_io_buf_list_lock);
	}
	spin_unlock_irq(&phba->hbalock);
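	/* NVMe target contexts aborted by the reset are queued separately;
	 * clear their busy/abort flags and post each context buffer back
	 * to the firmware for reuse.
	 */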
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 &nvmet_aborts);
		spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
			ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
		}
	}

	lpfc_sli4_free_sp_events(phba);
	return cnt;
}
/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}
/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, an HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodic operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = from_timer(phba, t, hb_tmofunc);

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
}
/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, an RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodic operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = from_timer(phba, t, rrq_tmr);
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	if (!(phba->pport->load_flag & FC_UNLOADING))
		phba->hba_flag |= HBA_RRQ_ACTIVE;
	else
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_worker_wake_up(phba);
}
/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up the
 * heart-beat timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and
 * mark the heart-beat outstanding state. Once the mailbox command comes back
 * and no error conditions are detected, the heart-beat mailbox command timer
 * is reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
	    !(phba->link_state == LPFC_HBA_ERROR) &&
	    !(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
}
/**
 * lpfc_idle_stat_delay_work - idle_stat tracking
 * @work: pointer to the delayed work structure.
 *
 * This routine tracks per-cq idle_stat and determines polling decisions.
 *
 * Return codes
 *   None
 **/
static void
lpfc_idle_stat_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba,
					     idle_stat_delay_work);
	struct lpfc_queue *cq;
	struct lpfc_sli4_hdw_queue *hdwq;
	struct lpfc_idle_stat *idle_stat;
	u32 i, idle_percent;
	u64 wall, wall_idle, diff_wall, diff_idle, busy_time;

	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE ||
	    phba->cmf_active_mode != LPFC_CFG_OFF)
		goto requeue;

	for_each_present_cpu(i) {
		hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
		cq = hdwq->io_cq;

		/* Skip if we've already handled this cq's primary CPU */
		if (cq->chann != i)
			continue;

		idle_stat = &phba->sli4_hba.idle_stat[i];

		/* get_cpu_idle_time returns values as running counters. Thus,
		 * to know the amount for this period, the prior counter values
		 * need to be subtracted from the current counter values.
		 * From there, the idle time stat can be calculated as a
		 * percentage of 100 - the sum of the other consumption times.
		 */
		wall_idle = get_cpu_idle_time(i, &wall, 1);
		diff_idle = wall_idle - idle_stat->prev_idle;
		diff_wall = wall - idle_stat->prev_wall;

		if (diff_wall <= diff_idle)
			busy_time = 0;
		else
			busy_time = diff_wall - diff_idle;

		idle_percent = div64_u64(100 * busy_time, diff_wall);
		idle_percent = 100 - idle_percent;
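		/* Example: a 500 ms window with 400 ms measured idle gives
		 * busy_time = 100 ms, so idle_percent = 100 - 20 = 80 and
		 * the CQ is switched to LPFC_IRQ_POLL below.
		 */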
		if (idle_percent < 15)
			cq->poll_mode = LPFC_QUEUE_WORK;
		else
			cq->poll_mode = LPFC_IRQ_POLL;

		idle_stat->prev_idle = wall_idle;
		idle_stat->prev_wall = wall;
	}

requeue:
	schedule_delayed_work(&phba->idle_stat_delay_work,
			      msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
}
static void
lpfc_hb_eq_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba, eq_delay_work);
	struct lpfc_eq_intr_info *eqi, *eqi_new;
	struct lpfc_queue *eq, *eq_next;
	unsigned char *ena_delay = NULL;
	uint32_t usdelay;
	int i;

	if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE)
		goto requeue;

	ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
			    GFP_KERNEL);
	if (!ena_delay)
		goto requeue;

	for (i = 0; i < phba->cfg_irq_chann; i++) {
		/* Get the EQ corresponding to the IRQ vector */
		eq = phba->sli4_hba.hba_eq_hdl[i].eq;
		if (!eq)
			continue;
		if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
			eq->q_flag &= ~HBA_EQ_DELAY_CHK;
			ena_delay[eq->last_cpu] = 1;
		}
	}

	for_each_present_cpu(i) {
		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
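		/* icnt counts EQ interrupts seen on this CPU since the last
		 * pass; e.g. ~8192 interrupts yields (8192 >> 10) = 8 delay
		 * steps, capped at LPFC_MAX_AUTO_EQ_DELAY.
		 */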
		if (ena_delay[i]) {
			usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
			if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
				usdelay = LPFC_MAX_AUTO_EQ_DELAY;
		} else {
			usdelay = 0;
		}

		eqi->icnt = 0;

		list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
			if (unlikely(eq->last_cpu != i)) {
				eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
						      eq->last_cpu);
				list_move_tail(&eq->cpu_list, &eqi_new->list);
				continue;
			}
			if (usdelay != eq->q_mode)
				lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
							 usdelay);
		}
	}

	kfree(ena_delay);

requeue:
	queue_delayed_work(phba->wq, &phba->eq_delay_work,
			   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
}
/**
 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
 * @phba: pointer to lpfc hba data structure.
 *
 * For each heartbeat, this routine does some heuristic methods to adjust
 * XRI distribution. The goal is to fully utilize free XRIs.
 **/
static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
{
	u32 i;
	u32 hwq_count;

	hwq_count = phba->cfg_hdw_queue;
	for (i = 0; i < hwq_count; i++) {
		/* Adjust XRIs in private pool */
		lpfc_adjust_pvt_pool_count(phba, i);

		/* Adjust high watermark */
		lpfc_adjust_high_watermark(phba, i);

#ifdef LPFC_MXP_STAT
		/* Snapshot pbl, pvt and busy count */
		lpfc_snapshot_mxp(phba, i);
#endif
	}
}
/**
 * lpfc_issue_hb_mbox - Issues heart-beat mailbox command
 * @phba: pointer to lpfc hba data structure.
 *
 * If a HB mbox is not already in progress, this routine will allocate
 * a LPFC_MBOXQ_t, populate it with a MBX_HEARTBEAT (0x31) command,
 * and issue it. The HBA_HBEAT_INP flag means the command is in progress.
 **/
static int
lpfc_issue_hb_mbox(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	int retval;

	/* Is a Heartbeat mbox already in progress */
	if (phba->hba_flag & HBA_HBEAT_INP)
		return 0;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;

	lpfc_heart_beat(phba, pmboxq);
	pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
	pmboxq->vport = phba->pport;
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

	if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return -ENXIO;
	}
	phba->hba_flag |= HBA_HBEAT_INP;

	return 0;
}
/**
 * lpfc_issue_hb_tmo - Signals heartbeat timer to issue mbox command
 * @phba: pointer to lpfc hba data structure.
 *
 * The heartbeat timer (every 5 sec) will fire. If the HBA_HBEAT_TMO
 * flag is set, it will force a MBX_HEARTBEAT mbox command, regardless
 * of the value of lpfc_enable_hba_heartbeat.
 * If lpfc_enable_hba_heartbeat is set, the timeout routine will always
 * try to issue a MBX_HEARTBEAT mbox command.
 **/
void
lpfc_issue_hb_tmo(struct lpfc_hba *phba)
{
	if (phba->cfg_enable_hba_heartbeat)
		return;
	phba->hba_flag |= HBA_HBEAT_TMO;
}
/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * handler performs any periodic operations needed for the device. If such
 * a periodic event has already been attended to either in the interrupt
 * handler or by processing slow-ring or fast-ring events within the
 * HBA-timer timeout window (LPFC_HB_MBOX_INTERVAL), this handler simply
 * resets the timer for the next timeout period. If the lpfc heart-beat
 * mailbox command is configured and there is no heart-beat mailbox command
 * outstanding, a heart-beat mailbox is issued and the timer set properly.
 * Otherwise, if there has been a heart-beat mailbox command outstanding,
 * the HBA shall be put offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_dmabuf *buf_ptr;
	int retval = 0;
	int i, tmo;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	if (phba->cfg_xri_rebalancing) {
		/* Multi-XRI pools handler */
		lpfc_hb_mxp_handler(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			lpfc_rcv_seq_check_edtov(vports[i]);
			lpfc_fdmi_change_check(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (phba->pport->load_flag & FC_UNLOADING) ||
	    (phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	if (phba->elsbuf_cnt &&
	    (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
					 struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		/* If IOs are completing, no need to issue a MBX_HEARTBEAT */
		spin_lock_irq(&phba->pport->work_port_lock);
		if (time_after(phba->last_completion_time +
			       msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
			       jiffies)) {
			spin_unlock_irq(&phba->pport->work_port_lock);
			if (phba->hba_flag & HBA_HBEAT_INP)
				tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			else
				tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
			goto out;
		}
		spin_unlock_irq(&phba->pport->work_port_lock);

		/* Check if a MBX_HEARTBEAT is already in progress */
		if (phba->hba_flag & HBA_HBEAT_INP) {
			/*
			 * If heart beat timeout called with HBA_HBEAT_INP set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or time out.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still outstanding: "
					"last compl time was %d ms.\n",
					jiffies_to_msecs(jiffies
						- phba->last_completion_time));
			tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
		} else {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
			    (list_empty(&psli->mboxq))) {
				retval = lpfc_issue_hb_mbox(phba);
				if (retval) {
					tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
					goto out;
				}
				phba->skipped_hb = 0;
			} else if (time_before_eq(phba->last_completion_time,
						  phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
						"2857 Last completion time not "
						"updated in %d ms\n",
						jiffies_to_msecs(jiffies
							- phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			goto out;
		}
	} else {
		/* Check to see if we want to force a MBX_HEARTBEAT */
		if (phba->hba_flag & HBA_HBEAT_TMO) {
			retval = lpfc_issue_hb_mbox(phba);
			if (retval)
				tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
			else
				tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
		} else
			tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
	}
out:
	mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo));
}
/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}
/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	if (phba->link_state == LPFC_HBA_ERROR &&
	    test_bit(HBA_PCI_ERR, &phba->bit_flags)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	phba->link_state = LPFC_HBA_ERROR;
	spin_unlock_irq(&phba->hbalock);

	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);
	lpfc_hba_down_post(phba);
	lpfc_unblock_mgmt_io(phba);
}
/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0479 Deferred Adapter Hardware Error "
			"Data: x%x x%x x%x\n",
			phba->work_hs, phba->work_status[0],
			phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it has triggered erratt. That could cause
	 * I/Os to be dropped by the firmware. Error the iocbs (I/O) on
	 * txcmplq and let the SCSI layer retry them after re-establishing
	 * link.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear. */
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which
	 * the first write to the host attention register clears
	 * the host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}
static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  SCSI_NL_VID_TYPE_PCI
				  | PCI_VENDOR_ID_EMULEX);
}
/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it has triggered erratt with HS_FFER6.
		 * That could cause I/Os to be dropped by the firmware.
		 * Error the iocbs (I/O) on txcmplq and let the SCSI layer
		 * retry them after re-establishing link.
		 */
		lpfc_sli_abort_fcp_rings(phba);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(event_data),
					  (char *) &event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
}
/**
 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 * @en_rn_msg: send reset/port recovery message.
 *
 * This routine is invoked to perform an SLI4 port PCI function reset in
 * response to port status register polling attention. It waits for port
 * status register (ERR, RDY, RN) bits before proceeding with function reset.
 * During this process, interrupt vectors are freed and later requested
 * for handling possible port resource change.
 **/
static int
lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
			    bool en_rn_msg)
{
	int rc;
	uint32_t intr_mode;
	LPFC_MBOXQ_t *mboxq;

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
	    LPFC_SLI_INTF_IF_TYPE_2) {
		/*
		 * On error status condition, the driver needs to wait for
		 * port ready before performing reset.
		 */
		rc = lpfc_sli4_pdev_status_reg_wait(phba);
		if (rc)
			return rc;
	}

	/* need reset: attempt for port recovery */
	if (en_rn_msg)
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2887 Reset Needed: Attempting Port "
				"Recovery...\n");

	/* If we are no wait, the HBA has been reset and is not
	 * functional, thus we should clear
	 * (LPFC_SLI_ACTIVE | LPFC_SLI_MBOX_ACTIVE) flags.
	 */
	if (mbx_action == LPFC_MBX_NO_WAIT) {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
		if (phba->sli.mbox_active) {
			mboxq = phba->sli.mbox_active;
			mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
			__lpfc_mbox_cmpl_put(phba, mboxq);
			phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
			phba->sli.mbox_active = NULL;
		}
		spin_unlock_irq(&phba->hbalock);
	}

	lpfc_offline_prep(phba, mbx_action);
	lpfc_sli_flush_io_rings(phba);

	/* release interrupt for possible resource change */
	lpfc_sli4_disable_intr(phba);
	rc = lpfc_sli_brdrestart(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6309 Failed to restart board\n");
		return rc;
	}
	/* request and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3175 Failed to enable interrupt\n");
		return -EIO;
	}
	phba->intr_mode = intr_mode;
	rc = lpfc_online(phba);
	if (rc == 0)
		lpfc_unblock_mgmt_io(phba);

	return rc;
}
1982 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1983 * @phba: pointer to lpfc hba data structure.
1985 * This routine is invoked to handle the SLI4 HBA hardware error attention
1989 lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1991 struct lpfc_vport *vport = phba->pport;
1992 uint32_t event_data;
1993 struct Scsi_Host *shost;
1995 struct lpfc_register portstat_reg = {0};
1996 uint32_t reg_err1, reg_err2;
1997 uint32_t uerrlo_reg, uemasklo_reg;
1998 uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
1999 bool en_rn_msg = true;
2000 struct temp_event temp_event_data;
2001 struct lpfc_register portsmphr_reg;
2004 /* If the pci channel is offline, ignore possible errors, since
2005 * we cannot communicate with the pci card anyway.
2007 if (pci_channel_offline(phba->pcidev)) {
2008 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2009 "3166 pci channel is offline\n");
2010 lpfc_sli_flush_io_rings(phba);
2014 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
2015 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
2017 case LPFC_SLI_INTF_IF_TYPE_0:
2018 pci_rd_rc1 = lpfc_readl(
2019 phba->sli4_hba.u.if_type0.UERRLOregaddr,
2021 pci_rd_rc2 = lpfc_readl(
2022 phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
2024 /* consider PCI bus read error as pci_channel_offline */
2025 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
2027 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
2028 lpfc_sli4_offline_eratt(phba);
2031 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2032 "7623 Checking UE recoverable");
2034 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
2035 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
2036 &portsmphr_reg.word0))
2039 smphr_port_status = bf_get(lpfc_port_smphr_port_status,
2041 if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
2042 LPFC_PORT_SEM_UE_RECOVERABLE)
2044 /* Sleep for 1 sec before checking the SEMAPHORE */
2048 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2049 "4827 smphr_port_status x%x : Waited %dSec",
2050 smphr_port_status, i);
2052 /* Recoverable UE, reset the HBA device */
2053 if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
2054 LPFC_PORT_SEM_UE_RECOVERABLE) {
2055 for (i = 0; i < 20; i++) {
2057 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
2058 &portsmphr_reg.word0) &&
2059 (LPFC_POST_STAGE_PORT_READY ==
2060 bf_get(lpfc_port_smphr_port_status,
2062 rc = lpfc_sli4_port_sta_fn_reset(phba,
2063 LPFC_MBX_NO_WAIT, en_rn_msg);
2066 lpfc_printf_log(phba, KERN_ERR,
2068 "4215 Failed to recover UE");
2073 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2074 "7624 Firmware not ready: Failing UE recovery,"
2075 " waited %dSec", i);
2076 phba->link_state = LPFC_HBA_ERROR;
2079 case LPFC_SLI_INTF_IF_TYPE_2:
2080 case LPFC_SLI_INTF_IF_TYPE_6:
2081 pci_rd_rc1 = lpfc_readl(
2082 phba->sli4_hba.u.if_type2.STATUSregaddr,
2083 &portstat_reg.word0);
2084 /* consider PCI bus read error as pci_channel_offline */
2085 if (pci_rd_rc1 == -EIO) {
2086 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2087 "3151 PCI bus read access failure: x%x\n",
2088 readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
2089 lpfc_sli4_offline_eratt(phba);
2092 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
2093 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
2094 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
2095 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2096 "2889 Port Overtemperature event, "
2097 "taking port offline Data: x%x x%x\n",
2098 reg_err1, reg_err2);
2100 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
2101 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
2102 temp_event_data.event_code = LPFC_CRIT_TEMP;
2103 temp_event_data.data = 0xFFFFFFFF;
2105 shost = lpfc_shost_from_vport(phba->pport);
2106 fc_host_post_vendor_event(shost, fc_get_event_number(),
2107 sizeof(temp_event_data),
2108 (char *)&temp_event_data,
2109 SCSI_NL_VID_TYPE_PCI
2110 | PCI_VENDOR_ID_EMULEX);
2112 spin_lock_irq(&phba->hbalock);
2113 phba->over_temp_state = HBA_OVER_TEMP;
2114 spin_unlock_irq(&phba->hbalock);
2115 lpfc_sli4_offline_eratt(phba);
2118 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2119 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
2120 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2121 "3143 Port Down: Firmware Update "
2124 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2125 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
2126 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2127 "3144 Port Down: Debug Dump\n");
2128 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2129 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
2130 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2131 "3145 Port Down: Provisioning\n");
2133 /* If resets are disabled then leave the HBA alone and return */
2134 if (!phba->cfg_enable_hba_reset)
2137 /* Check port status register for function reset */
2138 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
2141 /* don't report event on forced debug dump */
2142 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2143 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
2148 /* fall through when not able to recover */
2149 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2150 "3152 Unrecoverable error\n");
2151 phba->link_state = LPFC_HBA_ERROR;
2153 case LPFC_SLI_INTF_IF_TYPE_1:
2157 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2158 "3123 Report dump event to upper layer\n");
2159 /* Send an internal error event to mgmt application */
2160 lpfc_board_errevt_to_mgmt(phba);
2162 event_data = FC_REG_DUMP_EVENT;
2163 shost = lpfc_shost_from_vport(vport);
2164 fc_host_post_vendor_event(shost, fc_get_event_number(),
2165 sizeof(event_data), (char *) &event_data,
2166 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
2170 * lpfc_handle_eratt - Wrapper func for handling hba error attention
2171 * @phba: pointer to lpfc HBA data structure.
2173 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
2174 * routine, invoked through the API jump table function pointer in the lpfc_hba struct.
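 * The pointer resolves to lpfc_handle_eratt_s3 on SLI-3 ports and to
 * lpfc_handle_eratt_s4 (above) on SLI-4 ports.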
2178 * Any other value - error.
2181 lpfc_handle_eratt(struct lpfc_hba *phba)
2183 (*phba->lpfc_handle_eratt)(phba);
2187 * lpfc_handle_latt - The HBA link event handler
2188 * @phba: pointer to lpfc hba data structure.
2190 * This routine is invoked from the worker thread to handle a HBA host
2191 * attention link event. SLI3 only.
2194 lpfc_handle_latt(struct lpfc_hba *phba)
2196 struct lpfc_vport *vport = phba->pport;
2197 struct lpfc_sli *psli = &phba->sli;
2199 volatile uint32_t control;
2202 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2205 goto lpfc_handle_latt_err_exit;
2208 rc = lpfc_mbox_rsrc_prep(phba, pmb);
2211 mempool_free(pmb, phba->mbox_mem_pool);
2212 goto lpfc_handle_latt_err_exit;
2215 /* Cleanup any outstanding ELS commands */
2216 lpfc_els_flush_all_cmd(phba);
2217 psli->slistat.link_event++;
2218 lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
2219 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
2221 /* Block ELS IOCBs until we have processed this mbox command */
2222 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2223 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
2224 if (rc == MBX_NOT_FINISHED) {
2226 goto lpfc_handle_latt_free_mbuf;
2229 /* Clear Link Attention in HA REG */
2230 spin_lock_irq(&phba->hbalock);
2231 writel(HA_LATT, phba->HAregaddr);
2232 readl(phba->HAregaddr); /* flush */
2233 spin_unlock_irq(&phba->hbalock);
2237 lpfc_handle_latt_free_mbuf:
2238 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
2239 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
2240 lpfc_handle_latt_err_exit:
2241 /* Enable Link attention interrupts */
2242 spin_lock_irq(&phba->hbalock);
2243 psli->sli_flag |= LPFC_PROCESS_LA;
2244 control = readl(phba->HCregaddr);
2245 control |= HC_LAINT_ENA;
2246 writel(control, phba->HCregaddr);
2247 readl(phba->HCregaddr); /* flush */
2249 /* Clear Link Attention in HA REG */
2250 writel(HA_LATT, phba->HAregaddr);
2251 readl(phba->HAregaddr); /* flush */
2252 spin_unlock_irq(&phba->hbalock);
2253 lpfc_linkdown(phba);
2254 phba->link_state = LPFC_HBA_ERROR;
2256 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2257 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
2263 lpfc_fill_vpd(struct lpfc_hba *phba, uint8_t *vpd, int length, int *pindex)
2267 while (length > 0) {
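/* VPD keyword map, as consumed below: SN = serial number,
 * V1 = model description, V2 = model name, V3 = program type,
 * V4 = port. Each keyword is followed by a one-byte data length.
 */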
2268 /* Look for Serial Number */
2269 if ((vpd[*pindex] == 'S') && (vpd[*pindex + 1] == 'N')) {
2276 phba->SerialNumber[j++] = vpd[(*pindex)++];
2280 phba->SerialNumber[j] = 0;
2282 } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '1')) {
2283 phba->vpd_flag |= VPD_MODEL_DESC;
2290 phba->ModelDesc[j++] = vpd[(*pindex)++];
2294 phba->ModelDesc[j] = 0;
2296 } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '2')) {
2297 phba->vpd_flag |= VPD_MODEL_NAME;
2304 phba->ModelName[j++] = vpd[(*pindex)++];
2308 phba->ModelName[j] = 0;
2310 } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '3')) {
2311 phba->vpd_flag |= VPD_PROGRAM_TYPE;
2318 phba->ProgramType[j++] = vpd[(*pindex)++];
2322 phba->ProgramType[j] = 0;
2324 } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '4')) {
2325 phba->vpd_flag |= VPD_PORT;
2332 if ((phba->sli_rev == LPFC_SLI_REV4) &&
2333 (phba->sli4_hba.pport_name_sta ==
2334 LPFC_SLI4_PPNAME_GET)) {
2338 phba->Port[j++] = vpd[(*pindex)++];
2342 if ((phba->sli_rev != LPFC_SLI_REV4) ||
2343 (phba->sli4_hba.pport_name_sta ==
2344 LPFC_SLI4_PPNAME_NON))
2358 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
2359 * @phba: pointer to lpfc hba data structure.
2360 * @vpd: pointer to the vital product data.
2361 * @len: length of the vital product data in bytes.
2363 * This routine parses the Vital Product Data (VPD). The VPD is treated as
2364 * an array of characters. In this routine, the ModelName, ProgramType,
2365 * ModelDesc, and related fields of the phba data structure are populated.
2368 * 0 - pointer to the VPD passed in is NULL
2372 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
2374 uint8_t lenlo, lenhi;
2384 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2385 "0455 Vital Product Data: x%x x%x x%x x%x\n",
2386 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
2388 while (!finished && (index < (len - 4))) {
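/* PCI VPD is a sequence of resource tags. Large-resource tags
 * (e.g. 0x82 identifier string, 0x90 read-only data) carry a 16-bit
 * little-endian length (lenlo, lenhi) that bounds the keyword data
 * handed to lpfc_fill_vpd() above.
 */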
2389 switch (vpd[index]) {
2397 i = ((((unsigned short)lenhi) << 8) + lenlo);
2406 Length = ((((unsigned short)lenhi) << 8) + lenlo);
2407 if (Length > len - index)
2408 Length = len - index;
2410 lpfc_fill_vpd(phba, vpd, Length, &index);
2426 * lpfc_get_atto_model_desc - Retrieve ATTO HBA device model name and description
2427 * @phba: pointer to lpfc hba data structure.
2428 * @mdp: pointer to the data structure to hold the derived model name.
2429 * @descp: pointer to the data structure to hold the derived description.
2431 * This routine retrieves HBA's description based on its registered PCI device
2432 * ID. The @descp passed into this function points to an array of 256 chars. It
2433 * shall be returned with the model name, maximum speed, and the host bus type.
2434 * The @mdp passed into this function points to an array of 80 chars. When the
2435 * function returns, the @mdp will be filled with the model name.
2438 lpfc_get_atto_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2440 uint16_t sub_dev_id = phba->pcidev->subsystem_device;
2441 char *model = "<Unknown>";
2444 switch (sub_dev_id) {
2445 case PCI_DEVICE_ID_CLRY_161E:
2448 case PCI_DEVICE_ID_CLRY_162E:
2451 case PCI_DEVICE_ID_CLRY_164E:
2454 case PCI_DEVICE_ID_CLRY_161P:
2457 case PCI_DEVICE_ID_CLRY_162P:
2460 case PCI_DEVICE_ID_CLRY_164P:
2463 case PCI_DEVICE_ID_CLRY_321E:
2466 case PCI_DEVICE_ID_CLRY_322E:
2469 case PCI_DEVICE_ID_CLRY_324E:
2472 case PCI_DEVICE_ID_CLRY_321P:
2475 case PCI_DEVICE_ID_CLRY_322P:
2478 case PCI_DEVICE_ID_CLRY_324P:
2481 case PCI_DEVICE_ID_TLFC_2XX2:
2485 case PCI_DEVICE_ID_TLFC_3162:
2489 case PCI_DEVICE_ID_TLFC_3322:
2498 if (mdp && mdp[0] == '\0')
2499 snprintf(mdp, 79, "%s", model);
2501 if (descp && descp[0] == '\0')
2502 snprintf(descp, 255,
2503 "ATTO %s%s, Fibre Channel Adapter Initiator, Port %s",
2504 (tbolt) ? "ThunderLink FC " : "Celerity FC-",
2510 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
2511 * @phba: pointer to lpfc hba data structure.
2512 * @mdp: pointer to the data structure to hold the derived model name.
2513 * @descp: pointer to the data structure to hold the derived description.
2515 * This routine retrieves HBA's description based on its registered PCI device
2516 * ID. The @descp passed into this function points to an array of 256 chars. It
2517 * shall be returned with the model name, maximum speed, and the host bus type.
2518 * The @mdp passed into this function points to an array of 80 chars. When the
2519 * function returns, the @mdp will be filled with the model name.
2522 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2525 uint16_t dev_id = phba->pcidev->device;
2528 int oneConnect = 0; /* default is not a oneConnect */
2533 } m = {"<Unknown>", "", ""};
2535 if (mdp && mdp[0] != '\0'
2536 && descp && descp[0] != '\0')
2539 if (phba->pcidev->vendor == PCI_VENDOR_ID_ATTO) {
2540 lpfc_get_atto_model_desc(phba, mdp, descp);
2544 if (phba->lmt & LMT_64Gb)
2546 else if (phba->lmt & LMT_32Gb)
2548 else if (phba->lmt & LMT_16Gb)
2550 else if (phba->lmt & LMT_10Gb)
2552 else if (phba->lmt & LMT_8Gb)
2554 else if (phba->lmt & LMT_4Gb)
2556 else if (phba->lmt & LMT_2Gb)
2558 else if (phba->lmt & LMT_1Gb)
2566 case PCI_DEVICE_ID_FIREFLY:
2567 m = (typeof(m)){"LP6000", "PCI",
2568 "Obsolete, Unsupported Fibre Channel Adapter"};
2570 case PCI_DEVICE_ID_SUPERFLY:
2571 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
2572 m = (typeof(m)){"LP7000", "PCI", ""};
2574 m = (typeof(m)){"LP7000E", "PCI", ""};
2575 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2577 case PCI_DEVICE_ID_DRAGONFLY:
2578 m = (typeof(m)){"LP8000", "PCI",
2579 "Obsolete, Unsupported Fibre Channel Adapter"};
2581 case PCI_DEVICE_ID_CENTAUR:
2582 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
2583 m = (typeof(m)){"LP9002", "PCI", ""};
2585 m = (typeof(m)){"LP9000", "PCI", ""};
2586 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2588 case PCI_DEVICE_ID_RFLY:
2589 m = (typeof(m)){"LP952", "PCI",
2590 "Obsolete, Unsupported Fibre Channel Adapter"};
2592 case PCI_DEVICE_ID_PEGASUS:
2593 m = (typeof(m)){"LP9802", "PCI-X",
2594 "Obsolete, Unsupported Fibre Channel Adapter"};
2596 case PCI_DEVICE_ID_THOR:
2597 m = (typeof(m)){"LP10000", "PCI-X",
2598 "Obsolete, Unsupported Fibre Channel Adapter"};
2600 case PCI_DEVICE_ID_VIPER:
2601 m = (typeof(m)){"LPX1000", "PCI-X",
2602 "Obsolete, Unsupported Fibre Channel Adapter"};
2604 case PCI_DEVICE_ID_PFLY:
2605 m = (typeof(m)){"LP982", "PCI-X",
2606 "Obsolete, Unsupported Fibre Channel Adapter"};
2608 case PCI_DEVICE_ID_TFLY:
2609 m = (typeof(m)){"LP1050", "PCI-X",
2610 "Obsolete, Unsupported Fibre Channel Adapter"};
2612 case PCI_DEVICE_ID_HELIOS:
2613 m = (typeof(m)){"LP11000", "PCI-X2",
2614 "Obsolete, Unsupported Fibre Channel Adapter"};
2616 case PCI_DEVICE_ID_HELIOS_SCSP:
2617 m = (typeof(m)){"LP11000-SP", "PCI-X2",
2618 "Obsolete, Unsupported Fibre Channel Adapter"};
2620 case PCI_DEVICE_ID_HELIOS_DCSP:
2621 m = (typeof(m)){"LP11002-SP", "PCI-X2",
2622 "Obsolete, Unsupported Fibre Channel Adapter"};
2624 case PCI_DEVICE_ID_NEPTUNE:
2625 m = (typeof(m)){"LPe1000", "PCIe",
2626 "Obsolete, Unsupported Fibre Channel Adapter"};
2628 case PCI_DEVICE_ID_NEPTUNE_SCSP:
2629 m = (typeof(m)){"LPe1000-SP", "PCIe",
2630 "Obsolete, Unsupported Fibre Channel Adapter"};
2632 case PCI_DEVICE_ID_NEPTUNE_DCSP:
2633 m = (typeof(m)){"LPe1002-SP", "PCIe",
2634 "Obsolete, Unsupported Fibre Channel Adapter"};
2636 case PCI_DEVICE_ID_BMID:
2637 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
2639 case PCI_DEVICE_ID_BSMB:
2640 m = (typeof(m)){"LP111", "PCI-X2",
2641 "Obsolete, Unsupported Fibre Channel Adapter"};
2643 case PCI_DEVICE_ID_ZEPHYR:
2644 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2646 case PCI_DEVICE_ID_ZEPHYR_SCSP:
2647 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2649 case PCI_DEVICE_ID_ZEPHYR_DCSP:
2650 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
2653 case PCI_DEVICE_ID_ZMID:
2654 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
2656 case PCI_DEVICE_ID_ZSMB:
2657 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
2659 case PCI_DEVICE_ID_LP101:
2660 m = (typeof(m)){"LP101", "PCI-X",
2661 "Obsolete, Unsupported Fibre Channel Adapter"};
2663 case PCI_DEVICE_ID_LP10000S:
2664 m = (typeof(m)){"LP10000-S", "PCI",
2665 "Obsolete, Unsupported Fibre Channel Adapter"};
2667 case PCI_DEVICE_ID_LP11000S:
2668 m = (typeof(m)){"LP11000-S", "PCI-X2",
2669 "Obsolete, Unsupported Fibre Channel Adapter"};
2671 case PCI_DEVICE_ID_LPE11000S:
2672 m = (typeof(m)){"LPe11000-S", "PCIe",
2673 "Obsolete, Unsupported Fibre Channel Adapter"};
2675 case PCI_DEVICE_ID_SAT:
2676 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
2678 case PCI_DEVICE_ID_SAT_MID:
2679 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
2681 case PCI_DEVICE_ID_SAT_SMB:
2682 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
2684 case PCI_DEVICE_ID_SAT_DCSP:
2685 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
2687 case PCI_DEVICE_ID_SAT_SCSP:
2688 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
2690 case PCI_DEVICE_ID_SAT_S:
2691 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
2693 case PCI_DEVICE_ID_PROTEUS_VF:
2694 m = (typeof(m)){"LPev12000", "PCIe IOV",
2695 "Obsolete, Unsupported Fibre Channel Adapter"};
2697 case PCI_DEVICE_ID_PROTEUS_PF:
2698 m = (typeof(m)){"LPev12000", "PCIe IOV",
2699 "Obsolete, Unsupported Fibre Channel Adapter"};
2701 case PCI_DEVICE_ID_PROTEUS_S:
2702 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
2703 "Obsolete, Unsupported Fibre Channel Adapter"};
2705 case PCI_DEVICE_ID_TIGERSHARK:
2707 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
2709 case PCI_DEVICE_ID_TOMCAT:
2711 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2713 case PCI_DEVICE_ID_FALCON:
2714 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2715 "EmulexSecure Fibre"};
2717 case PCI_DEVICE_ID_BALIUS:
2718 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2719 "Obsolete, Unsupported Fibre Channel Adapter"};
2721 case PCI_DEVICE_ID_LANCER_FC:
2722 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
2724 case PCI_DEVICE_ID_LANCER_FC_VF:
2725 m = (typeof(m)){"LPe16000", "PCIe",
2726 "Obsolete, Unsupported Fibre Channel Adapter"};
2728 case PCI_DEVICE_ID_LANCER_FCOE:
2730 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2732 case PCI_DEVICE_ID_LANCER_FCOE_VF:
2734 m = (typeof(m)){"OCe15100", "PCIe",
2735 "Obsolete, Unsupported FCoE"};
2737 case PCI_DEVICE_ID_LANCER_G6_FC:
2738 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
2740 case PCI_DEVICE_ID_LANCER_G7_FC:
2741 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
2743 case PCI_DEVICE_ID_LANCER_G7P_FC:
2744 m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"};
2746 case PCI_DEVICE_ID_SKYHAWK:
2747 case PCI_DEVICE_ID_SKYHAWK_VF:
2749 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2752 m = (typeof(m)){"Unknown", "", ""};
2756 if (mdp && mdp[0] == '\0')
2757 snprintf(mdp, 79, "%s", m.name);
2759 * OneConnect HBAs require special processing; they are all initiators
2760 * and we put the port number on the end.
2762 if (descp && descp[0] == '\0') {
2764 snprintf(descp, 255,
2765 "Emulex OneConnect %s, %s Initiator %s",
2768 else if (max_speed == 0)
2769 snprintf(descp, 255,
2771 m.name, m.bus, m.function);
2773 snprintf(descp, 255,
2774 "Emulex %s %d%s %s %s",
2775 m.name, max_speed, (GE) ? "GE" : "Gb",
2781 * lpfc_sli3_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
2782 * @phba: pointer to lpfc hba data structure.
2783 * @pring: pointer to an IOCB ring.
2784 * @cnt: the number of IOCBs to be posted to the IOCB ring.
2786 * This routine posts a given number of IOCBs with the associated DMA buffer
2787 * descriptors specified by the cnt argument to the given IOCB ring.
2790 * The number of IOCBs NOT able to be posted to the IOCB ring.
2793 lpfc_sli3_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2796 struct lpfc_iocbq *iocb;
2797 struct lpfc_dmabuf *mp1, *mp2;
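/* Carry over any buffers that could not be posted on a previous call. */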
2799 cnt += pring->missbufcnt;
2801 /* While there are buffers to post */
2803 /* Allocate buffer for command iocb */
2804 iocb = lpfc_sli_get_iocbq(phba);
2806 pring->missbufcnt = cnt;
2811 /* 2 buffers can be posted per command */
2812 /* Allocate buffer to post */
2813 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2815 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2816 if (!mp1 || !mp1->virt) {
2818 lpfc_sli_release_iocbq(phba, iocb);
2819 pring->missbufcnt = cnt;
2823 INIT_LIST_HEAD(&mp1->list);
2824 /* Allocate buffer to post */
2826 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2828 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2830 if (!mp2 || !mp2->virt) {
2832 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2834 lpfc_sli_release_iocbq(phba, iocb);
2835 pring->missbufcnt = cnt;
2839 INIT_LIST_HEAD(&mp2->list);
2844 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2845 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2846 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2847 icmd->ulpBdeCount = 1;
2850 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2851 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2852 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2854 icmd->ulpBdeCount = 2;
2857 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2860 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2862 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2866 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2870 lpfc_sli_release_iocbq(phba, iocb);
2871 pring->missbufcnt = cnt;
2874 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2876 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2878 pring->missbufcnt = 0;
2883 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
2884 * @phba: pointer to lpfc hba data structure.
2886 * This routine posts initial receive IOCB buffers to the ELS ring. The
2887 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
2888 * set to 64 IOCBs. SLI3 only.
2891 * 0 - success (currently always success)
2894 lpfc_post_rcv_buf(struct lpfc_hba *phba)
2896 struct lpfc_sli *psli = &phba->sli;
2898 /* Ring 0, ELS / CT buffers */
2899 lpfc_sli3_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2900 /* Ring 2 - FCP no buffers needed */
2905 #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
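/* S(N, V) rotates the 32-bit value V left by N bits; it is the ROTL
 * primitive used by the SHA-1 style hash routines below.
 */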
2908 * lpfc_sha_init - Set up initial array of hash table entries
2909 * @HashResultPointer: pointer to an array as hash table.
2911 * This routine sets up the initial values in the array of hash table entries
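 * using the standard SHA-1 initialization constants (FIPS 180-1).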
2915 lpfc_sha_init(uint32_t * HashResultPointer)
2917 HashResultPointer[0] = 0x67452301;
2918 HashResultPointer[1] = 0xEFCDAB89;
2919 HashResultPointer[2] = 0x98BADCFE;
2920 HashResultPointer[3] = 0x10325476;
2921 HashResultPointer[4] = 0xC3D2E1F0;
2925 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2926 * @HashResultPointer: pointer to an initial/result hash table.
2927 * @HashWorkingPointer: pointer to a working hash table.
2929 * This routine iterates an initial hash table pointed to by @HashResultPointer
2930 * with the values from the working hash table pointed to by @HashWorkingPointer.
2931 * The results are put back into the initial hash table and returned through
2932 * @HashResultPointer as the result hash table.
2935 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2939 uint32_t A, B, C, D, E;
2942 HashWorkingPointer[t] =
2944 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2946 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2947 } while (++t <= 79);
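/* The message schedule is now fully expanded using the SHA-1 style
 * recurrence W[t] = ROTL1(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]).
 */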
2949 A = HashResultPointer[0];
2950 B = HashResultPointer[1];
2951 C = HashResultPointer[2];
2952 D = HashResultPointer[3];
2953 E = HashResultPointer[4];
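/* 80 compression rounds follow; the boolean function and additive
 * constant change every 20 rounds, exactly as in SHA-1.
 */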
2957 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2958 } else if (t < 40) {
2959 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2960 } else if (t < 60) {
2961 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2963 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2965 TEMP += S(5, A) + E + HashWorkingPointer[t];
2971 } while (++t <= 79);
2973 HashResultPointer[0] += A;
2974 HashResultPointer[1] += B;
2975 HashResultPointer[2] += C;
2976 HashResultPointer[3] += D;
2977 HashResultPointer[4] += E;
2982 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2983 * @RandomChallenge: pointer to the entry of host challenge random number array.
2984 * @HashWorking: pointer to the entry of the working hash array.
2986 * This routine calculates the working hash array referred to by @HashWorking
2987 * from the challenge random numbers associated with the host, referred to by
2988 * @RandomChallenge. The result is put into the entry of the working hash
2989 * array and returned by reference through @HashWorking.
2992 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2994 *HashWorking = (*RandomChallenge ^ *HashWorking);
2998 * lpfc_hba_init - Perform special handling for LC HBA initialization
2999 * @phba: pointer to lpfc hba data structure.
3000 * @hbainit: pointer to an array of unsigned 32-bit integers.
3002 * This routine performs the special handling for LC HBA initialization.
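 * The working hash input is seeded with the adapter WWNN and XORed with
 * the host challenge words (RandomData) before the SHA-1 style digest is
 * computed into @hbainit.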
3005 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
3008 uint32_t *HashWorking;
3009 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
3011 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
3015 HashWorking[0] = HashWorking[78] = *pwwnn++;
3016 HashWorking[1] = HashWorking[79] = *pwwnn;
3018 for (t = 0; t < 7; t++)
3019 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
3021 lpfc_sha_init(hbainit);
3022 lpfc_sha_iterate(hbainit, HashWorking);
3027 * lpfc_cleanup - Performs vport cleanups before deleting a vport
3028 * @vport: pointer to a virtual N_Port data structure.
3030 * This routine performs the necessary cleanups before deleting the @vport.
3031 * It invokes the discovery state machine to perform necessary state
3032 * transitions and to release the ndlps associated with the @vport. Note,
3033 * the physical port is treated as @vport 0.
3036 lpfc_cleanup(struct lpfc_vport *vport)
3038 struct lpfc_hba *phba = vport->phba;
3039 struct lpfc_nodelist *ndlp, *next_ndlp;
3042 if (phba->link_state > LPFC_LINK_DOWN)
3043 lpfc_port_link_failure(vport);
3045 /* Clean up VMID resources */
3046 if (lpfc_is_vmid_enabled(phba))
3047 lpfc_vmid_vport_cleanup(vport);
3049 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
3050 if (vport->port_type != LPFC_PHYSICAL_PORT &&
3051 ndlp->nlp_DID == Fabric_DID) {
3052 /* Just free up ndlp with Fabric_DID for vports */
3057 if (ndlp->nlp_DID == Fabric_Cntl_DID &&
3058 ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
3063 /* Fabric Ports not in UNMAPPED state are cleaned up in the
3066 if (ndlp->nlp_type & NLP_FABRIC &&
3067 ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
3068 lpfc_disc_state_machine(vport, ndlp, NULL,
3069 NLP_EVT_DEVICE_RECOVERY);
3071 if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD)))
3072 lpfc_disc_state_machine(vport, ndlp, NULL,
3076 /* This is a special case flush to return all
3077 * IOs before entering this loop. There are
3078 * two points in the code where a flush is
3079 * avoided if the FC_UNLOADING flag is set:
3080 * one is in the multipool destroy
3081 * (this prevents a crash) and the other is
3082 * in the nvme abort handler (also prevents
3083 * a crash). Both of these exceptions are
3084 * cases where the slot is still accessible.
3085 * The flush here runs only when the pci slot is offline. */
3088 if (vport->load_flag & FC_UNLOADING &&
3089 pci_channel_offline(phba->pcidev))
3090 lpfc_sli_flush_io_rings(vport->phba);
3092 /* At this point, ALL ndlp's should be gone
3093 * because of the previous NLP_EVT_DEVICE_RM.
3094 * Let's wait for this to happen, if needed.
3096 while (!list_empty(&vport->fc_nodes)) {
3098 lpfc_printf_vlog(vport, KERN_ERR,
3100 "0233 Nodelist not empty\n");
3101 list_for_each_entry_safe(ndlp, next_ndlp,
3102 &vport->fc_nodes, nlp_listp) {
3103 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
3105 "0282 did:x%x ndlp:x%px "
3106 "refcnt:%d xflags x%x nflag x%x\n",
3107 ndlp->nlp_DID, (void *)ndlp,
3108 kref_read(&ndlp->kref),
3109 ndlp->fc4_xpt_flags,
3115 /* Wait for any activity on ndlps to settle */
3118 lpfc_cleanup_vports_rrqs(vport, NULL);
3122 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
3123 * @vport: pointer to a virtual N_Port data structure.
3125 * This routine stops all the timers associated with a @vport. This function
3126 * is invoked before disabling or deleting a @vport. Note that the physical
3127 * port is treated as @vport 0.
3130 lpfc_stop_vport_timers(struct lpfc_vport *vport)
3132 del_timer_sync(&vport->els_tmofunc);
3133 del_timer_sync(&vport->delayed_disc_tmo);
3134 lpfc_can_disctmo(vport);
3139 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
3140 * @phba: pointer to lpfc hba data structure.
3142 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
3143 * caller of this routine should already hold the host lock.
3146 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
3148 /* Clear pending FCF rediscovery wait flag */
3149 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
3151 /* Now, try to stop the timer */
3152 del_timer(&phba->fcf.redisc_wait);
3156 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
3157 * @phba: pointer to lpfc hba data structure.
3159 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
3160 * checks whether the FCF rediscovery wait timer is pending with the host
3161 * lock held before proceeding with disabling the timer and clearing the
3162 * wait timer pending flag.
3165 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
3167 spin_lock_irq(&phba->hbalock);
3168 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
3169 /* FCF rediscovery timer already fired or stopped */
3170 spin_unlock_irq(&phba->hbalock);
3173 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3174 /* Clear failover in progress flags */
3175 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
3176 spin_unlock_irq(&phba->hbalock);
3180 * lpfc_cmf_stop - Stop CMF processing
3181 * @phba: pointer to lpfc hba data structure.
3183 * This is called when the link goes down or if CMF mode is turned OFF.
3184 * It is also called when going offline or unloading, just before the
3185 * congestion info buffer is unregistered.
3188 lpfc_cmf_stop(struct lpfc_hba *phba)
3191 struct lpfc_cgn_stat *cgs;
3193 /* We only do something if CMF is enabled */
3194 if (!phba->sli4_hba.pc_sli4_params.cmf)
3197 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3198 "6221 Stop CMF / Cancel Timer\n");
3200 /* Cancel the CMF timer */
3201 hrtimer_cancel(&phba->cmf_timer);
3203 /* Zero CMF counters */
3204 atomic_set(&phba->cmf_busy, 0);
3205 for_each_present_cpu(cpu) {
3206 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3207 atomic64_set(&cgs->total_bytes, 0);
3208 atomic64_set(&cgs->rcv_bytes, 0);
3209 atomic_set(&cgs->rx_io_cnt, 0);
3210 atomic64_set(&cgs->rx_latency, 0);
3212 atomic_set(&phba->cmf_bw_wait, 0);
3214 /* Resume any blocked IO - Queue unblock on workqueue */
3215 queue_work(phba->wq, &phba->unblock_request_work);
3218 static inline uint64_t
3219 lpfc_get_max_line_rate(struct lpfc_hba *phba)
3221 uint64_t rate = lpfc_sli_port_speed_get(phba);
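/* Convert the link speed (megabits/sec) to bytes/sec; the divide by 10
 * assumes roughly 10 encoded bits per byte on the wire.
 */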
3223 return ((((unsigned long)rate) * 1024 * 1024) / 10);
3227 lpfc_cmf_signal_init(struct lpfc_hba *phba)
3229 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3230 "6223 Signal CMF init\n");
3232 /* Use the new fc_linkspeed to recalculate */
3233 phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
3234 phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba);
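/* Bytes transferable in one CMF interval at full line rate:
 * line rate (bytes/sec) * interval (ms) / 1000.
 */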
3235 phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
3236 phba->cmf_interval_rate, 1000);
3237 phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count;
3239 /* This is a signal to firmware to sync up CMF BW with link speed */
3240 lpfc_issue_cmf_sync_wqe(phba, 0, 0);
3244 * lpfc_cmf_start - Start CMF processing
3245 * @phba: pointer to lpfc hba data structure.
3247 * This is called when the link comes up or if CMF mode is switched
3248 * from OFF to Monitor or Managed.
3251 lpfc_cmf_start(struct lpfc_hba *phba)
3253 struct lpfc_cgn_stat *cgs;
3256 /* We only do something if CMF is enabled */
3257 if (!phba->sli4_hba.pc_sli4_params.cmf ||
3258 phba->cmf_active_mode == LPFC_CFG_OFF)
3261 /* Reinitialize congestion buffer info */
3262 lpfc_init_congestion_buf(phba);
3264 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
3265 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
3266 atomic_set(&phba->cgn_sync_alarm_cnt, 0);
3267 atomic_set(&phba->cgn_sync_warn_cnt, 0);
3269 atomic_set(&phba->cmf_busy, 0);
3270 for_each_present_cpu(cpu) {
3271 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3272 atomic64_set(&cgs->total_bytes, 0);
3273 atomic64_set(&cgs->rcv_bytes, 0);
3274 atomic_set(&cgs->rx_io_cnt, 0);
3275 atomic64_set(&cgs->rx_latency, 0);
3277 phba->cmf_latency.tv_sec = 0;
3278 phba->cmf_latency.tv_nsec = 0;
3280 lpfc_cmf_signal_init(phba);
3282 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3283 "6222 Start CMF / Timer\n");
3285 phba->cmf_timer_cnt = 0;
3286 hrtimer_start(&phba->cmf_timer,
3287 ktime_set(0, LPFC_CMF_INTERVAL * 1000000),
3289 /* Setup for latency check in IO cmpl routines */
3290 ktime_get_real_ts64(&phba->cmf_latency);
3292 atomic_set(&phba->cmf_bw_wait, 0);
3293 atomic_set(&phba->cmf_stop_io, 0);
3297 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
3298 * @phba: pointer to lpfc hba data structure.
3300 * This routine stops all the timers associated with a HBA. This function is
3301 * invoked before either putting a HBA offline or unloading the driver.
3304 lpfc_stop_hba_timers(struct lpfc_hba *phba)
3307 lpfc_stop_vport_timers(phba->pport);
3308 cancel_delayed_work_sync(&phba->eq_delay_work);
3309 cancel_delayed_work_sync(&phba->idle_stat_delay_work);
3310 del_timer_sync(&phba->sli.mbox_tmo);
3311 del_timer_sync(&phba->fabric_block_timer);
3312 del_timer_sync(&phba->eratt_poll);
3313 del_timer_sync(&phba->hb_tmofunc);
3314 if (phba->sli_rev == LPFC_SLI_REV4) {
3315 del_timer_sync(&phba->rrq_tmr);
3316 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
3318 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
3320 switch (phba->pci_dev_grp) {
3321 case LPFC_PCI_DEV_LP:
3322 /* Stop any LightPulse device specific driver timers */
3323 del_timer_sync(&phba->fcp_poll_timer);
3325 case LPFC_PCI_DEV_OC:
3326 /* Stop any OneConnect device specific driver timers */
3327 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3330 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3331 "0297 Invalid device group (x%x)\n",
3339 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
3340 * @phba: pointer to lpfc hba data structure.
3341 * @mbx_action: flag for mailbox no wait action.
3343 * This routine marks a HBA's management interface as blocked. Once the HBA's
3344 * management interface is marked as blocked, all user space access to
3345 * the HBA, whether from the sysfs interface or the libdfc interface, will
3346 * be blocked. The HBA is set to block the management interface when the
3347 * driver prepares the HBA interface for online or offline.
3350 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
3352 unsigned long iflag;
3353 uint8_t actcmd = MBX_HEARTBEAT;
3354 unsigned long timeout;
3356 spin_lock_irqsave(&phba->hbalock, iflag);
3357 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
3358 spin_unlock_irqrestore(&phba->hbalock, iflag);
3359 if (mbx_action == LPFC_MBX_NO_WAIT)
3361 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
3362 spin_lock_irqsave(&phba->hbalock, iflag);
3363 if (phba->sli.mbox_active) {
3364 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
3365 /* Determine how long we might wait for the active mailbox
3366 * command to be gracefully completed by firmware.
3368 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
3369 phba->sli.mbox_active) * 1000) + jiffies;
3371 spin_unlock_irqrestore(&phba->hbalock, iflag);
3373 /* Wait for the outstanding mailbox command to complete */
3374 while (phba->sli.mbox_active) {
3375 /* Check active mailbox complete status every 2ms */
3377 if (time_after(jiffies, timeout)) {
3378 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3379 "2813 Mgmt IO is Blocked %x "
3380 "- mbox cmd %x still active\n",
3381 phba->sli.sli_flag, actcmd);
3388 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
3389 * @phba: pointer to lpfc hba data structure.
3391 * Allocate RPIs for all active remote nodes. This is needed whenever
3392 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
3393 * is to fix up the temporary RPI assignments.
3396 lpfc_sli4_node_prep(struct lpfc_hba *phba)
3398 struct lpfc_nodelist *ndlp, *next_ndlp;
3399 struct lpfc_vport **vports;
3402 if (phba->sli_rev != LPFC_SLI_REV4)
3405 vports = lpfc_create_vport_work_array(phba);
3409 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3410 if (vports[i]->load_flag & FC_UNLOADING)
3413 list_for_each_entry_safe(ndlp, next_ndlp,
3414 &vports[i]->fc_nodes,
3416 rpi = lpfc_sli4_alloc_rpi(phba);
3417 if (rpi == LPFC_RPI_ALLOC_ERROR) {
3418 /* TODO print log? */
3421 ndlp->nlp_rpi = rpi;
3422 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3423 LOG_NODE | LOG_DISCOVERY,
3424 "0009 Assign RPI x%x to ndlp x%px "
3425 "DID:x%06x flg:x%x\n",
3426 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
3430 lpfc_destroy_vport_work_array(phba, vports);
3434 * lpfc_create_expedite_pool - create expedite pool
3435 * @phba: pointer to lpfc hba data structure.
3437 * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0
3438 * to the expedite pool and marks them as expedite.
3440 static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
3442 struct lpfc_sli4_hdw_queue *qp;
3443 struct lpfc_io_buf *lpfc_ncmd;
3444 struct lpfc_io_buf *lpfc_ncmd_next;
3445 struct lpfc_epd_pool *epd_pool;
3446 unsigned long iflag;
3448 epd_pool = &phba->epd_pool;
3449 qp = &phba->sli4_hba.hdwq[0];
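/* Seed the expedite pool with up to XRI_BATCH buffers taken from
 * HWQ 0's put list; each one is flagged as expedite.
 */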
3451 spin_lock_init(&epd_pool->lock);
3452 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3453 spin_lock(&epd_pool->lock);
3454 INIT_LIST_HEAD(&epd_pool->list);
3455 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3456 &qp->lpfc_io_buf_list_put, list) {
3457 list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
3458 lpfc_ncmd->expedite = true;
3461 if (epd_pool->count >= XRI_BATCH)
3464 spin_unlock(&epd_pool->lock);
3465 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3469 * lpfc_destroy_expedite_pool - destroy expedite pool
3470 * @phba: pointer to lpfc hba data structure.
3472 * This routine returns XRIs from the expedite pool to lpfc_io_buf_list_put
3473 * of HWQ 0 and clears the expedite mark.
3475 static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
3477 struct lpfc_sli4_hdw_queue *qp;
3478 struct lpfc_io_buf *lpfc_ncmd;
3479 struct lpfc_io_buf *lpfc_ncmd_next;
3480 struct lpfc_epd_pool *epd_pool;
3481 unsigned long iflag;
3483 epd_pool = &phba->epd_pool;
3484 qp = &phba->sli4_hba.hdwq[0];
3486 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3487 spin_lock(&epd_pool->lock);
3488 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3489 &epd_pool->list, list) {
3490 list_move_tail(&lpfc_ncmd->list,
3491 &qp->lpfc_io_buf_list_put);
3492 lpfc_ncmd->expedite = false;
3496 spin_unlock(&epd_pool->lock);
3497 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3501 * lpfc_create_multixri_pools - create multi-XRI pools
3502 * @phba: pointer to lpfc hba data structure.
3504 * This routine initializes the public and private pools per HWQ. Then it
3505 * moves XRIs from lpfc_io_buf_list_put to the public pool and sets the high and low watermarks.
3508 void lpfc_create_multixri_pools(struct lpfc_hba *phba)
3513 struct lpfc_io_buf *lpfc_ncmd;
3514 struct lpfc_io_buf *lpfc_ncmd_next;
3515 unsigned long iflag;
3516 struct lpfc_sli4_hdw_queue *qp;
3517 struct lpfc_multixri_pool *multixri_pool;
3518 struct lpfc_pbl_pool *pbl_pool;
3519 struct lpfc_pvt_pool *pvt_pool;
3521 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3522 "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
3523 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
3524 phba->sli4_hba.io_xri_cnt);
3526 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3527 lpfc_create_expedite_pool(phba);
3529 hwq_count = phba->cfg_hdw_queue;
3530 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;
3532 for (i = 0; i < hwq_count; i++) {
3533 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);
3535 if (!multixri_pool) {
3536 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3537 "1238 Failed to allocate memory for "
3540 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3541 lpfc_destroy_expedite_pool(phba);
3545 qp = &phba->sli4_hba.hdwq[j];
3546 kfree(qp->p_multixri_pool);
3549 phba->cfg_xri_rebalancing = 0;
3553 qp = &phba->sli4_hba.hdwq[i];
3554 qp->p_multixri_pool = multixri_pool;
3556 multixri_pool->xri_limit = count_per_hwq;
3557 multixri_pool->rrb_next_hwqid = i;
3559 /* Deal with public free xri pool */
3560 pbl_pool = &multixri_pool->pbl_pool;
3561 spin_lock_init(&pbl_pool->lock);
3562 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3563 spin_lock(&pbl_pool->lock);
3564 INIT_LIST_HEAD(&pbl_pool->list);
3565 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3566 &qp->lpfc_io_buf_list_put, list) {
3567 list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
3571 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3572 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
3573 pbl_pool->count, i);
3574 spin_unlock(&pbl_pool->lock);
3575 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3577 /* Deal with private free xri pool */
3578 pvt_pool = &multixri_pool->pvt_pool;
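/* Private pool watermarks, used by the XRI rebalancing logic:
 * low = XRI_BATCH, high = half of this HWQ's XRI limit.
 */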
3579 pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
3580 pvt_pool->low_watermark = XRI_BATCH;
3581 spin_lock_init(&pvt_pool->lock);
3582 spin_lock_irqsave(&pvt_pool->lock, iflag);
3583 INIT_LIST_HEAD(&pvt_pool->list);
3584 pvt_pool->count = 0;
3585 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
3590 * lpfc_destroy_multixri_pools - destroy multi-XRI pools
3591 * @phba: pointer to lpfc hba data structure.
3593 * This routine returns XRIs from public/private to lpfc_io_buf_list_put.
3595 static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
3599 struct lpfc_io_buf *lpfc_ncmd;
3600 struct lpfc_io_buf *lpfc_ncmd_next;
3601 unsigned long iflag;
3602 struct lpfc_sli4_hdw_queue *qp;
3603 struct lpfc_multixri_pool *multixri_pool;
3604 struct lpfc_pbl_pool *pbl_pool;
3605 struct lpfc_pvt_pool *pvt_pool;
3607 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3608 lpfc_destroy_expedite_pool(phba);
3610 if (!(phba->pport->load_flag & FC_UNLOADING))
3611 lpfc_sli_flush_io_rings(phba);
3613 hwq_count = phba->cfg_hdw_queue;
3615 for (i = 0; i < hwq_count; i++) {
3616 qp = &phba->sli4_hba.hdwq[i];
3617 multixri_pool = qp->p_multixri_pool;
3621 qp->p_multixri_pool = NULL;
3623 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3625 /* Deal with public free xri pool */
3626 pbl_pool = &multixri_pool->pbl_pool;
3627 spin_lock(&pbl_pool->lock);
3629 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3630 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
3631 pbl_pool->count, i);
3633 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3634 &pbl_pool->list, list) {
3635 list_move_tail(&lpfc_ncmd->list,
3636 &qp->lpfc_io_buf_list_put);
3641 INIT_LIST_HEAD(&pbl_pool->list);
3642 pbl_pool->count = 0;
3644 spin_unlock(&pbl_pool->lock);
3646 /* Deal with private free xri pool */
3647 pvt_pool = &multixri_pool->pvt_pool;
3648 spin_lock(&pvt_pool->lock);
3650 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3651 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
3652 pvt_pool->count, i);
3654 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3655 &pvt_pool->list, list) {
3656 list_move_tail(&lpfc_ncmd->list,
3657 &qp->lpfc_io_buf_list_put);
3662 INIT_LIST_HEAD(&pvt_pool->list);
3663 pvt_pool->count = 0;
3665 spin_unlock(&pvt_pool->lock);
3666 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3668 kfree(multixri_pool);
3673 * lpfc_online - Initialize and bring a HBA online
3674 * @phba: pointer to lpfc hba data structure.
3676 * This routine initializes the HBA and brings it online. During this
3677 * process, the management interface is blocked to prevent user space access
3678 * to the HBA interfering with the driver initialization.
3685 lpfc_online(struct lpfc_hba *phba)
3687 struct lpfc_vport *vport;
3688 struct lpfc_vport **vports;
3690 bool vpis_cleared = false;
3694 vport = phba->pport;
3696 if (!(vport->fc_flag & FC_OFFLINE_MODE))
3699 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3700 "0458 Bring Adapter online\n");
3702 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
3704 if (phba->sli_rev == LPFC_SLI_REV4) {
3705 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
3706 lpfc_unblock_mgmt_io(phba);
3709 spin_lock_irq(&phba->hbalock);
3710 if (!phba->sli4_hba.max_cfg_param.vpi_used)
3711 vpis_cleared = true;
3712 spin_unlock_irq(&phba->hbalock);
3714 /* Reestablish the local initiator port.
3715 * The offline process destroyed the previous lport.
3717 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
3718 !phba->nvmet_support) {
3719 error = lpfc_nvme_create_localport(phba->pport);
3721 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3722 "6132 NVME restore reg failed "
3723 "on nvmei error x%x\n", error);
3726 lpfc_sli_queue_init(phba);
3727 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
3728 lpfc_unblock_mgmt_io(phba);
3733 vports = lpfc_create_vport_work_array(phba);
3734 if (vports != NULL) {
3735 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3736 struct Scsi_Host *shost;
3737 shost = lpfc_shost_from_vport(vports[i]);
3738 spin_lock_irq(shost->host_lock);
3739 vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
3740 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
3741 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3742 if (phba->sli_rev == LPFC_SLI_REV4) {
3743 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
3744 if ((vpis_cleared) &&
3745 (vports[i]->port_type !=
3746 LPFC_PHYSICAL_PORT))
3749 spin_unlock_irq(shost->host_lock);
3752 lpfc_destroy_vport_work_array(phba, vports);
3754 if (phba->cfg_xri_rebalancing)
3755 lpfc_create_multixri_pools(phba);
3757 lpfc_cpuhp_add(phba);
3759 lpfc_unblock_mgmt_io(phba);
3764 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
3765 * @phba: pointer to lpfc hba data structure.
3767 * This routine marks a HBA's management interface as not blocked. Once the
3768 * HBA's management interface is marked as not blocked, all user space
3769 * access to the HBA, whether from the sysfs interface or the libdfc
3770 * interface, will be allowed. The HBA is set to block the management interface
3771 * when the driver prepares the HBA interface for online or offline and then
3772 * set to unblock the management interface afterwards.
3775 lpfc_unblock_mgmt_io(struct lpfc_hba *phba)
3777 unsigned long iflag;
3779 spin_lock_irqsave(&phba->hbalock, iflag);
3780 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3781 spin_unlock_irqrestore(&phba->hbalock, iflag);
3785 * lpfc_offline_prep - Prepare a HBA to be brought offline
3786 * @phba: pointer to lpfc hba data structure.
3787 * @mbx_action: flag for mailbox shutdown action.
3789 * This routine is invoked to prepare a HBA to be brought offline. It performs
3790 * unregistration login to all the nodes on all vports and flushes the mailbox
3791 * queue to make it ready to be brought offline.
3794 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
3796 struct lpfc_vport *vport = phba->pport;
3797 struct lpfc_nodelist *ndlp, *next_ndlp;
3798 struct lpfc_vport **vports;
3799 struct Scsi_Host *shost;
3804 if (vport->fc_flag & FC_OFFLINE_MODE)
3807 lpfc_block_mgmt_io(phba, mbx_action);
3809 lpfc_linkdown(phba);
3811 offline = pci_channel_offline(phba->pcidev);
3812 hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
3814 /* Issue an unreg_login to all nodes on all vports */
3815 vports = lpfc_create_vport_work_array(phba);
3816 if (vports != NULL) {
3817 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3818 if (vports[i]->load_flag & FC_UNLOADING)
3820 shost = lpfc_shost_from_vport(vports[i]);
3821 spin_lock_irq(shost->host_lock);
3822 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
3823 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3824 vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
3825 spin_unlock_irq(shost->host_lock);
3827 shost = lpfc_shost_from_vport(vports[i]);
3828 list_for_each_entry_safe(ndlp, next_ndlp,
3829 &vports[i]->fc_nodes,
3832 spin_lock_irq(&ndlp->lock);
3833 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
3834 spin_unlock_irq(&ndlp->lock);
3836 if (offline || hba_pci_err) {
3837 spin_lock_irq(&ndlp->lock);
3838 ndlp->nlp_flag &= ~(NLP_UNREG_INP |
3839 NLP_RPI_REGISTERED);
3840 spin_unlock_irq(&ndlp->lock);
3841 if (phba->sli_rev == LPFC_SLI_REV4)
3842 lpfc_sli_rpi_release(vports[i],
3845 lpfc_unreg_rpi(vports[i], ndlp);
3848 * Whenever an SLI4 port goes offline, free the
3849 * RPI. Get a new RPI when the adapter port
3850 * comes back online.
3852 if (phba->sli_rev == LPFC_SLI_REV4) {
3853 lpfc_printf_vlog(vports[i], KERN_INFO,
3854 LOG_NODE | LOG_DISCOVERY,
3855 "0011 Free RPI x%x on "
3856 "ndlp: x%px did x%x\n",
3857 ndlp->nlp_rpi, ndlp,
3859 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
3860 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
3863 if (ndlp->nlp_type & NLP_FABRIC) {
3864 lpfc_disc_state_machine(vports[i], ndlp,
3865 NULL, NLP_EVT_DEVICE_RECOVERY);
3867 /* Don't remove the node unless the node
3868 * has been unregistered with the
3869 * transport, and we're not in recovery
3870 * before dev_loss_tmo triggered.
3871 * Otherwise, let dev_loss take care of the node.
3874 if (!(ndlp->save_flags &
3875 NLP_IN_RECOV_POST_DEV_LOSS) &&
3876 !(ndlp->fc4_xpt_flags &
3877 (NVME_XPT_REGD | SCSI_XPT_REGD)))
3878 lpfc_disc_state_machine
3886 lpfc_destroy_vport_work_array(phba, vports);
3888 lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
3891 flush_workqueue(phba->wq);
3895 * lpfc_offline - Bring a HBA offline
3896 * @phba: pointer to lpfc hba data structure.
3898 * This routine actually brings a HBA offline. It stops all the timers
3899 * associated with the HBA, brings down the SLI layer, and eventually
3900 * marks the HBA as in offline state for the upper layer protocol.
3903 lpfc_offline(struct lpfc_hba *phba)
3905 struct Scsi_Host *shost;
3906 struct lpfc_vport **vports;
3909 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3912 /* stop port and all timers associated with this hba */
3913 lpfc_stop_port(phba);
3915 /* Tear down the local and target port registrations. The
3916 * nvme transports need to clean up.
3918 lpfc_nvmet_destroy_targetport(phba);
3919 lpfc_nvme_destroy_localport(phba->pport);
3921 vports = lpfc_create_vport_work_array(phba);
3923 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3924 lpfc_stop_vport_timers(vports[i]);
3925 lpfc_destroy_vport_work_array(phba, vports);
3926 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3927 "0460 Bring Adapter offline\n");
3928 /* Bring down the SLI Layer and clean up. The HBA is offline now. */
3930 lpfc_sli_hba_down(phba);
3931 spin_lock_irq(&phba->hbalock);
3933 spin_unlock_irq(&phba->hbalock);
3934 vports = lpfc_create_vport_work_array(phba);
3936 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3937 shost = lpfc_shost_from_vport(vports[i]);
3938 spin_lock_irq(shost->host_lock);
3939 vports[i]->work_port_events = 0;
3940 vports[i]->fc_flag |= FC_OFFLINE_MODE;
3941 spin_unlock_irq(shost->host_lock);
3943 lpfc_destroy_vport_work_array(phba, vports);
3944 /* If OFFLINE flag is clear (i.e. unloading), cpuhp removal is handled elsewhere. */
3947 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3948 __lpfc_cpuhp_remove(phba);
3950 if (phba->cfg_xri_rebalancing)
3951 lpfc_destroy_multixri_pools(phba);
3955 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
3956 * @phba: pointer to lpfc hba data structure.
3958 * This routine is to free all the SCSI buffers and IOCBs from the driver
3959 * list back to the kernel. It is called from lpfc_pci_remove_one to free
3960 * the internal resources before the device is removed from the system.
3963 lpfc_scsi_free(struct lpfc_hba *phba)
3965 struct lpfc_io_buf *sb, *sb_next;
3967 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3970 spin_lock_irq(&phba->hbalock);
3972 /* Release all the lpfc_scsi_bufs maintained by this host. */
3974 spin_lock(&phba->scsi_buf_list_put_lock);
3975 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3977 list_del(&sb->list);
3978 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3981 phba->total_scsi_bufs--;
3983 spin_unlock(&phba->scsi_buf_list_put_lock);
3985 spin_lock(&phba->scsi_buf_list_get_lock);
3986 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3988 list_del(&sb->list);
3989 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3992 phba->total_scsi_bufs--;
3994 spin_unlock(&phba->scsi_buf_list_get_lock);
3995 spin_unlock_irq(&phba->hbalock);
3999 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
4000 * @phba: pointer to lpfc hba data structure.
4002 * This routine is to free all the IO buffers and IOCBs from the driver
4003 * list back to the kernel. It is called from lpfc_pci_remove_one to free
4004 * the internal resources before the device is removed from the system.
4007 lpfc_io_free(struct lpfc_hba *phba)
4009 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
4010 struct lpfc_sli4_hdw_queue *qp;
4013 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4014 qp = &phba->sli4_hba.hdwq[idx];
4015 /* Release all the lpfc_nvme_bufs maintained by this host. */
4016 spin_lock(&qp->io_buf_list_put_lock);
4017 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4018 &qp->lpfc_io_buf_list_put,
4020 list_del(&lpfc_ncmd->list);
4022 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4023 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4024 if (phba->cfg_xpsgl && !phba->nvmet_support)
4025 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
4026 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
4028 qp->total_io_bufs--;
4030 spin_unlock(&qp->io_buf_list_put_lock);
4032 spin_lock(&qp->io_buf_list_get_lock);
4033 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4034 &qp->lpfc_io_buf_list_get,
4036 list_del(&lpfc_ncmd->list);
4038 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4039 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4040 if (phba->cfg_xpsgl && !phba->nvmet_support)
4041 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
4042 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
4044 qp->total_io_bufs--;
4046 spin_unlock(&qp->io_buf_list_get_lock);
4051 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
4052 * @phba: pointer to lpfc hba data structure.
4054 * This routine first calculates the sizes of the current els and allocated
4055 * scsi sgl lists, and then goes through all sgls to updates the physical
4056 * XRIs assigned due to port function reset. During port initialization, the
4057 * current els and allocated scsi sgl lists are 0s.
4060 * 0 - successful (for now, it always returns 0)
4063 lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
4065 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
4066 uint16_t i, lxri, xri_cnt, els_xri_cnt;
4067 LIST_HEAD(els_sgl_list);
4071 * update on pci function's els xri-sgl list
4073 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
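/* Compare the ELS XRI count required by the current configuration
 * against what is already allocated, then grow or shrink the ELS
 * sgl list to match.
 */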
4075 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
4076 /* els xri-sgl expanded */
4077 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
4078 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4079 "3157 ELS xri-sgl count increased from "
4080 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
4082 /* allocate the additional els sgls */
4083 for (i = 0; i < xri_cnt; i++) {
4084 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
4086 if (sglq_entry == NULL) {
4087 lpfc_printf_log(phba, KERN_ERR,
4089 "2562 Failure to allocate an "
4090 "ELS sgl entry:%d\n", i);
4094 sglq_entry->buff_type = GEN_BUFF_TYPE;
4095 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
4097 if (sglq_entry->virt == NULL) {
4099 lpfc_printf_log(phba, KERN_ERR,
4101 "2563 Failure to allocate an "
4102 "ELS mbuf:%d\n", i);
4106 sglq_entry->sgl = sglq_entry->virt;
4107 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
4108 sglq_entry->state = SGL_FREED;
4109 list_add_tail(&sglq_entry->list, &els_sgl_list);
4111 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
4112 list_splice_init(&els_sgl_list,
4113 &phba->sli4_hba.lpfc_els_sgl_list);
4114 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
4115 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
4116 /* els xri-sgl shrunk */
4117 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
4118 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4119 "3158 ELS xri-sgl count decreased from "
4120 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
4122 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
4123 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
4125 /* release extra els sgls from list */
4126 for (i = 0; i < xri_cnt; i++) {
4127 list_remove_head(&els_sgl_list,
4128 sglq_entry, struct lpfc_sglq, list);
4130 __lpfc_mbuf_free(phba, sglq_entry->virt,
4135 list_splice_init(&els_sgl_list,
4136 &phba->sli4_hba.lpfc_els_sgl_list);
4137 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
4139 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4140 "3163 ELS xri-sgl count unchanged: %d\n",
4142 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
4144 /* update xris to els sgls on the list */
4146 sglq_entry_next = NULL;
4147 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
4148 &phba->sli4_hba.lpfc_els_sgl_list, list) {
4149 lxri = lpfc_sli4_next_xritag(phba);
4150 if (lxri == NO_XRI) {
4151 lpfc_printf_log(phba, KERN_ERR,
4153 "2400 Failed to allocate xri for "
4158 sglq_entry->sli4_lxritag = lxri;
4159 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4164 lpfc_free_els_sgl_list(phba);
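/*
 * Taken together, the paths above cover three cases: grow (allocate
 * xri_cnt new sglq entries and splice them onto lpfc_els_sgl_list),
 * shrink (remove and free xri_cnt entries), or unchanged. In every case
 * the final list is then walked once so each surviving entry receives a
 * fresh logical/physical XRI pair from lpfc_sli4_next_xritag().
 */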
4169 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
4170 * @phba: pointer to lpfc hba data structure.
4172 * This routine first calculates the size of the current nvmet xri-sgl list,
4173 * and then goes through all sgls to update the physical XRIs assigned due
4174 * to port function reset. During port initialization, the current nvmet
4175 * xri-sgl list size is 0.
4178 * 0 - successful (for now, it always returns 0)
4181 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
4183 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
4184 uint16_t i, lxri, xri_cnt, els_xri_cnt;
4185 uint16_t nvmet_xri_cnt;
4186 LIST_HEAD(nvmet_sgl_list);
4190 * update on pci function's nvmet xri-sgl list
4192 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4194 /* For NVMET, ALL remaining XRIs are dedicated for IO processing */
4195 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4196 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
4197 /* nvmet xri-sgl expanded */
4198 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
4199 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4200 "6302 NVMET xri-sgl cnt grew from %d to %d\n",
4201 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
4202 /* allocate the additional nvmet sgls */
4203 for (i = 0; i < xri_cnt; i++) {
4204 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
4206 if (sglq_entry == NULL) {
4207 lpfc_printf_log(phba, KERN_ERR,
4209 "6303 Failure to allocate an "
4210 "NVMET sgl entry:%d\n", i);
4214 sglq_entry->buff_type = NVMET_BUFF_TYPE;
4215 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
4217 if (sglq_entry->virt == NULL) {
4219 lpfc_printf_log(phba, KERN_ERR,
4221 "6304 Failure to allocate an "
4222 "NVMET buf:%d\n", i);
4226 sglq_entry->sgl = sglq_entry->virt;
4227 memset(sglq_entry->sgl, 0,
4228 phba->cfg_sg_dma_buf_size);
4229 sglq_entry->state = SGL_FREED;
4230 list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
4232 spin_lock_irq(&phba->hbalock);
4233 spin_lock(&phba->sli4_hba.sgl_list_lock);
4234 list_splice_init(&nvmet_sgl_list,
4235 &phba->sli4_hba.lpfc_nvmet_sgl_list);
4236 spin_unlock(&phba->sli4_hba.sgl_list_lock);
4237 spin_unlock_irq(&phba->hbalock);
4238 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
4239 /* nvmet xri-sgl shrunk */
4240 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
4241 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4242 "6305 NVMET xri-sgl count decreased from "
4243 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
4245 spin_lock_irq(&phba->hbalock);
4246 spin_lock(&phba->sli4_hba.sgl_list_lock);
4247 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
4249 /* release extra nvmet sgls from list */
4250 for (i = 0; i < xri_cnt; i++) {
4251 list_remove_head(&nvmet_sgl_list,
4252 sglq_entry, struct lpfc_sglq, list);
4254 lpfc_nvmet_buf_free(phba, sglq_entry->virt,
4259 list_splice_init(&nvmet_sgl_list,
4260 &phba->sli4_hba.lpfc_nvmet_sgl_list);
4261 spin_unlock(&phba->sli4_hba.sgl_list_lock);
4262 spin_unlock_irq(&phba->hbalock);
4264 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4265 "6306 NVMET xri-sgl count unchanged: %d\n",
4267 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
4269 /* update xris to nvmet sgls on the list */
4271 sglq_entry_next = NULL;
4272 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
4273 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
4274 lxri = lpfc_sli4_next_xritag(phba);
4275 if (lxri == NO_XRI) {
4276 lpfc_printf_log(phba, KERN_ERR,
4278 "6307 Failed to allocate xri for "
4283 sglq_entry->sli4_lxritag = lxri;
4284 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4289 lpfc_free_nvmet_sgl_list(phba);
4294 lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
4297 struct lpfc_sli4_hdw_queue *qp;
4298 struct lpfc_io_buf *lpfc_cmd;
4299 struct lpfc_io_buf *iobufp, *prev_iobufp;
4300 int idx, cnt, xri, inserted;
4303 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4304 qp = &phba->sli4_hba.hdwq[idx];
4305 spin_lock_irq(&qp->io_buf_list_get_lock);
4306 spin_lock(&qp->io_buf_list_put_lock);
4308 /* Take everything off the get and put lists */
4309 list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
4310 list_splice(&qp->lpfc_io_buf_list_put, &blist);
4311 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
4312 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
4313 cnt += qp->get_io_bufs + qp->put_io_bufs;
4314 qp->get_io_bufs = 0;
4315 qp->put_io_bufs = 0;
4316 qp->total_io_bufs = 0;
4317 spin_unlock(&qp->io_buf_list_put_lock);
4318 spin_unlock_irq(&qp->io_buf_list_get_lock);
4322 * Take IO buffers off blist and put on cbuf sorted by XRI.
4323 * This is because POST_SGL takes a sequential range of XRIs
4324 * to post to the firmware.
4326 for (idx = 0; idx < cnt; idx++) {
4327 list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
4331 list_add_tail(&lpfc_cmd->list, cbuf);
4334 xri = lpfc_cmd->cur_iocbq.sli4_xritag;
4337 list_for_each_entry(iobufp, cbuf, list) {
4338 if (xri < iobufp->cur_iocbq.sli4_xritag) {
4340 list_add(&lpfc_cmd->list,
4341 &prev_iobufp->list);
4343 list_add(&lpfc_cmd->list, cbuf);
4347 prev_iobufp = iobufp;
4350 list_add_tail(&lpfc_cmd->list, cbuf);
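/*
 * A minimal standalone sketch of the XRI-ordered insert above (not part
 * of the driver; struct xri_node, sorted_xri_insert() and the
 * LPFC_DOC_EXAMPLE guard are hypothetical). POST_SGL wants ascending,
 * mostly contiguous XRI ranges, so each buffer is linked in ahead of the
 * first entry whose tag is larger.
 */
#ifdef LPFC_DOC_EXAMPLE
#include <stddef.h>

struct xri_node {
	struct xri_node *next;
	unsigned int xri;
};

static void sorted_xri_insert(struct xri_node **head, struct xri_node *n)
{
	struct xri_node **pp = head;

	/* Walk until the next entry's tag exceeds ours, then link in. */
	while (*pp && (*pp)->xri <= n->xri)
		pp = &(*pp)->next;
	n->next = *pp;
	*pp = n;
}
#endif /* LPFC_DOC_EXAMPLE */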
4356 lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
4358 struct lpfc_sli4_hdw_queue *qp;
4359 struct lpfc_io_buf *lpfc_cmd;
4362 qp = phba->sli4_hba.hdwq;
4364 while (!list_empty(cbuf)) {
4365 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4366 list_remove_head(cbuf, lpfc_cmd,
4367 struct lpfc_io_buf, list);
4371 qp = &phba->sli4_hba.hdwq[idx];
4372 lpfc_cmd->hdwq_no = idx;
4373 lpfc_cmd->hdwq = qp;
4374 lpfc_cmd->cur_iocbq.cmd_cmpl = NULL;
4375 spin_lock(&qp->io_buf_list_put_lock);
4376 list_add_tail(&lpfc_cmd->list,
4377 &qp->lpfc_io_buf_list_put);
4379 qp->total_io_bufs++;
4380 spin_unlock(&qp->io_buf_list_put_lock);
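/*
 * Note on the loop above: buffers are dealt back one per hardware queue
 * in turn, so after a flush the put lists of all cfg_hdw_queue queues
 * are replenished evenly rather than one queue being refilled first.
 */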
4387 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping
4388 * @phba: pointer to lpfc hba data structure.
4390 * This routine first calculates the size of the currently allocated IO
4391 * xri-sgl list, and then goes through all sgls to update the physical
4392 * XRIs assigned due to port function reset. During port initialization,
4393 * the currently allocated IO xri-sgl list size is 0.
4396 * 0 - successful (for now, it always returns 0)
4399 lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
4401 struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
4402 uint16_t i, lxri, els_xri_cnt;
4403 uint16_t io_xri_cnt, io_xri_max;
4404 LIST_HEAD(io_sgl_list);
4408 * update on pci function's allocated nvme xri-sgl list
4411 /* maximum number of xris available for nvme buffers */
4412 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4413 io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4414 phba->sli4_hba.io_xri_max = io_xri_max;
4416 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4417 "6074 Current allocated XRI sgl count:%d, "
4418 "maximum XRI count:%d els_xri_cnt:%d\n\n",
4419 phba->sli4_hba.io_xri_cnt,
4420 phba->sli4_hba.io_xri_max,
4423 cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
4425 if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
4426 /* max nvme xri shrunk below the allocated nvme buffers */
4427 io_xri_cnt = phba->sli4_hba.io_xri_cnt -
4428 phba->sli4_hba.io_xri_max;
4429 /* release the extra allocated nvme buffers */
4430 for (i = 0; i < io_xri_cnt; i++) {
4431 list_remove_head(&io_sgl_list, lpfc_ncmd,
4432 struct lpfc_io_buf, list);
4434 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4436 lpfc_ncmd->dma_handle);
4440 phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
4443 /* update xris associated to remaining allocated nvme buffers */
4445 lpfc_ncmd_next = NULL;
4446 phba->sli4_hba.io_xri_cnt = cnt;
4447 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4448 &io_sgl_list, list) {
4449 lxri = lpfc_sli4_next_xritag(phba);
4450 if (lxri == NO_XRI) {
4451 lpfc_printf_log(phba, KERN_ERR,
4453 "6075 Failed to allocate xri for "
4458 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
4459 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4461 cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
4470 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
4471 * @phba: Pointer to lpfc hba data structure.
4472 * @num_to_alloc: The requested number of buffers to allocate.
4474 * This routine allocates nvme buffers for a device with the SLI-4 interface
4475 * spec; each nvme buffer contains all the necessary information needed to
4476 * initiate an I/O. After allocating up to @num_to_alloc IO buffers and
4477 * putting them on a list, it posts them to the port using SGL block post.
4480 * int - number of IO buffers that were allocated and posted.
4481 * 0 = failure; fewer than num_to_alloc indicates a partial failure.
4484 lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
4486 struct lpfc_io_buf *lpfc_ncmd;
4487 struct lpfc_iocbq *pwqeq;
4488 uint16_t iotag, lxri = 0;
4489 int bcnt, num_posted;
4490 LIST_HEAD(prep_nblist);
4491 LIST_HEAD(post_nblist);
4492 LIST_HEAD(nvme_nblist);
4494 phba->sli4_hba.io_xri_cnt = 0;
4495 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
4496 lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
4500 * Get memory from the pci pool to map the virt space to
4501 * pci bus space for an I/O. The DMA buffer includes the
4502 * number of SGEs necessary to support the sg_tablesize.
4504 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
4506 &lpfc_ncmd->dma_handle);
4507 if (!lpfc_ncmd->data) {
4512 if (phba->cfg_xpsgl && !phba->nvmet_support) {
4513 INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
4516 * 4K Page alignment is CRITICAL to BlockGuard, double
4519 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
4520 (((unsigned long)(lpfc_ncmd->data) &
4521 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
4522 lpfc_printf_log(phba, KERN_ERR,
4524 "3369 Memory alignment err: "
4526 (unsigned long)lpfc_ncmd->data);
4527 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4529 lpfc_ncmd->dma_handle);
4535 INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);
4537 lxri = lpfc_sli4_next_xritag(phba);
4538 if (lxri == NO_XRI) {
4539 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4540 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4544 pwqeq = &lpfc_ncmd->cur_iocbq;
4546 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */
4547 iotag = lpfc_sli_next_iotag(phba, pwqeq);
4549 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4550 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4552 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4553 "6121 Failed to allocate IOTAG for"
4554 " XRI:0x%x\n", lxri);
4555 lpfc_sli4_free_xri(phba, lxri);
4558 pwqeq->sli4_lxritag = lxri;
4559 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4561 /* Initialize local short-hand pointers. */
4562 lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
4563 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
4564 lpfc_ncmd->cur_iocbq.io_buf = lpfc_ncmd;
4565 spin_lock_init(&lpfc_ncmd->buf_lock);
4567 /* add the nvme buffer to a post list */
4568 list_add_tail(&lpfc_ncmd->list, &post_nblist);
4569 phba->sli4_hba.io_xri_cnt++;
4571 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
4572 "6114 Allocate %d out of %d requested new NVME "
4573 "buffers of size x%zu bytes\n", bcnt, num_to_alloc,
4574 sizeof(*lpfc_ncmd));
4577 /* post the list of nvme buffer sgls to port if available */
4578 if (!list_empty(&post_nblist))
4579 num_posted = lpfc_sli4_post_io_sgl_list(
4580 phba, &post_nblist, bcnt);
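/*
 * The SGL block post is the only step here that can partially fail:
 * lpfc_sli4_post_io_sgl_list() presumably returns how many of the bcnt
 * prepared buffers the port actually accepted, and that count is what
 * this routine reports back to its caller.
 */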
4588 lpfc_get_wwpn(struct lpfc_hba *phba)
4592 LPFC_MBOXQ_t *mboxq;
4595 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4598 return (uint64_t)-1;
4600 /* First get WWN of HBA instance */
4601 lpfc_read_nv(phba, mboxq);
4602 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4603 if (rc != MBX_SUCCESS) {
4604 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4605 "6019 Mailbox failed , mbxCmd x%x "
4606 "READ_NV, mbxStatus x%x\n",
4607 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4608 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
4609 mempool_free(mboxq, phba->mbox_mem_pool);
4610 return (uint64_t) -1;
4611 }
4612 mb = &mboxq->u.mb;
4613 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
4614 /* wwn is WWPN of HBA instance */
4615 mempool_free(mboxq, phba->mbox_mem_pool);
4616 if (phba->sli_rev == LPFC_SLI_REV4)
4617 return be64_to_cpu(wwn);
4619 return rol64(wwn, 32);
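/*
 * Byte-order sketch for the returns above (illustrative only;
 * rol64_model() and the LPFC_DOC_EXAMPLE guard are hypothetical
 * stand-ins). SLI-4 reports the name big-endian, hence be64_to_cpu();
 * the SLI-3 path instead rotates the 64-bit value by 32 bits, swapping
 * the two 32-bit word halves into canonical order.
 */
#ifdef LPFC_DOC_EXAMPLE
#include <stdint.h>

static uint64_t rol64_model(uint64_t v, unsigned int s)
{
	return (v << s) | (v >> (64 - s));	/* valid for 0 < s < 64 */
}
/* e.g. rol64_model(0x2000000010000000ULL, 32) == 0x1000000020000000ULL */
#endif /* LPFC_DOC_EXAMPLE */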
4622 static unsigned short lpfc_get_sg_tablesize(struct lpfc_hba *phba)
4624 if (phba->sli_rev == LPFC_SLI_REV4)
4625 if (phba->cfg_xpsgl && !phba->nvmet_support)
4626 return LPFC_MAX_SG_TABLESIZE;
4628 return phba->cfg_scsi_seg_cnt;
4630 return phba->cfg_sg_seg_cnt;
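/*
 * Selection above, in table form: SLI-4 with extended SGLs (cfg_xpsgl)
 * and no NVMET support gets the fixed LPFC_MAX_SG_TABLESIZE; any other
 * SLI-4 port uses cfg_scsi_seg_cnt; SLI-3 falls back to cfg_sg_seg_cnt.
 */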
4634 * lpfc_vmid_res_alloc - Allocates resources for VMID
4635 * @phba: pointer to lpfc hba data structure.
4636 * @vport: pointer to vport data structure
4638 * This routine allocates the resources needed for the VMID.
4645 lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport)
4647 /* VMID feature is supported only on SLI4 */
4648 if (phba->sli_rev == LPFC_SLI_REV3) {
4649 phba->cfg_vmid_app_header = 0;
4650 phba->cfg_vmid_priority_tagging = 0;
4653 if (lpfc_is_vmid_enabled(phba)) {
4655 kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid),
4660 rwlock_init(&vport->vmid_lock);
4662 /* Set the VMID parameters for the vport */
4663 vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging;
4664 vport->vmid_inactivity_timeout =
4665 phba->cfg_vmid_inactivity_timeout;
4666 vport->max_vmid = phba->cfg_max_vmid;
4667 vport->cur_vmid_cnt = 0;
4669 vport->vmid_priority_range = bitmap_zalloc
4670 (LPFC_VMID_MAX_PRIORITY_RANGE, GFP_KERNEL);
4672 if (!vport->vmid_priority_range) {
4677 hash_init(vport->hash_table);
4683 * lpfc_create_port - Create an FC port
4684 * @phba: pointer to lpfc hba data structure.
4685 * @instance: a unique integer ID to this FC port.
4686 * @dev: pointer to the device data structure.
4688 * This routine creates an FC port for the upper layer protocol. The FC port
4689 * can be created on top of either a physical port or a virtual port provided
4690 * by the HBA. This routine also allocates a SCSI host data structure (shost)
4691 * and associates the FC port created before adding the shost into the SCSI
4695 * @vport - pointer to the virtual N_Port data structure.
4696 * NULL - port create failed.
4699 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
4701 struct lpfc_vport *vport;
4702 struct Scsi_Host *shost = NULL;
4703 struct scsi_host_template *template;
4707 bool use_no_reset_hba = false;
4710 if (lpfc_no_hba_reset_cnt) {
4711 if (phba->sli_rev < LPFC_SLI_REV4 &&
4712 dev == &phba->pcidev->dev) {
4713 /* Reset the port first */
4714 lpfc_sli_brdrestart(phba);
4715 rc = lpfc_sli_chipset_init(phba);
4719 wwn = lpfc_get_wwpn(phba);
4722 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
4723 if (wwn == lpfc_no_hba_reset[i]) {
4724 lpfc_printf_log(phba, KERN_ERR,
4726 "6020 Setting use_no_reset port=%llx\n",
4728 use_no_reset_hba = true;
4733 /* Seed template for SCSI host registration */
4734 if (dev == &phba->pcidev->dev) {
4735 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
4736 /* Seed physical port template */
4737 template = &lpfc_template;
4739 if (use_no_reset_hba)
4740 /* template is for a no reset SCSI Host */
4741 template->eh_host_reset_handler = NULL;
4743 /* Seed updated value of sg_tablesize */
4744 template->sg_tablesize = lpfc_get_sg_tablesize(phba);
4746 /* NVMET is for physical port only */
4747 template = &lpfc_template_nvme;
4750 /* Seed vport template */
4751 template = &lpfc_vport_template;
4753 /* Seed updated value of sg_tablesize */
4754 template->sg_tablesize = lpfc_get_sg_tablesize(phba);
4757 shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
4761 vport = (struct lpfc_vport *) shost->hostdata;
4763 vport->load_flag |= FC_LOADING;
4764 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4765 vport->fc_rscn_flush = 0;
4766 lpfc_get_vport_cfgparam(vport);
4768 /* Adjust value in vport */
4769 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;
4771 shost->unique_id = instance;
4772 shost->max_id = LPFC_MAX_TARGET;
4773 shost->max_lun = vport->cfg_max_luns;
4774 shost->this_id = -1;
4775 shost->max_cmd_len = 16;
4777 if (phba->sli_rev == LPFC_SLI_REV4) {
4778 if (!phba->cfg_fcp_mq_threshold ||
4779 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
4780 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
4782 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
4783 phba->cfg_fcp_mq_threshold);
4785 shost->dma_boundary =
4786 phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
4788 /* SLI-3 has a limited number of hardware queues (3),
4789 * thus there is only one for FCP processing.
4791 shost->nr_hw_queues = 1;
4794 * Set initial can_queue value since 0 is no longer supported and
4795 * scsi_add_host will fail. This will be adjusted later based on the
4796 * max xri value determined in hba setup.
4798 shost->can_queue = phba->cfg_hba_queue_depth - 10;
4799 if (dev != &phba->pcidev->dev) {
4800 shost->transportt = lpfc_vport_transport_template;
4801 vport->port_type = LPFC_NPIV_PORT;
4803 shost->transportt = lpfc_transport_template;
4804 vport->port_type = LPFC_PHYSICAL_PORT;
4807 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
4808 "9081 CreatePort TMPLATE type %x TBLsize %d "
4810 vport->port_type, shost->sg_tablesize,
4811 phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);
4813 /* Allocate the resources for VMID */
4814 rc = lpfc_vmid_res_alloc(phba, vport);
4819 /* Initialize all internally managed lists. */
4820 INIT_LIST_HEAD(&vport->fc_nodes);
4821 INIT_LIST_HEAD(&vport->rcv_buffer_list);
4822 spin_lock_init(&vport->work_port_lock);
4824 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
4826 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);
4828 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
4830 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4831 lpfc_setup_bg(phba, shost);
4833 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
4837 spin_lock_irq(&phba->port_list_lock);
4838 list_add_tail(&vport->listentry, &phba->port_list);
4839 spin_unlock_irq(&phba->port_list_lock);
4844 bitmap_free(vport->vmid_priority_range);
4846 scsi_host_put(shost);
4852 * destroy_port - destroy an FC port
4853 * @vport: pointer to an lpfc virtual N_Port data structure.
4855 * This routine destroys an FC port from the upper layer protocol. All the
4856 * resources associated with the port are released.
4859 destroy_port(struct lpfc_vport *vport)
4861 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4862 struct lpfc_hba *phba = vport->phba;
4864 lpfc_debugfs_terminate(vport);
4865 fc_remove_host(shost);
4866 scsi_remove_host(shost);
4868 spin_lock_irq(&phba->port_list_lock);
4869 list_del_init(&vport->listentry);
4870 spin_unlock_irq(&phba->port_list_lock);
4872 lpfc_cleanup(vport);
4877 * lpfc_get_instance - Get a unique integer ID
4879 * This routine allocates a unique integer ID from the lpfc_hba_index pool. It
4880 * uses the kernel idr facility to perform the task.
4883 * instance - a unique integer ID allocated as the new instance.
4884 * -1 - lpfc get instance failed.
4887 lpfc_get_instance(void)
4891 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
4892 return ret < 0 ? -1 : ret;
4896 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
4897 * @shost: pointer to SCSI host data structure.
4898 * @time: elapsed time of the scan in jiffies.
4900 * This routine is called by the SCSI layer with a SCSI host to determine
4901 * whether the host scan has finished.
4903 * Note: there is no scan_start function as adapter initialization will have
4904 * asynchronously kicked off the link initialization.
4907 * 0 - SCSI host scan is not over yet.
4908 * 1 - SCSI host scan is over.
4910 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
4912 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4913 struct lpfc_hba *phba = vport->phba;
4916 spin_lock_irq(shost->host_lock);
4918 if (vport->load_flag & FC_UNLOADING) {
4922 if (time >= msecs_to_jiffies(30 * 1000)) {
4923 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4924 "0461 Scanning longer than 30 "
4925 "seconds. Continuing initialization\n");
4929 if (time >= msecs_to_jiffies(15 * 1000) &&
4930 phba->link_state <= LPFC_LINK_DOWN) {
4931 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4932 "0465 Link down longer than 15 "
4933 "seconds. Continuing initialization\n");
4938 if (vport->port_state != LPFC_VPORT_READY)
4940 if (vport->num_disc_nodes || vport->fc_prli_sent)
4942 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
4944 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
4950 spin_unlock_irq(shost->host_lock);
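/*
 * The checks above form a ladder: bail out immediately if unloading;
 * stop waiting after 30 seconds of scanning, or after 15 seconds if the
 * link never came up; otherwise keep scanning until the vport is ready,
 * discovery (num_disc_nodes/fc_prli_sent) has drained, at least 2
 * seconds have elapsed when no targets are mapped, and no mailbox
 * command is still active.
 */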
4954 static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
4956 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4957 struct lpfc_hba *phba = vport->phba;
4959 fc_host_supported_speeds(shost) = 0;
4961 * Avoid reporting supported link speed for FCoE as it can't be
4962 * controlled via FCoE.
4964 if (phba->hba_flag & HBA_FCOE_MODE)
4967 if (phba->lmt & LMT_256Gb)
4968 fc_host_supported_speeds(shost) |= FC_PORTSPEED_256GBIT;
4969 if (phba->lmt & LMT_128Gb)
4970 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
4971 if (phba->lmt & LMT_64Gb)
4972 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
4973 if (phba->lmt & LMT_32Gb)
4974 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
4975 if (phba->lmt & LMT_16Gb)
4976 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
4977 if (phba->lmt & LMT_10Gb)
4978 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
4979 if (phba->lmt & LMT_8Gb)
4980 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
4981 if (phba->lmt & LMT_4Gb)
4982 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
4983 if (phba->lmt & LMT_2Gb)
4984 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
4985 if (phba->lmt & LMT_1Gb)
4986 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
4990 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
4991 * @shost: pointer to SCSI host data structure.
4993 * This routine initializes a given SCSI host attributes on a FC port. The
4994 * SCSI host can be either on top of a physical port or a virtual port.
4996 void lpfc_host_attrib_init(struct Scsi_Host *shost)
4998 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4999 struct lpfc_hba *phba = vport->phba;
5001 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
5004 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
5005 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
5006 fc_host_supported_classes(shost) = FC_COS_CLASS3;
5008 memset(fc_host_supported_fc4s(shost), 0,
5009 sizeof(fc_host_supported_fc4s(shost)));
5010 fc_host_supported_fc4s(shost)[2] = 1;
5011 fc_host_supported_fc4s(shost)[7] = 1;
5013 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
5014 sizeof(fc_host_symbolic_name(shost)));
5016 lpfc_host_supported_speeds_set(shost);
5018 fc_host_maxframe_size(shost) =
5019 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
5020 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
5022 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
5024 /* This value is also unchanging */
5025 memset(fc_host_active_fc4s(shost), 0,
5026 sizeof(fc_host_active_fc4s(shost)));
5027 fc_host_active_fc4s(shost)[2] = 1;
5028 fc_host_active_fc4s(shost)[7] = 1;
5030 fc_host_max_npiv_vports(shost) = phba->max_vpi;
5031 spin_lock_irq(shost->host_lock);
5032 vport->load_flag &= ~FC_LOADING;
5033 spin_unlock_irq(shost->host_lock);
5037 * lpfc_stop_port_s3 - Stop SLI3 device port
5038 * @phba: pointer to lpfc hba data structure.
5040 * This routine is invoked to stop an SLI3 device port; it stops the device
5041 * from generating interrupts and stops the device driver's timers for the
5045 lpfc_stop_port_s3(struct lpfc_hba *phba)
5047 /* Clear all interrupt enable conditions */
5048 writel(0, phba->HCregaddr);
5049 readl(phba->HCregaddr); /* flush */
5050 /* Clear all pending interrupts */
5051 writel(0xffffffff, phba->HAregaddr);
5052 readl(phba->HAregaddr); /* flush */
5054 /* Reset some HBA SLI setup states */
5055 lpfc_stop_hba_timers(phba);
5056 phba->pport->work_port_events = 0;
5060 * lpfc_stop_port_s4 - Stop SLI4 device port
5061 * @phba: pointer to lpfc hba data structure.
5063 * This routine is invoked to stop an SLI4 device port; it stops the device
5064 * from generating interrupts and stops the device driver's timers for the
5068 lpfc_stop_port_s4(struct lpfc_hba *phba)
5070 /* Reset some HBA SLI4 setup states */
5071 lpfc_stop_hba_timers(phba);
5073 phba->pport->work_port_events = 0;
5074 phba->sli4_hba.intr_enable = 0;
5078 * lpfc_stop_port - Wrapper function for stopping hba port
5079 * @phba: Pointer to HBA context object.
5081 * This routine wraps the actual SLI3 or SLI4 hba stop port routine via
5082 * the API jump table function pointer in the lpfc_hba struct.
5085 lpfc_stop_port(struct lpfc_hba *phba)
5087 phba->lpfc_stop_port(phba);
5090 flush_workqueue(phba->wq);
5094 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
5095 * @phba: Pointer to hba for which this call is being executed.
5097 * This routine starts the timer waiting for the FCF rediscovery to complete.
5100 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
5102 unsigned long fcf_redisc_wait_tmo =
5103 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
5104 /* Start fcf rediscovery wait period timer */
5105 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
5106 spin_lock_irq(&phba->hbalock);
5107 /* Allow action to new fcf asynchronous event */
5108 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
5109 /* Mark the FCF rediscovery pending state */
5110 phba->fcf.fcf_flag |= FCF_REDISC_PEND;
5111 spin_unlock_irq(&phba->hbalock);
5115 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
5116 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
5118 * This routine is invoked when the wait for FCF table rediscovery has
5119 * timed out. If new FCF record(s) have been discovered during the
5120 * wait period, a new FCF event is added to the FCOE async event
5121 * list, and the worker thread is woken up to process it from the
5122 * worker thread context.
5125 lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
5127 struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
5129 /* Don't send FCF rediscovery event if timer cancelled */
5130 spin_lock_irq(&phba->hbalock);
5131 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
5132 spin_unlock_irq(&phba->hbalock);
5135 /* Clear FCF rediscovery timer pending flag */
5136 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
5137 /* FCF rediscovery event to worker thread */
5138 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
5139 spin_unlock_irq(&phba->hbalock);
5140 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
5141 "2776 FCF rediscover quiescent timer expired\n");
5142 /* wake up worker thread */
5143 lpfc_worker_wake_up(phba);
5147 * lpfc_vmid_poll - VMID timeout detection
5148 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
5150 * This routine is invoked when there is no I/O by a VM for the specified
5151 * amount of time. When this situation is detected, the VMID has to be
5152 * deregistered from the switch and all the local resources freed. The VMID
5153 * will be reassigned to the VM once the I/O begins.
5156 lpfc_vmid_poll(struct timer_list *t)
5158 struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll);
5161 /* check if there is a need to issue QFPA */
5162 if (phba->pport->vmid_priority_tagging) {
5164 phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
5167 /* Is the vmid inactivity timer enabled */
5168 if (phba->pport->vmid_inactivity_timeout ||
5169 phba->pport->load_flag & FC_DEREGISTER_ALL_APP_ID) {
5171 phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID;
5175 lpfc_worker_wake_up(phba);
5177 /* restart the timer for the next iteration */
5178 mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 *
5183 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
5184 * @phba: pointer to lpfc hba data structure.
5185 * @acqe_link: pointer to the async link completion queue entry.
5187 * This routine is to parse the SLI4 link-attention link fault code.
5190 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
5191 struct lpfc_acqe_link *acqe_link)
5193 switch (bf_get(lpfc_acqe_fc_la_att_type, acqe_link)) {
5194 case LPFC_FC_LA_TYPE_LINK_DOWN:
5195 case LPFC_FC_LA_TYPE_TRUNKING_EVENT:
5196 case LPFC_FC_LA_TYPE_ACTIVATE_FAIL:
5197 case LPFC_FC_LA_TYPE_LINK_RESET_PRTCL_EVT:
5200 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
5201 case LPFC_ASYNC_LINK_FAULT_NONE:
5202 case LPFC_ASYNC_LINK_FAULT_LOCAL:
5203 case LPFC_ASYNC_LINK_FAULT_REMOTE:
5204 case LPFC_ASYNC_LINK_FAULT_LR_LRR:
5207 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5208 "0398 Unknown link fault code: x%x\n",
5209 bf_get(lpfc_acqe_link_fault, acqe_link));
5217 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
5218 * @phba: pointer to lpfc hba data structure.
5219 * @acqe_link: pointer to the async link completion queue entry.
5221 * This routine is to parse the SLI4 link attention type and translate it
5222 * into the base driver's link attention type coding.
5224 * Return: Link attention type in terms of base driver's coding.
5227 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
5228 struct lpfc_acqe_link *acqe_link)
5232 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
5233 case LPFC_ASYNC_LINK_STATUS_DOWN:
5234 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
5235 att_type = LPFC_ATT_LINK_DOWN;
5237 case LPFC_ASYNC_LINK_STATUS_UP:
5238 /* Ignore physical link up events - wait for logical link up */
5239 att_type = LPFC_ATT_RESERVED;
5241 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
5242 att_type = LPFC_ATT_LINK_UP;
5245 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5246 "0399 Invalid link attention type: x%x\n",
5247 bf_get(lpfc_acqe_link_status, acqe_link));
5248 att_type = LPFC_ATT_RESERVED;
5255 * lpfc_sli_port_speed_get - Get the FC port's link speed in Mbps
5256 * @phba: pointer to lpfc hba data structure.
5258 * This routine is to get an FC port's current link speed in Mbps.
5260 * Return: link speed in terms of Mbps.
5263 lpfc_sli_port_speed_get(struct lpfc_hba *phba)
5265 uint32_t link_speed;
5267 if (!lpfc_is_link_up(phba))
5270 if (phba->sli_rev <= LPFC_SLI_REV3) {
5271 switch (phba->fc_linkspeed) {
5272 case LPFC_LINK_SPEED_1GHZ:
5275 case LPFC_LINK_SPEED_2GHZ:
5278 case LPFC_LINK_SPEED_4GHZ:
5281 case LPFC_LINK_SPEED_8GHZ:
5284 case LPFC_LINK_SPEED_10GHZ:
5287 case LPFC_LINK_SPEED_16GHZ:
5294 if (phba->sli4_hba.link_state.logical_speed)
5296 phba->sli4_hba.link_state.logical_speed;
5298 link_speed = phba->sli4_hba.link_state.speed;
5304 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
5305 * @phba: pointer to lpfc hba data structure.
5306 * @evt_code: asynchronous event code.
5307 * @speed_code: asynchronous event link speed code.
5309 * This routine is to parse the given SLI4 async event link speed code into
5310 * a link speed value in Mbps.
5312 * Return: link speed in terms of Mbps.
5315 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
5318 uint32_t port_speed;
5321 case LPFC_TRAILER_CODE_LINK:
5322 switch (speed_code) {
5323 case LPFC_ASYNC_LINK_SPEED_ZERO:
5326 case LPFC_ASYNC_LINK_SPEED_10MBPS:
5329 case LPFC_ASYNC_LINK_SPEED_100MBPS:
5332 case LPFC_ASYNC_LINK_SPEED_1GBPS:
5335 case LPFC_ASYNC_LINK_SPEED_10GBPS:
5338 case LPFC_ASYNC_LINK_SPEED_20GBPS:
5341 case LPFC_ASYNC_LINK_SPEED_25GBPS:
5344 case LPFC_ASYNC_LINK_SPEED_40GBPS:
5347 case LPFC_ASYNC_LINK_SPEED_100GBPS:
5348 port_speed = 100000;
5354 case LPFC_TRAILER_CODE_FC:
5355 switch (speed_code) {
5356 case LPFC_FC_LA_SPEED_UNKNOWN:
5359 case LPFC_FC_LA_SPEED_1G:
5362 case LPFC_FC_LA_SPEED_2G:
5365 case LPFC_FC_LA_SPEED_4G:
5368 case LPFC_FC_LA_SPEED_8G:
5371 case LPFC_FC_LA_SPEED_10G:
5374 case LPFC_FC_LA_SPEED_16G:
5377 case LPFC_FC_LA_SPEED_32G:
5380 case LPFC_FC_LA_SPEED_64G:
5383 case LPFC_FC_LA_SPEED_128G:
5384 port_speed = 128000;
5386 case LPFC_FC_LA_SPEED_256G:
5387 port_speed = 256000;
5400 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
5401 * @phba: pointer to lpfc hba data structure.
5402 * @acqe_link: pointer to the async link completion queue entry.
5404 * This routine is to handle the SLI4 asynchronous FCoE link event.
5407 lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
5408 struct lpfc_acqe_link *acqe_link)
5412 struct lpfc_mbx_read_top *la;
5416 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
5417 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
5419 phba->fcoe_eventtag = acqe_link->event_tag;
5420 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5422 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5423 "0395 The mboxq allocation failed\n");
5427 rc = lpfc_mbox_rsrc_prep(phba, pmb);
5429 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5430 "0396 mailbox allocation failed\n");
5434 /* Cleanup any outstanding ELS commands */
5435 lpfc_els_flush_all_cmd(phba);
5437 /* Block ELS IOCBs until we have done process link event */
5438 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
5440 /* Update link event statistics */
5441 phba->sli.slistat.link_event++;
5443 /* Create lpfc_handle_latt mailbox command from link ACQE */
5444 lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
5445 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
5446 pmb->vport = phba->pport;
5448 /* Keep the link status for extra SLI4 state machine reference */
5449 phba->sli4_hba.link_state.speed =
5450 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
5451 bf_get(lpfc_acqe_link_speed, acqe_link));
5452 phba->sli4_hba.link_state.duplex =
5453 bf_get(lpfc_acqe_link_duplex, acqe_link);
5454 phba->sli4_hba.link_state.status =
5455 bf_get(lpfc_acqe_link_status, acqe_link);
5456 phba->sli4_hba.link_state.type =
5457 bf_get(lpfc_acqe_link_type, acqe_link);
5458 phba->sli4_hba.link_state.number =
5459 bf_get(lpfc_acqe_link_number, acqe_link);
5460 phba->sli4_hba.link_state.fault =
5461 bf_get(lpfc_acqe_link_fault, acqe_link);
5462 phba->sli4_hba.link_state.logical_speed =
5463 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
5465 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5466 "2900 Async FC/FCoE Link event - Speed:%dGBit "
5467 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
5468 "Logical speed:%dMbps Fault:%d\n",
5469 phba->sli4_hba.link_state.speed,
5470 phba->sli4_hba.link_state.topology,
5471 phba->sli4_hba.link_state.status,
5472 phba->sli4_hba.link_state.type,
5473 phba->sli4_hba.link_state.number,
5474 phba->sli4_hba.link_state.logical_speed,
5475 phba->sli4_hba.link_state.fault);
5477 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
5478 * topology info. Note: Optional for non FC-AL ports.
5480 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
5481 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
5482 if (rc == MBX_NOT_FINISHED)
5487 * For FCoE Mode: fill in all the topology information we need and call
5488 * the READ_TOPOLOGY completion routine to continue without actually
5489 * sending the READ_TOPOLOGY mailbox command to the port.
5491 /* Initialize completion status */
5492 mb = &pmb->u.mb;
5493 mb->mbxStatus = MBX_SUCCESS;
5495 /* Parse port fault information field */
5496 lpfc_sli4_parse_latt_fault(phba, acqe_link);
5498 /* Parse and translate link attention fields */
5499 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
5500 la->eventTag = acqe_link->event_tag;
5501 bf_set(lpfc_mbx_read_top_att_type, la, att_type);
5502 bf_set(lpfc_mbx_read_top_link_spd, la,
5503 (bf_get(lpfc_acqe_link_speed, acqe_link)));
5505 /* Fake the following irrelevant fields */
5506 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
5507 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
5508 bf_set(lpfc_mbx_read_top_il, la, 0);
5509 bf_set(lpfc_mbx_read_top_pb, la, 0);
5510 bf_set(lpfc_mbx_read_top_fa, la, 0);
5511 bf_set(lpfc_mbx_read_top_mm, la, 0);
5513 /* Invoke the lpfc_handle_latt mailbox command callback function */
5514 lpfc_mbx_cmpl_read_topology(phba, pmb);
5519 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
5523 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read topology speed
5525 * @phba: pointer to lpfc hba data structure.
5526 * @speed_code: asynchronous event link speed code.
5528 * This routine is to parse the given SLI4 async event link speed code into
5529 * a Read topology link speed value.
5531 * Return: link speed in terms of Read topology.
5534 lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
5538 switch (speed_code) {
5539 case LPFC_FC_LA_SPEED_1G:
5540 port_speed = LPFC_LINK_SPEED_1GHZ;
5542 case LPFC_FC_LA_SPEED_2G:
5543 port_speed = LPFC_LINK_SPEED_2GHZ;
5545 case LPFC_FC_LA_SPEED_4G:
5546 port_speed = LPFC_LINK_SPEED_4GHZ;
5548 case LPFC_FC_LA_SPEED_8G:
5549 port_speed = LPFC_LINK_SPEED_8GHZ;
5551 case LPFC_FC_LA_SPEED_16G:
5552 port_speed = LPFC_LINK_SPEED_16GHZ;
5554 case LPFC_FC_LA_SPEED_32G:
5555 port_speed = LPFC_LINK_SPEED_32GHZ;
5557 case LPFC_FC_LA_SPEED_64G:
5558 port_speed = LPFC_LINK_SPEED_64GHZ;
5560 case LPFC_FC_LA_SPEED_128G:
5561 port_speed = LPFC_LINK_SPEED_128GHZ;
5563 case LPFC_FC_LA_SPEED_256G:
5564 port_speed = LPFC_LINK_SPEED_256GHZ;
5575 lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba)
5577 if (!phba->rx_monitor) {
5578 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5579 "4411 Rx Monitor Info is empty.\n");
5581 lpfc_rx_monitor_report(phba, phba->rx_monitor, NULL, 0,
5582 LPFC_MAX_RXMONITOR_DUMP);
5587 * lpfc_cgn_update_stat - Save data into congestion stats buffer
5588 * @phba: pointer to lpfc hba data structure.
5589 * @dtag: FPIN descriptor received
5591 * Increment the FPIN received counter/time when it happens.
5594 lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag)
5596 struct lpfc_cgn_info *cp;
5598 struct timespec64 cur_time;
5602 /* Make sure we have a congestion info buffer */
5605 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
5606 ktime_get_real_ts64(&cur_time);
5607 time64_to_tm(cur_time.tv_sec, 0, &broken);
5609 /* Update congestion statistics */
5611 case ELS_DTAG_LNK_INTEGRITY:
5612 cnt = le32_to_cpu(cp->link_integ_notification);
5614 cp->link_integ_notification = cpu_to_le32(cnt);
5616 cp->cgn_stat_lnk_month = broken.tm_mon + 1;
5617 cp->cgn_stat_lnk_day = broken.tm_mday;
5618 cp->cgn_stat_lnk_year = broken.tm_year - 100;
5619 cp->cgn_stat_lnk_hour = broken.tm_hour;
5620 cp->cgn_stat_lnk_min = broken.tm_min;
5621 cp->cgn_stat_lnk_sec = broken.tm_sec;
5623 case ELS_DTAG_DELIVERY:
5624 cnt = le32_to_cpu(cp->delivery_notification);
5626 cp->delivery_notification = cpu_to_le32(cnt);
5628 cp->cgn_stat_del_month = broken.tm_mon + 1;
5629 cp->cgn_stat_del_day = broken.tm_mday;
5630 cp->cgn_stat_del_year = broken.tm_year - 100;
5631 cp->cgn_stat_del_hour = broken.tm_hour;
5632 cp->cgn_stat_del_min = broken.tm_min;
5633 cp->cgn_stat_del_sec = broken.tm_sec;
5635 case ELS_DTAG_PEER_CONGEST:
5636 cnt = le32_to_cpu(cp->cgn_peer_notification);
5638 cp->cgn_peer_notification = cpu_to_le32(cnt);
5640 cp->cgn_stat_peer_month = broken.tm_mon + 1;
5641 cp->cgn_stat_peer_day = broken.tm_mday;
5642 cp->cgn_stat_peer_year = broken.tm_year - 100;
5643 cp->cgn_stat_peer_hour = broken.tm_hour;
5644 cp->cgn_stat_peer_min = broken.tm_min;
5645 cp->cgn_stat_peer_sec = broken.tm_sec;
5647 case ELS_DTAG_CONGESTION:
5648 cnt = le32_to_cpu(cp->cgn_notification);
5650 cp->cgn_notification = cpu_to_le32(cnt);
5652 cp->cgn_stat_cgn_month = broken.tm_mon + 1;
5653 cp->cgn_stat_cgn_day = broken.tm_mday;
5654 cp->cgn_stat_cgn_year = broken.tm_year - 100;
5655 cp->cgn_stat_cgn_hour = broken.tm_hour;
5656 cp->cgn_stat_cgn_min = broken.tm_min;
5657 cp->cgn_stat_cgn_sec = broken.tm_sec;
5659 if (phba->cgn_fpin_frequency &&
5660 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
5661 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
5662 cp->cgn_stat_npm = value;
5664 value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
5665 LPFC_CGN_CRC32_SEED);
5666 cp->cgn_info_crc = cpu_to_le32(value);
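/*
 * Every writer of the congestion info buffer reseals it the same way:
 * update the counters and timestamps, then recompute the CRC32 over the
 * LPFC_CGN_INFO_SZ region with LPFC_CGN_CRC32_SEED and store it in
 * cgn_info_crc, presumably so a consumer can detect a torn or stale
 * buffer.
 */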
5670 * lpfc_cgn_save_evt_cnt - Save data into registered congestion buffer
5671 * @phba: pointer to lpfc hba data structure.
5673 * Save the congestion event data every minute.
5674 * On the hour collapse all the minute data into hour data. Every day
5675 * collapse all the hour data into daily data. Separate driver
5676 * and fabric congestion event counters that will be saved out
5677 * to the registered congestion buffer every minute.
5680 lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba)
5682 struct lpfc_cgn_info *cp;
5684 struct timespec64 cur_time;
5686 uint16_t value, mvalue;
5689 uint32_t dvalue, wvalue, lvalue, avalue;
5695 /* Make sure we have a congestion info buffer */
5698 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
5700 if (time_before(jiffies, phba->cgn_evt_timestamp))
5702 phba->cgn_evt_timestamp = jiffies +
5703 msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
5704 phba->cgn_evt_minute++;
5706 /* We should get to this point in the routine on 1 minute intervals */
5708 ktime_get_real_ts64(&cur_time);
5709 time64_to_tm(cur_time.tv_sec, 0, &broken);
5711 if (phba->cgn_fpin_frequency &&
5712 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
5713 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
5714 cp->cgn_stat_npm = value;
5717 /* Read and clear the latency counters for this minute */
5718 lvalue = atomic_read(&phba->cgn_latency_evt_cnt);
5719 latsum = atomic64_read(&phba->cgn_latency_evt);
5720 atomic_set(&phba->cgn_latency_evt_cnt, 0);
5721 atomic64_set(&phba->cgn_latency_evt, 0);
5723 /* We need to store MB/sec bandwidth in the congestion information.
5724 * block_cnt is count of 512 byte blocks for the entire minute,
5725 * bps will get bytes per sec before finally converting to MB/sec.
5727 bps = div_u64(phba->rx_block_cnt, LPFC_SEC_MIN) * 512;
5728 phba->rx_block_cnt = 0;
5729 mvalue = bps / (1024 * 1024); /* convert to MB/sec */
5732 /* cgn parameters */
5733 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
5734 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
5735 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
5736 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
5738 /* Fill in default LUN qdepth */
5739 value = (uint16_t)(phba->pport->cfg_lun_queue_depth);
5740 cp->cgn_lunq = cpu_to_le16(value);
5742 /* Record congestion buffer info - every minute
5743 * cgn_driver_evt_cnt (Driver events)
5744 * cgn_fabric_warn_cnt (Congestion Warnings)
5745 * cgn_latency_evt_cnt / cgn_latency_evt (IO Latency)
5746 * cgn_fabric_alarm_cnt (Congestion Alarms)
5748 index = ++cp->cgn_index_minute;
5749 if (cp->cgn_index_minute == LPFC_MIN_HOUR) {
5750 cp->cgn_index_minute = 0;
5754 /* Get the number of driver events in this sample and reset counter */
5755 dvalue = atomic_read(&phba->cgn_driver_evt_cnt);
5756 atomic_set(&phba->cgn_driver_evt_cnt, 0);
5758 /* Get the number of warning events - FPIN and Signal for this minute */
5760 if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) ||
5761 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
5762 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
5763 wvalue = atomic_read(&phba->cgn_fabric_warn_cnt);
5764 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
5766 /* Get the number of alarm events - FPIN and Signal for this minute */
5768 if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) ||
5769 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
5770 avalue = atomic_read(&phba->cgn_fabric_alarm_cnt);
5771 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
5773 /* Collect the driver, warning, alarm and latency counts for this
5774 * minute into the driver congestion buffer.
5776 ptr = &cp->cgn_drvr_min[index];
5777 value = (uint16_t)dvalue;
5778 *ptr = cpu_to_le16(value);
5780 ptr = &cp->cgn_warn_min[index];
5781 value = (uint16_t)wvalue;
5782 *ptr = cpu_to_le16(value);
5784 ptr = &cp->cgn_alarm_min[index];
5785 value = (uint16_t)avalue;
5786 *ptr = cpu_to_le16(value);
5788 lptr = &cp->cgn_latency_min[index];
5790 lvalue = (uint32_t)div_u64(latsum, lvalue);
5791 *lptr = cpu_to_le32(lvalue);
5796 /* Collect the bandwidth value into the driver's congestion buffer. */
5797 mptr = &cp->cgn_bw_min[index];
5798 *mptr = cpu_to_le16(mvalue);
5800 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5801 "2418 Congestion Info - minute (%d): %d %d %d %d %d\n",
5802 index, dvalue, wvalue, *lptr, mvalue, avalue);
5805 if ((phba->cgn_evt_minute % LPFC_MIN_HOUR) == 0) {
5806 /* Record congestion buffer info - every hour
5807 * Collapse all minutes into an hour
5809 index = ++cp->cgn_index_hour;
5810 if (cp->cgn_index_hour == LPFC_HOUR_DAY) {
5811 cp->cgn_index_hour = 0;
5821 for (i = 0; i < LPFC_MIN_HOUR; i++) {
5822 dvalue += le16_to_cpu(cp->cgn_drvr_min[i]);
5823 wvalue += le16_to_cpu(cp->cgn_warn_min[i]);
5824 lvalue += le32_to_cpu(cp->cgn_latency_min[i]);
5825 mbps += le16_to_cpu(cp->cgn_bw_min[i]);
5826 avalue += le16_to_cpu(cp->cgn_alarm_min[i]);
5828 if (lvalue) /* Avg of latency averages */
5829 lvalue /= LPFC_MIN_HOUR;
5830 if (mbps) /* Avg of Bandwidth averages */
5831 mvalue = mbps / LPFC_MIN_HOUR;
5833 lptr = &cp->cgn_drvr_hr[index];
5834 *lptr = cpu_to_le32(dvalue);
5835 lptr = &cp->cgn_warn_hr[index];
5836 *lptr = cpu_to_le32(wvalue);
5837 lptr = &cp->cgn_latency_hr[index];
5838 *lptr = cpu_to_le32(lvalue);
5839 mptr = &cp->cgn_bw_hr[index];
5840 *mptr = cpu_to_le16(mvalue);
5841 lptr = &cp->cgn_alarm_hr[index];
5842 *lptr = cpu_to_le32(avalue);
5844 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5845 "2419 Congestion Info - hour "
5846 "(%d): %d %d %d %d %d\n",
5847 index, dvalue, wvalue, lvalue, mvalue, avalue);
5851 if ((phba->cgn_evt_minute % LPFC_MIN_DAY) == 0) {
5852 /* Record congestion buffer info - every day.
5853 * Collapse all hours into a day. Rotate days
5854 * after LPFC_MAX_CGN_DAYS.
5856 index = ++cp->cgn_index_day;
5857 if (cp->cgn_index_day == LPFC_MAX_CGN_DAYS) {
5858 cp->cgn_index_day = 0;
5862 /* Anytime we overwrite daily index 0, after we wrap,
5863 * we will be overwriting the oldest day, so we must
5864 * update the congestion data start time for that day.
5865 * That start time should have previously been saved after
5866 * we wrote the last day's worth of data.
5868 if ((phba->hba_flag & HBA_CGN_DAY_WRAP) && index == 0) {
5869 time64_to_tm(phba->cgn_daily_ts.tv_sec, 0, &broken);
5871 cp->cgn_info_month = broken.tm_mon + 1;
5872 cp->cgn_info_day = broken.tm_mday;
5873 cp->cgn_info_year = broken.tm_year - 100;
5874 cp->cgn_info_hour = broken.tm_hour;
5875 cp->cgn_info_minute = broken.tm_min;
5876 cp->cgn_info_second = broken.tm_sec;
5878 lpfc_printf_log
5879 (phba, KERN_INFO, LOG_CGN_MGMT,
5880 "2646 CGNInfo idx0 Start Time: "
5881 "%d/%d/%d %d:%d:%d\n",
5882 cp->cgn_info_day, cp->cgn_info_month,
5883 cp->cgn_info_year, cp->cgn_info_hour,
5884 cp->cgn_info_minute, cp->cgn_info_second);
5893 for (i = 0; i < LPFC_HOUR_DAY; i++) {
5894 dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]);
5895 wvalue += le32_to_cpu(cp->cgn_warn_hr[i]);
5896 lvalue += le32_to_cpu(cp->cgn_latency_hr[i]);
5897 mbps += le16_to_cpu(cp->cgn_bw_hr[i]);
5898 avalue += le32_to_cpu(cp->cgn_alarm_hr[i]);
5900 if (lvalue) /* Avg of latency averages */
5901 lvalue /= LPFC_HOUR_DAY;
5902 if (mbps) /* Avg of Bandwidth averages */
5903 mvalue = mbps / LPFC_HOUR_DAY;
5905 lptr = &cp->cgn_drvr_day[index];
5906 *lptr = cpu_to_le32(dvalue);
5907 lptr = &cp->cgn_warn_day[index];
5908 *lptr = cpu_to_le32(wvalue);
5909 lptr = &cp->cgn_latency_day[index];
5910 *lptr = cpu_to_le32(lvalue);
5911 mptr = &cp->cgn_bw_day[index];
5912 *mptr = cpu_to_le16(mvalue);
5913 lptr = &cp->cgn_alarm_day[index];
5914 *lptr = cpu_to_le32(avalue);
5916 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5917 "2420 Congestion Info - daily (%d): "
5919 index, dvalue, wvalue, lvalue, mvalue, avalue);
5921 /* We just wrote LPFC_MAX_CGN_DAYS of data,
5922 * so we are wrapped on any data after this.
5923 * Save this as the start time for the next day.
5925 if (index == (LPFC_MAX_CGN_DAYS - 1)) {
5926 phba->hba_flag |= HBA_CGN_DAY_WRAP;
5927 ktime_get_real_ts64(&phba->cgn_daily_ts);
5931 /* Use the frequency found in the last rcv'ed FPIN */
5932 value = phba->cgn_fpin_frequency;
5933 cp->cgn_warn_freq = cpu_to_le16(value);
5934 cp->cgn_alarm_freq = cpu_to_le16(value);
5936 lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
5937 LPFC_CGN_CRC32_SEED);
5938 cp->cgn_info_crc = cpu_to_le32(lvalue);
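/*
 * Standalone model of the minute/hour/day rollup above (illustrative
 * only; struct cgn_ring, its helpers and the LPFC_DOC_EXAMPLE guard are
 * hypothetical stand-ins for the cgn_*_min/_hr/_day arrays). Each tier
 * is a fixed ring that wraps at its capacity; whenever a lower tier
 * wraps, its slots are summed (or averaged, for latency and bandwidth)
 * into one slot of the tier above.
 */
#ifdef LPFC_DOC_EXAMPLE
#include <stdint.h>

#define CGN_RING_SLOTS 60	/* e.g. LPFC_MIN_HOUR minutes per hour */

struct cgn_ring {
	uint32_t slot[CGN_RING_SLOTS];
	unsigned int idx;
};

/* Push one sample; returns 1 when the ring just wrapped (roll up now). */
static int cgn_ring_push(struct cgn_ring *r, uint32_t v)
{
	r->slot[r->idx] = v;
	r->idx = (r->idx + 1) % CGN_RING_SLOTS;
	return r->idx == 0;
}

/* Collapse a full ring into the value stored one tier up. */
static uint32_t cgn_ring_collapse(const struct cgn_ring *r)
{
	uint32_t sum = 0;
	unsigned int i;

	for (i = 0; i < CGN_RING_SLOTS; i++)
		sum += r->slot[i];
	return sum;
}
#endif /* LPFC_DOC_EXAMPLE */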
5942 * lpfc_calc_cmf_latency - latency from the start of the CMF timer interval
5943 * @phba: The Hba for which this call is being executed.
5945 * The routine calculates the latency from the beginning of the CMF timer
5946 * interval to the current point in time. It is called from IO completion
5947 * when we exceed our Bandwidth limitation for the time interval.
5950 lpfc_calc_cmf_latency(struct lpfc_hba *phba)
5952 struct timespec64 cmpl_time;
5955 ktime_get_real_ts64(&cmpl_time);
5957 /* This routine works on a ms granularity so sec and nsec are
5958 * converted accordingly.
5960 if (cmpl_time.tv_sec == phba->cmf_latency.tv_sec) {
5961 msec = (cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) /
5964 if (cmpl_time.tv_nsec >= phba->cmf_latency.tv_nsec) {
5965 msec = (cmpl_time.tv_sec -
5966 phba->cmf_latency.tv_sec) * MSEC_PER_SEC;
5967 msec += ((cmpl_time.tv_nsec -
5968 phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC);
5970 msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec -
5972 msec += (((NSEC_PER_SEC - phba->cmf_latency.tv_nsec) +
5973 cmpl_time.tv_nsec) / NSEC_PER_MSEC);
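/*
 * Standalone model of the millisecond delta above (illustrative only;
 * ts_delta_ms() and the LPFC_DOC_EXAMPLE guard are hypothetical). The
 * three branches in the routine reduce to a single expression once both
 * timestamps are flattened to nanoseconds; the branching merely avoids
 * negative intermediate nanosecond values.
 */
#ifdef LPFC_DOC_EXAMPLE
#include <stdint.h>

struct ts64 {
	int64_t tv_sec;
	int64_t tv_nsec;
};

static uint32_t ts_delta_ms(struct ts64 start, struct ts64 end)
{
	int64_t ns = (end.tv_sec - start.tv_sec) * 1000000000LL +
		     (end.tv_nsec - start.tv_nsec);

	return (uint32_t)(ns / 1000000LL);	/* ms granularity */
}
#endif /* LPFC_DOC_EXAMPLE */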
5980 * lpfc_cmf_timer - This is the timer function for one congestion rate interval.
5982 * @timer: Pointer to the high resolution timer that expired
5984 static enum hrtimer_restart
5985 lpfc_cmf_timer(struct hrtimer *timer)
5987 struct lpfc_hba *phba = container_of(timer, struct lpfc_hba,
5989 struct rx_info_entry entry;
5991 uint32_t busy, max_read;
5992 uint64_t total, rcv, lat, mbpi, extra, cnt;
5993 int timer_interval = LPFC_CMF_INTERVAL;
5995 struct lpfc_cgn_stat *cgs;
5998 /* Only restart the timer if congestion mgmt is on */
5999 if (phba->cmf_active_mode == LPFC_CFG_OFF ||
6000 !phba->cmf_latency.tv_sec) {
6001 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
6002 "6224 CMF timer exit: %d %lld\n",
6003 phba->cmf_active_mode,
6004 (uint64_t)phba->cmf_latency.tv_sec);
6005 return HRTIMER_NORESTART;
6008 /* If pport is not ready yet, just exit and wait for
6009 * the next timer cycle to hit.
6014 /* Do not block SCSI IO while in the timer routine since
6015 * total_bytes will be cleared
6017 atomic_set(&phba->cmf_stop_io, 1);
6019 /* First we need to calculate the actual ms between
6020 * the last timer interrupt and this one. We ask for
6021 * LPFC_CMF_INTERVAL; however, the actual time may
6022 * vary depending on system overhead.
6024 ms = lpfc_calc_cmf_latency(phba);
6027 /* Immediately after we calculate the time since the last
6028 * timer interrupt, set the start time for the next
6031 ktime_get_real_ts64(&phba->cmf_latency);
6033 phba->cmf_link_byte_count =
6034 div_u64(phba->cmf_max_line_rate * LPFC_CMF_INTERVAL, 1000);
6036 /* Collect all the stats from the prior timer interval */
6041 for_each_present_cpu(cpu) {
6042 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
6043 total += atomic64_xchg(&cgs->total_bytes, 0);
6044 io_cnt += atomic_xchg(&cgs->rx_io_cnt, 0);
6045 lat += atomic64_xchg(&cgs->rx_latency, 0);
6046 rcv += atomic64_xchg(&cgs->rcv_bytes, 0);
6049 /* Before we issue another CMF_SYNC_WQE, retrieve the BW
6050 * returned from the last CMF_SYNC_WQE issued, from
6051 * cmf_last_sync_bw. This will be the target BW for
6052 * this next timer interval.
6054 if (phba->cmf_active_mode == LPFC_CFG_MANAGED &&
6055 phba->link_state != LPFC_LINK_DOWN &&
6056 phba->hba_flag & HBA_SETUP) {
6057 mbpi = phba->cmf_last_sync_bw;
6058 phba->cmf_last_sync_bw = 0;
6061 /* Calculate any extra bytes needed to account for the
6062 * timer accuracy. If we are less than LPFC_CMF_INTERVAL
6063 * calculate the adjustment needed for total to reflect
6064 * a full LPFC_CMF_INTERVAL.
6066 if (ms && ms < LPFC_CMF_INTERVAL) {
6067 cnt = div_u64(total, ms); /* bytes per ms */
6068 cnt *= LPFC_CMF_INTERVAL; /* what total should be */
6070 /* If the timeout is scheduled to be shorter,
6071 * this value may skew the data, so cap it at mbpi.
6073 if ((phba->hba_flag & HBA_SHORT_CMF) && cnt > mbpi)
6076 extra = cnt - total;
6078 lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra);
6080 /* For Monitor mode or link down we want mbpi
6081 * to be the full link speed
6083 mbpi = phba->cmf_link_byte_count;
6086 phba->cmf_timer_cnt++;
6089 /* Update congestion info buffer latency in us */
6090 atomic_add(io_cnt, &phba->cgn_latency_evt_cnt);
6091 atomic64_add(lat, &phba->cgn_latency_evt);
6093 busy = atomic_xchg(&phba->cmf_busy, 0);
6094 max_read = atomic_xchg(&phba->rx_max_read_cnt, 0);
6096 /* Calculate MBPI for the next timer interval */
6098 if (mbpi > phba->cmf_link_byte_count ||
6099 phba->cmf_active_mode == LPFC_CFG_MONITOR)
6100 mbpi = phba->cmf_link_byte_count;
6102 /* Change max_bytes_per_interval to what the prior
6103 * CMF_SYNC_WQE cmpl indicated.
6105 if (mbpi != phba->cmf_max_bytes_per_interval)
6106 phba->cmf_max_bytes_per_interval = mbpi;
6109 /* Save rxmonitor information for debug */
6110 if (phba->rx_monitor) {
6111 entry.total_bytes = total;
6112 entry.cmf_bytes = total + extra;
6113 entry.rcv_bytes = rcv;
6114 entry.cmf_busy = busy;
6115 entry.cmf_info = phba->cmf_active_info;
6117 entry.avg_io_latency = div_u64(lat, io_cnt);
6118 entry.avg_io_size = div_u64(rcv, io_cnt);
6120 entry.avg_io_latency = 0;
6121 entry.avg_io_size = 0;
6123 entry.max_read_cnt = max_read;
6124 entry.io_cnt = io_cnt;
6125 entry.max_bytes_per_interval = mbpi;
6126 if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
6127 entry.timer_utilization = phba->cmf_last_ts;
6129 entry.timer_utilization = ms;
6130 entry.timer_interval = ms;
6131 phba->cmf_last_ts = 0;
6133 lpfc_rx_monitor_record(phba->rx_monitor, &entry);
6136 if (phba->cmf_active_mode == LPFC_CFG_MONITOR) {
6137 /* If Monitor mode, check if we are oversubscribed
6138 * against the full line rate.
6140 if (mbpi && total > mbpi)
6141 atomic_inc(&phba->cgn_driver_evt_cnt);
6143 phba->rx_block_cnt += div_u64(rcv, 512); /* save 512 byte block cnt */
6145 /* Each minute save Fabric and Driver congestion information */
6146 lpfc_cgn_save_evt_cnt(phba);
6148 phba->hba_flag &= ~HBA_SHORT_CMF;
6150 /* Since we need to call lpfc_cgn_save_evt_cnt every minute, on the
6151 * minute, adjust our next timer interval, if needed, to ensure a
6152 * 1 minute granularity when we get the next timer interrupt.
6154 if (time_after(jiffies + msecs_to_jiffies(LPFC_CMF_INTERVAL),
6155 phba->cgn_evt_timestamp)) {
timer_interval = jiffies_to_msecs(phba->cgn_evt_timestamp -
				  jiffies);
6158 if (timer_interval <= 0)
6159 timer_interval = LPFC_CMF_INTERVAL;
6161 phba->hba_flag |= HBA_SHORT_CMF;
6163 /* If we adjust timer_interval, max_bytes_per_interval
6164 * needs to be adjusted as well.
6166 phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
6167 timer_interval, 1000);
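/* Illustrative figures (not from the source): at a 32GFC line rate of
 * roughly 3,200,000,000 bytes/s, a shortened timer_interval of 60 ms
 * yields cmf_link_byte_count = 3,200,000,000 * 60 / 1000 =
 * 192,000,000 bytes for the next interval.
 */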
6168 if (phba->cmf_active_mode == LPFC_CFG_MONITOR)
6169 phba->cmf_max_bytes_per_interval =
6170 phba->cmf_link_byte_count;
/* Since total_bytes has already been zeroed, it's okay to unblock
 * after max_bytes_per_interval is set up.
 */
6176 if (atomic_xchg(&phba->cmf_bw_wait, 0))
6177 queue_work(phba->wq, &phba->unblock_request_work);
6179 /* SCSI IO is now unblocked */
6180 atomic_set(&phba->cmf_stop_io, 0);
6183 hrtimer_forward_now(timer,
6184 ktime_set(0, timer_interval * NSEC_PER_MSEC));
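/* hrtimer_forward_now() pushes the expiry forward relative to the
 * current time; returning HRTIMER_RESTART from the callback re-arms
 * the timer for the (possibly shortened) interval computed above.
 */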
6185 return HRTIMER_RESTART;
6188 #define trunk_link_status(__idx)\
6189 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
6190 ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
6191 "Link up" : "Link down") : "NA"
/* Did port __idx report an error? */
6193 #define trunk_port_fault(__idx)\
6194 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
6195 (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"
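/* For example, trunk_port_fault(2) tests the port2 trunk-config bit in
 * the ACQE and, if configured, reports "YES" when bit 2 (1 << 2) of
 * port_fault is set, "NO" when clear, and "NA" when not configured.
 */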
6198 lpfc_update_trunk_link_status(struct lpfc_hba *phba,
6199 struct lpfc_acqe_fc_la *acqe_fc)
6201 uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);
uint8_t cnt = 0;
6205 phba->sli4_hba.link_state.speed =
6206 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
6207 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6209 phba->sli4_hba.link_state.logical_speed =
6210 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
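/* The ACQE reports logical link speed in 10 Mbps units; e.g. a raw
 * value of 1600 becomes 16,000 Mbps here.
 */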
6211 /* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */
6212 phba->fc_linkspeed =
lpfc_async_link_speed_to_read_top(phba,
6215 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6217 if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
6218 phba->trunk_link.link0.state =
6219 bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
6220 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
cnt++;
6224 if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
6225 phba->trunk_link.link1.state =
6226 bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
6227 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
cnt++;
6231 if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
6232 phba->trunk_link.link2.state =
6233 bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
6234 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
cnt++;
6238 if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
6239 phba->trunk_link.link3.state =
6240 bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
6241 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
cnt++;
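/* Each port maps to one bit of port_fault (port N <-> 1 << N); a set
 * bit records the common fault code err against that trunk member.
 */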
if (cnt)
	phba->trunk_link.phy_lnk_speed =
		phba->sli4_hba.link_state.logical_speed / (cnt * 1000);
else
	phba->trunk_link.phy_lnk_speed = LPFC_LINK_SPEED_UNKNOWN;
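/* logical_speed is in Mbps, so dividing by (cnt * 1000) yields the
 * per-port physical rate in Gb/s. Illustrative: a 2-port trunk
 * reporting a 64,000 Mbps logical rate works out to 32 Gb/s per port.
 */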
6252 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6253 "2910 Async FC Trunking Event - Speed:%d\n"
6254 "\tLogical speed:%d "
6255 "port0: %s port1: %s port2: %s port3: %s\n",
6256 phba->sli4_hba.link_state.speed,
6257 phba->sli4_hba.link_state.logical_speed,
6258 trunk_link_status(0), trunk_link_status(1),
6259 trunk_link_status(2), trunk_link_status(3));
6261 if (phba->cmf_active_mode != LPFC_CFG_OFF)
6262 lpfc_cmf_signal_init(phba);
6265 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6266 "3202 trunk error:0x%x (%s) seen on port0:%s "
/*
 * SLI-4: only error codes up to 0xA are
 * defined as of now. Print an appropriate
 * message in case the driver needs to be updated.
 */
6272 "port1:%s port2:%s port3:%s\n", err, err > 0xA ?
6273 "UNDEFINED. update driver." : trunk_errmsg[err],
6274 trunk_port_fault(0), trunk_port_fault(1),
6275 trunk_port_fault(2), trunk_port_fault(3));
6280 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
6281 * @phba: pointer to lpfc hba data structure.
6282 * @acqe_fc: pointer to the async fc completion queue entry.
6284 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
6285 * that the event was received and then issue a read_topology mailbox command so
6286 * that the rest of the driver will treat it the same as SLI3.
6289 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
6293 struct lpfc_mbx_read_top *la;
6297 if (bf_get(lpfc_trailer_type, acqe_fc) !=
6298 LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
6299 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6300 "2895 Non FC link Event detected.(%d)\n",
6301 bf_get(lpfc_trailer_type, acqe_fc));
6305 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
6306 LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
6307 lpfc_update_trunk_link_status(phba, acqe_fc);
6311 /* Keep the link status for extra SLI4 state machine reference */
6312 phba->sli4_hba.link_state.speed =
6313 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
6314 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6315 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
6316 phba->sli4_hba.link_state.topology =
6317 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
6318 phba->sli4_hba.link_state.status =
6319 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
6320 phba->sli4_hba.link_state.type =
6321 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
6322 phba->sli4_hba.link_state.number =
6323 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
6324 phba->sli4_hba.link_state.fault =
6325 bf_get(lpfc_acqe_link_fault, acqe_fc);
6326 phba->sli4_hba.link_state.link_status =
6327 bf_get(lpfc_acqe_fc_la_link_status, acqe_fc);
6330 * Only select attention types need logical speed modification to what
6331 * was previously set.
6333 if (phba->sli4_hba.link_state.status >= LPFC_FC_LA_TYPE_LINK_UP &&
6334 phba->sli4_hba.link_state.status < LPFC_FC_LA_TYPE_ACTIVATE_FAIL) {
6335 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
6336 LPFC_FC_LA_TYPE_LINK_DOWN)
6337 phba->sli4_hba.link_state.logical_speed = 0;
6338 else if (!phba->sli4_hba.conf_trunk)
6339 phba->sli4_hba.link_state.logical_speed =
6340 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
6343 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6344 "2896 Async FC event - Speed:%dGBaud Topology:x%x "
6345 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
6346 "%dMbps Fault:x%x Link Status:x%x\n",
6347 phba->sli4_hba.link_state.speed,
6348 phba->sli4_hba.link_state.topology,
6349 phba->sli4_hba.link_state.status,
6350 phba->sli4_hba.link_state.type,
6351 phba->sli4_hba.link_state.number,
6352 phba->sli4_hba.link_state.logical_speed,
6353 phba->sli4_hba.link_state.fault,
6354 phba->sli4_hba.link_state.link_status);
6357 * The following attention types are informational only, providing
6358 * further details about link status. Overwrite the value of
6359 * link_state.status appropriately. No further action is required.
6361 if (phba->sli4_hba.link_state.status >= LPFC_FC_LA_TYPE_ACTIVATE_FAIL) {
6362 switch (phba->sli4_hba.link_state.status) {
6363 case LPFC_FC_LA_TYPE_ACTIVATE_FAIL:
6364 log_level = KERN_WARNING;
6365 phba->sli4_hba.link_state.status =
6366 LPFC_FC_LA_TYPE_LINK_DOWN;
6368 case LPFC_FC_LA_TYPE_LINK_RESET_PRTCL_EVT:
6370 * During bb credit recovery establishment, receiving
6371 * this attention type is normal. Link Up attention
6372 * type is expected to occur before this informational
6373 * attention type so keep the Link Up status.
6375 log_level = KERN_INFO;
6376 phba->sli4_hba.link_state.status =
6377 LPFC_FC_LA_TYPE_LINK_UP;
6380 log_level = KERN_INFO;
6383 lpfc_log_msg(phba, log_level, LOG_SLI,
6384 "2992 Async FC event - Informational Link "
6385 "Attention Type x%x\n",
6386 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc));
6390 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6392 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6393 "2897 The mboxq allocation failed\n");
6396 rc = lpfc_mbox_rsrc_prep(phba, pmb);
6398 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6399 "2898 The mboxq prep failed\n");
6403 /* Cleanup any outstanding ELS commands */
6404 lpfc_els_flush_all_cmd(phba);
6406 /* Block ELS IOCBs until we have done process link event */
6407 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
6409 /* Update link event statistics */
6410 phba->sli.slistat.link_event++;
6412 /* Create lpfc_handle_latt mailbox command from link ACQE */
6413 lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
6414 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
6415 pmb->vport = phba->pport;
6417 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
6418 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);
6420 switch (phba->sli4_hba.link_state.status) {
6421 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
6422 phba->link_flag |= LS_MDS_LINK_DOWN;
6424 case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
6425 phba->link_flag |= LS_MDS_LOOPBACK;
6431 /* Initialize completion status */
6433 mb->mbxStatus = MBX_SUCCESS;
6435 /* Parse port fault information field */
6436 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);
6438 /* Parse and translate link attention fields */
6439 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
6440 la->eventTag = acqe_fc->event_tag;
6442 if (phba->sli4_hba.link_state.status ==
6443 LPFC_FC_LA_TYPE_UNEXP_WWPN) {
6444 bf_set(lpfc_mbx_read_top_att_type, la,
6445 LPFC_FC_LA_TYPE_UNEXP_WWPN);
6447 bf_set(lpfc_mbx_read_top_att_type, la,
6448 LPFC_FC_LA_TYPE_LINK_DOWN);
6450 /* Invoke the mailbox command callback function */
6451 lpfc_mbx_cmpl_read_topology(phba, pmb);
6456 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
6457 if (rc == MBX_NOT_FINISHED)
6462 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
6466 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
6467 * @phba: pointer to lpfc hba data structure.
6468 * @acqe_sli: pointer to the async SLI completion queue entry.
6470 * This routine is to handle the SLI4 asynchronous SLI events.
6473 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
6479 uint8_t operational = 0;
6480 struct temp_event temp_event_data;
6481 struct lpfc_acqe_misconfigured_event *misconfigured;
6482 struct lpfc_acqe_cgn_signal *cgn_signal;
6483 struct Scsi_Host *shost;
6484 struct lpfc_vport **vports;
6487 evt_type = bf_get(lpfc_trailer_type, acqe_sli);
6489 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6490 "2901 Async SLI event - Type:%d, Event Data: x%08x "
6491 "x%08x x%08x x%08x\n", evt_type,
6492 acqe_sli->event_data1, acqe_sli->event_data2,
6493 acqe_sli->event_data3, acqe_sli->trailer);
6495 port_name = phba->Port[0];
6496 if (port_name == 0x00)
port_name = '?'; /* port name is empty */
6500 case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
6501 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
6502 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
6503 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
6505 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6506 "3190 Over Temperature:%d Celsius- Port Name %c\n",
6507 acqe_sli->event_data1, port_name);
6509 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
6510 shost = lpfc_shost_from_vport(phba->pport);
6511 fc_host_post_vendor_event(shost, fc_get_event_number(),
6512 sizeof(temp_event_data),
6513 (char *)&temp_event_data,
6514 SCSI_NL_VID_TYPE_PCI
6515 | PCI_VENDOR_ID_EMULEX);
6517 case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
6518 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
6519 temp_event_data.event_code = LPFC_NORMAL_TEMP;
6520 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
6522 lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_LDS_EVENT,
6523 "3191 Normal Temperature:%d Celsius - Port Name %c\n",
6524 acqe_sli->event_data1, port_name);
6526 shost = lpfc_shost_from_vport(phba->pport);
6527 fc_host_post_vendor_event(shost, fc_get_event_number(),
6528 sizeof(temp_event_data),
6529 (char *)&temp_event_data,
6530 SCSI_NL_VID_TYPE_PCI
6531 | PCI_VENDOR_ID_EMULEX);
6533 case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
6534 misconfigured = (struct lpfc_acqe_misconfigured_event *)
6535 &acqe_sli->event_data1;
6537 /* fetch the status for this port */
6538 switch (phba->sli4_hba.lnk_info.lnk_no) {
6539 case LPFC_LINK_NUMBER_0:
6540 status = bf_get(lpfc_sli_misconfigured_port0_state,
6541 &misconfigured->theEvent);
6542 operational = bf_get(lpfc_sli_misconfigured_port0_op,
6543 &misconfigured->theEvent);
6545 case LPFC_LINK_NUMBER_1:
6546 status = bf_get(lpfc_sli_misconfigured_port1_state,
6547 &misconfigured->theEvent);
6548 operational = bf_get(lpfc_sli_misconfigured_port1_op,
6549 &misconfigured->theEvent);
6551 case LPFC_LINK_NUMBER_2:
6552 status = bf_get(lpfc_sli_misconfigured_port2_state,
6553 &misconfigured->theEvent);
6554 operational = bf_get(lpfc_sli_misconfigured_port2_op,
6555 &misconfigured->theEvent);
6557 case LPFC_LINK_NUMBER_3:
6558 status = bf_get(lpfc_sli_misconfigured_port3_state,
6559 &misconfigured->theEvent);
6560 operational = bf_get(lpfc_sli_misconfigured_port3_op,
6561 &misconfigured->theEvent);
6564 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6566 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
6567 "event: Invalid link %d",
6568 phba->sli4_hba.lnk_info.lnk_no);
6572 /* Skip if optic state unchanged */
6573 if (phba->sli4_hba.lnk_info.optic_state == status)
6577 case LPFC_SLI_EVENT_STATUS_VALID:
6578 sprintf(message, "Physical Link is functional");
6580 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
6581 sprintf(message, "Optics faulted/incorrectly "
6582 "installed/not installed - Reseat optics, "
6583 "if issue not resolved, replace.");
6585 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
sprintf(message, "Optics of two types installed - Remove one "
6588 "optic or install matching pair of optics.");
6590 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
6591 sprintf(message, "Incompatible optics - Replace with "
6592 "compatible optics for card to function.");
6594 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
6595 sprintf(message, "Unqualified optics - Replace with "
6596 "Avago optics for Warranty and Technical "
6597 "Support - Link is%s operational",
6598 (operational) ? " not" : "");
6600 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
6601 sprintf(message, "Uncertified optics - Replace with "
6602 "Avago-certified optics to enable link "
6603 "operation - Link is%s operational",
6604 (operational) ? " not" : "");
6607 /* firmware is reporting a status we don't know about */
6608 sprintf(message, "Unknown event status x%02x", status);
6612 /* Issue READ_CONFIG mbox command to refresh supported speeds */
6613 rc = lpfc_sli4_read_config(phba);
6616 lpfc_printf_log(phba, KERN_ERR,
6618 "3194 Unable to retrieve supported "
6619 "speeds, rc = 0x%x\n", rc);
6621 rc = lpfc_sli4_refresh_params(phba);
6623 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6624 "3174 Unable to update pls support, "
6627 vports = lpfc_create_vport_work_array(phba);
6628 if (vports != NULL) {
for (i = 0; i <= phba->max_vports && vports[i] != NULL;
     i++) {
6631 shost = lpfc_shost_from_vport(vports[i]);
6632 lpfc_host_supported_speeds_set(shost);
6635 lpfc_destroy_vport_work_array(phba, vports);
6637 phba->sli4_hba.lnk_info.optic_state = status;
6638 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6639 "3176 Port Name %c %s\n", port_name, message);
6641 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
6642 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6643 "3192 Remote DPort Test Initiated - "
6644 "Event Data1:x%08x Event Data2: x%08x\n",
6645 acqe_sli->event_data1, acqe_sli->event_data2);
6647 case LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG:
6648 /* Call FW to obtain active parms */
6649 lpfc_sli4_cgn_parm_chg_evt(phba);
6651 case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
6652 /* Misconfigured WWN. Reports that the SLI Port is configured
* to use FA-WWN, but the attached device doesn't support it.
6654 * Event Data1 - N.A, Event Data2 - N.A
6655 * This event only happens on the physical port.
6657 lpfc_log_msg(phba, KERN_WARNING, LOG_SLI | LOG_DISCOVERY,
6658 "2699 Misconfigured FA-PWWN - Attached device "
6659 "does not support FA-PWWN\n");
6660 phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_FABRIC;
6661 memset(phba->pport->fc_portname.u.wwn, 0,
6662 sizeof(struct lpfc_name));
6664 case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
6665 /* EEPROM failure. No driver action is required */
6666 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6667 "2518 EEPROM failure - "
6668 "Event Data1: x%08x Event Data2: x%08x\n",
6669 acqe_sli->event_data1, acqe_sli->event_data2);
6671 case LPFC_SLI_EVENT_TYPE_CGN_SIGNAL:
6672 if (phba->cmf_active_mode == LPFC_CFG_OFF)
6674 cgn_signal = (struct lpfc_acqe_cgn_signal *)
6675 &acqe_sli->event_data1;
6676 phba->cgn_acqe_cnt++;
6678 cnt = bf_get(lpfc_warn_acqe, cgn_signal);
6679 atomic64_add(cnt, &phba->cgn_acqe_stat.warn);
6680 atomic64_add(cgn_signal->alarm_cnt, &phba->cgn_acqe_stat.alarm);
6682 /* no threshold for CMF, even 1 signal will trigger an event */
6684 /* Alarm overrides warning, so check that first */
6685 if (cgn_signal->alarm_cnt) {
6686 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6687 /* Keep track of alarm cnt for CMF_SYNC_WQE */
6688 atomic_add(cgn_signal->alarm_cnt,
6689 &phba->cgn_sync_alarm_cnt);
6692 /* signal action needs to be taken */
6693 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
6694 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6695 /* Keep track of warning cnt for CMF_SYNC_WQE */
6696 atomic_add(cnt, &phba->cgn_sync_warn_cnt);
6700 case LPFC_SLI_EVENT_TYPE_RD_SIGNAL:
6701 /* May be accompanied by a temperature event */
6702 lpfc_printf_log(phba, KERN_INFO,
6703 LOG_SLI | LOG_LINK_EVENT | LOG_LDS_EVENT,
6704 "2902 Remote Degrade Signaling: x%08x x%08x "
6706 acqe_sli->event_data1, acqe_sli->event_data2,
6707 acqe_sli->event_data3);
6710 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6711 "3193 Unrecognized SLI event, type: 0x%x",
6718 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
6719 * @vport: pointer to vport data structure.
6721 * This routine is to perform Clear Virtual Link (CVL) on a vport in
6722 * response to a CVL event.
6724 * Return the pointer to the ndlp with the vport if successful, otherwise
6727 static struct lpfc_nodelist *
6728 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
6730 struct lpfc_nodelist *ndlp;
6731 struct Scsi_Host *shost;
6732 struct lpfc_hba *phba;
6739 ndlp = lpfc_findnode_did(vport, Fabric_DID);
6741 /* Cannot find existing Fabric ndlp, so allocate a new one */
6742 ndlp = lpfc_nlp_init(vport, Fabric_DID);
6745 /* Set the node type */
6746 ndlp->nlp_type |= NLP_FABRIC;
6747 /* Put ndlp onto node list */
6748 lpfc_enqueue_node(vport, ndlp);
6750 if ((phba->pport->port_state < LPFC_FLOGI) &&
6751 (phba->pport->port_state != LPFC_VPORT_FAILED))
6753 /* If virtual link is not yet instantiated ignore CVL */
6754 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
6755 && (vport->port_state != LPFC_VPORT_FAILED))
6757 shost = lpfc_shost_from_vport(vport);
6760 lpfc_linkdown_port(vport);
6761 lpfc_cleanup_pending_mbox(vport);
6762 spin_lock_irq(shost->host_lock);
6763 vport->fc_flag |= FC_VPORT_CVL_RCVD;
6764 spin_unlock_irq(shost->host_lock);
6770 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
6771 * @phba: pointer to lpfc hba data structure.
6773 * This routine is to perform Clear Virtual Link (CVL) on all vports in
6774 * response to a FCF dead event.
6777 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
6779 struct lpfc_vport **vports;
6782 vports = lpfc_create_vport_work_array(phba);
6784 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
6785 lpfc_sli4_perform_vport_cvl(vports[i]);
6786 lpfc_destroy_vport_work_array(phba, vports);
6790 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
6791 * @phba: pointer to lpfc hba data structure.
6792 * @acqe_fip: pointer to the async fcoe completion queue entry.
6794 * This routine is to handle the SLI4 asynchronous fcoe event.
6797 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
6798 struct lpfc_acqe_fip *acqe_fip)
6800 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
6802 struct lpfc_vport *vport;
6803 struct lpfc_nodelist *ndlp;
6804 int active_vlink_present;
6805 struct lpfc_vport **vports;
6808 phba->fc_eventTag = acqe_fip->event_tag;
6809 phba->fcoe_eventtag = acqe_fip->event_tag;
6810 switch (event_type) {
6811 case LPFC_FIP_EVENT_TYPE_NEW_FCF:
6812 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
6813 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
6814 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6815 "2546 New FCF event, evt_tag:x%x, "
6817 acqe_fip->event_tag,
6820 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
6822 "2788 FCF param modified event, "
6823 "evt_tag:x%x, index:x%x\n",
6824 acqe_fip->event_tag,
6826 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
6828 * During period of FCF discovery, read the FCF
6829 * table record indexed by the event to update
6830 * FCF roundrobin failover eligible FCF bmask.
6832 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
6834 "2779 Read FCF (x%x) for updating "
6835 "roundrobin FCF failover bmask\n",
6837 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
6840 /* If the FCF discovery is in progress, do nothing. */
6841 spin_lock_irq(&phba->hbalock);
6842 if (phba->hba_flag & FCF_TS_INPROG) {
6843 spin_unlock_irq(&phba->hbalock);
6846 /* If fast FCF failover rescan event is pending, do nothing */
6847 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
6848 spin_unlock_irq(&phba->hbalock);
6852 /* If the FCF has been in discovered state, do nothing. */
6853 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
6854 spin_unlock_irq(&phba->hbalock);
6857 spin_unlock_irq(&phba->hbalock);
6859 /* Otherwise, scan the entire FCF table and re-discover SAN */
6860 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
6861 "2770 Start FCF table scan per async FCF "
6862 "event, evt_tag:x%x, index:x%x\n",
6863 acqe_fip->event_tag, acqe_fip->index);
6864 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
6865 LPFC_FCOE_FCF_GET_FIRST);
6867 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6868 "2547 Issue FCF scan read FCF mailbox "
6869 "command failed (x%x)\n", rc);
6872 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
6873 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6874 "2548 FCF Table full count 0x%x tag 0x%x\n",
6875 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
6876 acqe_fip->event_tag);
6879 case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
6880 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
6881 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6882 "2549 FCF (x%x) disconnected from network, "
6883 "tag:x%x\n", acqe_fip->index,
6884 acqe_fip->event_tag);
6886 * If we are in the middle of FCF failover process, clear
6887 * the corresponding FCF bit in the roundrobin bitmap.
6889 spin_lock_irq(&phba->hbalock);
6890 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
6891 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
6892 spin_unlock_irq(&phba->hbalock);
6893 /* Update FLOGI FCF failover eligible FCF bmask */
6894 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
6897 spin_unlock_irq(&phba->hbalock);
6899 /* If the event is not for currently used fcf do nothing */
6900 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
6904 * Otherwise, request the port to rediscover the entire FCF
6905 * table for a fast recovery from case that the current FCF
6906 * is no longer valid as we are not in the middle of FCF
6907 * failover process already.
6909 spin_lock_irq(&phba->hbalock);
6910 /* Mark the fast failover process in progress */
6911 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
6912 spin_unlock_irq(&phba->hbalock);
6914 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
6915 "2771 Start FCF fast failover process due to "
6916 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
6917 "\n", acqe_fip->event_tag, acqe_fip->index);
6918 rc = lpfc_sli4_redisc_fcf_table(phba);
6920 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
6922 "2772 Issue FCF rediscover mailbox "
6923 "command failed, fail through to FCF "
6925 spin_lock_irq(&phba->hbalock);
6926 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
6927 spin_unlock_irq(&phba->hbalock);
6929 * Last resort will fail over by treating this
6930 * as a link down to FCF registration.
6932 lpfc_sli4_fcf_dead_failthrough(phba);
6934 /* Reset FCF roundrobin bmask for new discovery */
6935 lpfc_sli4_clear_fcf_rr_bmask(phba);
6937 * Handling fast FCF failover to a DEAD FCF event is
* considered equivalent to receiving CVL to all vports.
6940 lpfc_sli4_perform_all_vport_cvl(phba);
6943 case LPFC_FIP_EVENT_TYPE_CVL:
6944 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
6945 lpfc_printf_log(phba, KERN_ERR,
6947 "2718 Clear Virtual Link Received for VPI 0x%x"
6948 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
vport = lpfc_find_vport_by_vpid(phba,
				acqe_fip->index);
6952 ndlp = lpfc_sli4_perform_vport_cvl(vport);
6955 active_vlink_present = 0;
6957 vports = lpfc_create_vport_work_array(phba);
for (i = 0; i <= phba->max_vports && vports[i] != NULL;
     i++) {
6961 if ((!(vports[i]->fc_flag &
6962 FC_VPORT_CVL_RCVD)) &&
6963 (vports[i]->port_state > LPFC_FDISC)) {
6964 active_vlink_present = 1;
6968 lpfc_destroy_vport_work_array(phba, vports);
6972 * Don't re-instantiate if vport is marked for deletion.
6973 * If we are here first then vport_delete is going to wait
6974 * for discovery to complete.
6976 if (!(vport->load_flag & FC_UNLOADING) &&
6977 active_vlink_present) {
6979 * If there are other active VLinks present,
6980 * re-instantiate the Vlink using FDISC.
6982 mod_timer(&ndlp->nlp_delayfunc,
6983 jiffies + msecs_to_jiffies(1000));
6984 spin_lock_irq(&ndlp->lock);
6985 ndlp->nlp_flag |= NLP_DELAY_TMO;
6986 spin_unlock_irq(&ndlp->lock);
6987 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
6988 vport->port_state = LPFC_FDISC;
6991 * Otherwise, we request port to rediscover
6992 * the entire FCF table for a fast recovery
6993 * from possible case that the current FCF
6994 * is no longer valid if we are not already
6995 * in the FCF failover process.
6997 spin_lock_irq(&phba->hbalock);
6998 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
6999 spin_unlock_irq(&phba->hbalock);
7002 /* Mark the fast failover process in progress */
7003 phba->fcf.fcf_flag |= FCF_ACVL_DISC;
7004 spin_unlock_irq(&phba->hbalock);
7005 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
7007 "2773 Start FCF failover per CVL, "
7008 "evt_tag:x%x\n", acqe_fip->event_tag);
7009 rc = lpfc_sli4_redisc_fcf_table(phba);
7011 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
7013 "2774 Issue FCF rediscover "
7014 "mailbox command failed, "
"fail through to CVL event\n");
7016 spin_lock_irq(&phba->hbalock);
7017 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
7018 spin_unlock_irq(&phba->hbalock);
7020 * Last resort will be re-try on the
* current registered FCF entry.
7023 lpfc_retry_pport_discovery(phba);
7026 * Reset FCF roundrobin bmask for new
7029 lpfc_sli4_clear_fcf_rr_bmask(phba);
7033 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7034 "0288 Unknown FCoE event type 0x%x event tag "
7035 "0x%x\n", event_type, acqe_fip->event_tag);
7041 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
7042 * @phba: pointer to lpfc hba data structure.
7043 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
7045 * This routine is to handle the SLI4 asynchronous dcbx event.
7048 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
7049 struct lpfc_acqe_dcbx *acqe_dcbx)
7051 phba->fc_eventTag = acqe_dcbx->event_tag;
7052 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7053 "0290 The SLI4 DCBX asynchronous event is not "
7058 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
7059 * @phba: pointer to lpfc hba data structure.
7060 * @acqe_grp5: pointer to the async grp5 completion queue entry.
7062 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
* is an asynchronous notification of a logical link speed change. The Port
7064 * reports the logical link speed in units of 10Mbps.
7067 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
7068 struct lpfc_acqe_grp5 *acqe_grp5)
7070 uint16_t prev_ll_spd;
7072 phba->fc_eventTag = acqe_grp5->event_tag;
7073 phba->fcoe_eventtag = acqe_grp5->event_tag;
7074 prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
7075 phba->sli4_hba.link_state.logical_speed =
7076 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
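/* Same 10 Mbps unit convention as the FC ACQE: a raw llink_spd of 100
 * is logged as 1000 Mbps.
 */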
7077 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
7078 "2789 GRP5 Async Event: Updating logical link speed "
7079 "from %dMbps to %dMbps\n", prev_ll_spd,
7080 phba->sli4_hba.link_state.logical_speed);
7084 * lpfc_sli4_async_cmstat_evt - Process the asynchronous cmstat event
7085 * @phba: pointer to lpfc hba data structure.
7087 * This routine is to handle the SLI4 asynchronous cmstat event. A cmstat event
7088 * is an asynchronous notification of a request to reset CM stats.
7091 lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba)
7095 lpfc_init_congestion_stat(phba);
7099 * lpfc_cgn_params_val - Validate FW congestion parameters.
7100 * @phba: pointer to lpfc hba data structure.
7101 * @p_cfg_param: pointer to FW provided congestion parameters.
7103 * This routine validates the congestion parameters passed
7104 * by the FW to the driver via an ACQE event.
7107 lpfc_cgn_params_val(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cfg_param)
7109 spin_lock_irq(&phba->hbalock);
7111 if (!lpfc_rangecheck(p_cfg_param->cgn_param_mode, LPFC_CFG_OFF,
7112 LPFC_CFG_MONITOR)) {
7113 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
7114 "6225 CMF mode param out of range: %d\n",
7115 p_cfg_param->cgn_param_mode);
7116 p_cfg_param->cgn_param_mode = LPFC_CFG_OFF;
7119 spin_unlock_irq(&phba->hbalock);
7122 static const char * const lpfc_cmf_mode_to_str[] = {
7129 * lpfc_cgn_params_parse - Process a FW cong parm change event
7130 * @phba: pointer to lpfc hba data structure.
7131 * @p_cgn_param: pointer to a data buffer with the FW cong params.
* @len: the size of @p_cgn_param in bytes.
7134 * This routine validates the congestion management buffer signature
7135 * from the FW, validates the contents and makes corrections for
7136 * valid, in-range values. If the signature magic is correct and
7137 * after parameter validation, the contents are copied to the driver's
7138 * @phba structure. If the magic is incorrect, an error message is
7142 lpfc_cgn_params_parse(struct lpfc_hba *phba,
7143 struct lpfc_cgn_param *p_cgn_param, uint32_t len)
7145 struct lpfc_cgn_info *cp;
7146 uint32_t crc, oldmode;
7147 char acr_string[4] = {0};
7149 /* Make sure the FW has encoded the correct magic number to
7150 * validate the congestion parameter in FW memory.
7152 if (p_cgn_param->cgn_param_magic == LPFC_CFG_PARAM_MAGIC_NUM) {
7153 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
7154 "4668 FW cgn parm buffer data: "
7155 "magic 0x%x version %d mode %d "
7156 "level0 %d level1 %d "
7157 "level2 %d byte13 %d "
7158 "byte14 %d byte15 %d "
7159 "byte11 %d byte12 %d activeMode %d\n",
7160 p_cgn_param->cgn_param_magic,
7161 p_cgn_param->cgn_param_version,
7162 p_cgn_param->cgn_param_mode,
7163 p_cgn_param->cgn_param_level0,
7164 p_cgn_param->cgn_param_level1,
7165 p_cgn_param->cgn_param_level2,
7166 p_cgn_param->byte13,
7167 p_cgn_param->byte14,
7168 p_cgn_param->byte15,
7169 p_cgn_param->byte11,
7170 p_cgn_param->byte12,
7171 phba->cmf_active_mode);
7173 oldmode = phba->cmf_active_mode;
7175 /* Any parameters out of range are corrected to defaults
7176 * by this routine. No need to fail.
7178 lpfc_cgn_params_val(phba, p_cgn_param);
7180 /* Parameters are verified, move them into driver storage */
7181 spin_lock_irq(&phba->hbalock);
7182 memcpy(&phba->cgn_p, p_cgn_param,
7183 sizeof(struct lpfc_cgn_param));
7185 /* Update parameters in congestion info buffer now */
7187 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
7188 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
7189 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
7190 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
7191 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
7192 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
7193 LPFC_CGN_CRC32_SEED);
7194 cp->cgn_info_crc = cpu_to_le32(crc);
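/* Note: any field change in the congestion info buffer must be
 * followed by recomputing the CRC over LPFC_CGN_INFO_SZ with
 * LPFC_CGN_CRC32_SEED, as done here, so readers that validate the
 * CRC see a consistent image.
 */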
7196 spin_unlock_irq(&phba->hbalock);
7198 phba->cmf_active_mode = phba->cgn_p.cgn_param_mode;
7202 if (phba->cgn_p.cgn_param_mode != LPFC_CFG_OFF) {
7203 /* Turning CMF on */
7204 lpfc_cmf_start(phba);
7206 if (phba->link_state >= LPFC_LINK_UP) {
7207 phba->cgn_reg_fpin =
7208 phba->cgn_init_reg_fpin;
7209 phba->cgn_reg_signal =
7210 phba->cgn_init_reg_signal;
7211 lpfc_issue_els_edc(phba->pport, 0);
7215 case LPFC_CFG_MANAGED:
7216 switch (phba->cgn_p.cgn_param_mode) {
case LPFC_CFG_OFF:
	/* Turning CMF off */
7219 lpfc_cmf_stop(phba);
7220 if (phba->link_state >= LPFC_LINK_UP)
7221 lpfc_issue_els_edc(phba->pport, 0);
7223 case LPFC_CFG_MONITOR:
7224 phba->cmf_max_bytes_per_interval =
7225 phba->cmf_link_byte_count;
7227 /* Resume blocked IO - unblock on workqueue */
7228 queue_work(phba->wq,
7229 &phba->unblock_request_work);
7233 case LPFC_CFG_MONITOR:
7234 switch (phba->cgn_p.cgn_param_mode) {
case LPFC_CFG_OFF:
	/* Turning CMF off */
7237 lpfc_cmf_stop(phba);
7238 if (phba->link_state >= LPFC_LINK_UP)
7239 lpfc_issue_els_edc(phba->pport, 0);
7241 case LPFC_CFG_MANAGED:
7242 lpfc_cmf_signal_init(phba);
7247 if (oldmode != LPFC_CFG_OFF ||
7248 oldmode != phba->cgn_p.cgn_param_mode) {
7249 if (phba->cgn_p.cgn_param_mode == LPFC_CFG_MANAGED)
7250 scnprintf(acr_string, sizeof(acr_string), "%u",
7251 phba->cgn_p.cgn_param_level0);
7253 scnprintf(acr_string, sizeof(acr_string), "NA");
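/* acr_string is 4 bytes: enough for "NA" or a level0 value of up to
 * three digits plus the NUL that scnprintf() always terminates with.
 */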
7255 dev_info(&phba->pcidev->dev, "%d: "
7256 "4663 CMF: Mode %s acr %s\n",
7258 lpfc_cmf_mode_to_str
7259 [phba->cgn_p.cgn_param_mode],
7263 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7264 "4669 FW cgn parm buf wrong magic 0x%x "
7265 "version %d\n", p_cgn_param->cgn_param_magic,
7266 p_cgn_param->cgn_param_version);
7271 * lpfc_sli4_cgn_params_read - Read and Validate FW congestion parameters.
7272 * @phba: pointer to lpfc hba data structure.
7274 * This routine issues a read_object mailbox command to
7275 * get the congestion management parameters from the FW
7276 * parses it and updates the driver maintained values.
* Returns:
* 0 if the object was empty
* a negative error value if an error was encountered
* the byte count if bytes were read from the object
7284 lpfc_sli4_cgn_params_read(struct lpfc_hba *phba)
7287 struct lpfc_cgn_param *p_cgn_param = NULL;
7291 /* Find out if the FW has a new set of congestion parameters. */
7292 len = sizeof(struct lpfc_cgn_param);
7293 pdata = kzalloc(len, GFP_KERNEL);
7294 ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME,
7297 /* 0 means no data. A negative means error. A positive means
7298 * bytes were copied.
7301 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7302 "4670 CGN RD OBJ returns no data\n");
7304 } else if (ret < 0) {
7305 /* Some error. Just exit and return it to the caller.*/
7309 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
7310 "6234 READ CGN PARAMS Successful %d\n", len);
7312 /* Parse data pointer over len and update the phba congestion
7313 * parameters with values passed back. The receive rate values
7314 * may have been altered in FW, but take no action here.
7316 p_cgn_param = (struct lpfc_cgn_param *)pdata;
7317 lpfc_cgn_params_parse(phba, p_cgn_param, len);
7325 * lpfc_sli4_cgn_parm_chg_evt - Process a FW congestion param change event
7326 * @phba: pointer to lpfc hba data structure.
7328 * The FW generated Async ACQE SLI event calls this routine when
7329 * the event type is an SLI Internal Port Event and the Event Code
7330 * indicates a change to the FW maintained congestion parameters.
7332 * This routine executes a Read_Object mailbox call to obtain the
7333 * current congestion parameters maintained in FW and corrects
7334 * the driver's active congestion parameters.
* The acqe event is not passed because there is no further data
* required.
7339 * Returns nonzero error if event processing encountered an error.
7340 * Zero otherwise for success.
7343 lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *phba)
7347 if (!phba->sli4_hba.pc_sli4_params.cmf) {
7348 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7349 "4664 Cgn Evt when E2E off. Drop event\n");
7353 /* If the event is claiming an empty object, it's ok. A write
7354 * could have cleared it. Only error is a negative return
7357 ret = lpfc_sli4_cgn_params_read(phba);
7359 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7360 "4667 Error reading Cgn Params (%d)\n",
7363 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7364 "4673 CGN Event empty object.\n");
* lpfc_sli4_async_event_proc - Process all the pending asynchronous events
7371 * @phba: pointer to lpfc hba data structure.
7373 * This routine is invoked by the worker thread to process all the pending
7374 * SLI4 asynchronous events.
7376 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
7378 struct lpfc_cq_event *cq_event;
7379 unsigned long iflags;
7381 /* First, declare the async event has been handled */
7382 spin_lock_irqsave(&phba->hbalock, iflags);
7383 phba->hba_flag &= ~ASYNC_EVENT;
7384 spin_unlock_irqrestore(&phba->hbalock, iflags);
7386 /* Now, handle all the async events */
7387 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
7388 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
7389 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
7390 cq_event, struct lpfc_cq_event, list);
7391 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock,
7394 /* Process the asynchronous event */
7395 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
7396 case LPFC_TRAILER_CODE_LINK:
7397 lpfc_sli4_async_link_evt(phba,
7398 &cq_event->cqe.acqe_link);
7400 case LPFC_TRAILER_CODE_FCOE:
7401 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
7403 case LPFC_TRAILER_CODE_DCBX:
7404 lpfc_sli4_async_dcbx_evt(phba,
7405 &cq_event->cqe.acqe_dcbx);
7407 case LPFC_TRAILER_CODE_GRP5:
7408 lpfc_sli4_async_grp5_evt(phba,
7409 &cq_event->cqe.acqe_grp5);
7411 case LPFC_TRAILER_CODE_FC:
7412 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
7414 case LPFC_TRAILER_CODE_SLI:
7415 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
7417 case LPFC_TRAILER_CODE_CMSTAT:
7418 lpfc_sli4_async_cmstat_evt(phba);
7421 lpfc_printf_log(phba, KERN_ERR,
7423 "1804 Invalid asynchronous event code: "
7424 "x%x\n", bf_get(lpfc_trailer_code,
7425 &cq_event->cqe.mcqe_cmpl));
7429 /* Free the completion event processed to the free pool */
7430 lpfc_sli4_cq_event_release(phba, cq_event);
7431 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
7433 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
7437 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
7438 * @phba: pointer to lpfc hba data structure.
7440 * This routine is invoked by the worker thread to process FCF table
7441 * rediscovery pending completion event.
7443 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
7447 spin_lock_irq(&phba->hbalock);
7448 /* Clear FCF rediscovery timeout event */
7449 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
7450 /* Clear driver fast failover FCF record flag */
7451 phba->fcf.failover_rec.flag = 0;
7452 /* Set state for FCF fast failover */
7453 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
7454 spin_unlock_irq(&phba->hbalock);
7456 /* Scan FCF table from the first entry to re-discover SAN */
7457 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
7458 "2777 Start post-quiescent FCF table scan\n");
7459 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
7461 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7462 "2747 Issue FCF scan read FCF mailbox "
7463 "command failed 0x%x\n", rc);
7467 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
7468 * @phba: pointer to lpfc hba data structure.
7469 * @dev_grp: The HBA PCI-Device group number.
7471 * This routine is invoked to set up the per HBA PCI-Device group function
7472 * API jump table entries.
7474 * Return: 0 if success, otherwise -ENODEV
7477 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7481 /* Set up lpfc PCI-device group */
7482 phba->pci_dev_grp = dev_grp;
7484 /* The LPFC_PCI_DEV_OC uses SLI4 */
7485 if (dev_grp == LPFC_PCI_DEV_OC)
7486 phba->sli_rev = LPFC_SLI_REV4;
7488 /* Set up device INIT API function jump table */
7489 rc = lpfc_init_api_table_setup(phba, dev_grp);
7492 /* Set up SCSI API function jump table */
7493 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
7496 /* Set up SLI API function jump table */
7497 rc = lpfc_sli_api_table_setup(phba, dev_grp);
7500 /* Set up MBOX API function jump table */
7501 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
7509 * lpfc_log_intr_mode - Log the active interrupt mode
7510 * @phba: pointer to lpfc hba data structure.
7511 * @intr_mode: active interrupt mode adopted.
* This routine is invoked to log the currently active interrupt mode.
7516 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
7518 switch (intr_mode) {
7520 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7521 "0470 Enable INTx interrupt mode.\n");
7524 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7525 "0481 Enabled MSI interrupt mode.\n");
7528 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7529 "0480 Enabled MSI-X interrupt mode.\n");
7532 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7533 "0482 Illegal interrupt mode.\n");
7540 * lpfc_enable_pci_dev - Enable a generic PCI device.
7541 * @phba: pointer to lpfc hba data structure.
7543 * This routine is invoked to enable the PCI device that is common to all
* PCI devices.
*
* Return codes
*	0 - successful
*	other values - error
7551 lpfc_enable_pci_dev(struct lpfc_hba *phba)
7553 struct pci_dev *pdev;
7555 /* Obtain PCI device reference */
7559 pdev = phba->pcidev;
7560 /* Enable PCI device */
7561 if (pci_enable_device_mem(pdev))
7563 /* Request PCI resource for the device */
7564 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
7565 goto out_disable_device;
7566 /* Set up device as PCI master and save state for EEH */
7567 pci_set_master(pdev);
7568 pci_try_set_mwi(pdev);
7569 pci_save_state(pdev);
7571 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
7572 if (pci_is_pcie(pdev))
7573 pdev->needs_freset = 1;
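/* On EEH-capable platforms (e.g. powerpc pseries), needs_freset asks
 * the recovery code to use a fundamental reset (PERST) rather than a
 * hot reset when this PCIe function must be reset.
 */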
7578 pci_disable_device(pdev);
7580 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7581 "1401 Failed to enable pci device\n");
7586 * lpfc_disable_pci_dev - Disable a generic PCI device.
7587 * @phba: pointer to lpfc hba data structure.
* This routine is invoked to disable the PCI device that is common to all
* PCI devices.
7593 lpfc_disable_pci_dev(struct lpfc_hba *phba)
7595 struct pci_dev *pdev;
7597 /* Obtain PCI device reference */
7601 pdev = phba->pcidev;
7602 /* Release PCI resource and disable PCI device */
7603 pci_release_mem_regions(pdev);
7604 pci_disable_device(pdev);
7610 * lpfc_reset_hba - Reset a hba
7611 * @phba: pointer to lpfc hba data structure.
7613 * This routine is invoked to reset a hba device. It brings the HBA
7614 * offline, performs a board restart, and then brings the board back
7615 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
* outstanding mailbox commands.
7619 lpfc_reset_hba(struct lpfc_hba *phba)
7621 /* If resets are disabled then set error state and return. */
7622 if (!phba->cfg_enable_hba_reset) {
7623 phba->link_state = LPFC_HBA_ERROR;
7627 /* If not LPFC_SLI_ACTIVE, force all IO to be flushed */
7628 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) {
7629 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
7631 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
7632 lpfc_sli_flush_io_rings(phba);
7635 lpfc_sli_brdrestart(phba);
7637 lpfc_unblock_mgmt_io(phba);
7641 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
7642 * @phba: pointer to lpfc hba data structure.
* This function reads the SR-IOV extended capability of the PCI function
* to determine the maximum number of virtual functions (TotalVFs) that
* the device supports.
*
* Return: the number of supported virtual functions, or 0 if the device
* does not support SR-IOV.
7651 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
7653 struct pci_dev *pdev = phba->pcidev;
7657 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
7661 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
7666 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
7667 * @phba: pointer to lpfc hba data structure.
7668 * @nr_vfn: number of virtual functions to be enabled.
7670 * This function enables the PCI SR-IOV virtual functions to a physical
7671 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
7672 * enable the number of virtual functions to the physical function. As
* not all devices support SR-IOV, the return code from the
* pci_enable_sriov() API call is not considered an error condition for
* most devices.
7677 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
7679 struct pci_dev *pdev = phba->pcidev;
7680 uint16_t max_nr_vfn;
7683 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
7684 if (nr_vfn > max_nr_vfn) {
7685 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7686 "3057 Requested vfs (%d) greater than "
7687 "supported vfs (%d)", nr_vfn, max_nr_vfn);
7691 rc = pci_enable_sriov(pdev, nr_vfn);
7693 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7694 "2806 Failed to enable sriov on this device "
7695 "with vfn number nr_vf:%d, rc:%d\n",
7698 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7699 "2807 Successful enable sriov on this device "
7700 "with vfn number nr_vf:%d\n", nr_vfn);
7705 lpfc_unblock_requests_work(struct work_struct *work)
7707 struct lpfc_hba *phba = container_of(work, struct lpfc_hba,
7708 unblock_request_work);
7710 lpfc_unblock_requests(phba);
* lpfc_setup_driver_resource_phase1 - Phase1 setup of driver internal resources.
7715 * @phba: pointer to lpfc hba data structure.
7717 * This routine is invoked to set up the driver internal resources before the
* device specific resource setup to support the HBA device it is
* attached to.
*
* Return codes
*	0 - successful
*	other values - error
7725 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
7727 struct lpfc_sli *psli = &phba->sli;
7730 * Driver resources common to all SLI revisions
7732 atomic_set(&phba->fast_event_count, 0);
7733 atomic_set(&phba->dbg_log_idx, 0);
7734 atomic_set(&phba->dbg_log_cnt, 0);
7735 atomic_set(&phba->dbg_log_dmping, 0);
7736 spin_lock_init(&phba->hbalock);
7738 /* Initialize port_list spinlock */
7739 spin_lock_init(&phba->port_list_lock);
7740 INIT_LIST_HEAD(&phba->port_list);
7742 INIT_LIST_HEAD(&phba->work_list);
7744 /* Initialize the wait queue head for the kernel thread */
7745 init_waitqueue_head(&phba->work_waitq);
7747 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7748 "1403 Protocols supported %s %s %s\n",
7749 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
7751 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
7753 (phba->nvmet_support ? "NVMET" : " "));
7755 /* Initialize the IO buffer list used by driver for SLI3 SCSI */
7756 spin_lock_init(&phba->scsi_buf_list_get_lock);
7757 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
7758 spin_lock_init(&phba->scsi_buf_list_put_lock);
7759 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
7761 /* Initialize the fabric iocb list */
7762 INIT_LIST_HEAD(&phba->fabric_iocb_list);
7764 /* Initialize list to save ELS buffers */
7765 INIT_LIST_HEAD(&phba->elsbuf);
7767 /* Initialize FCF connection rec list */
7768 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
7770 /* Initialize OAS configuration list */
7771 spin_lock_init(&phba->devicelock);
7772 INIT_LIST_HEAD(&phba->luns);
7774 /* MBOX heartbeat timer */
7775 timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
7776 /* Fabric block timer */
7777 timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
7778 /* EA polling mode timer */
7779 timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
7780 /* Heartbeat timer */
7781 timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
7783 INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);
7785 INIT_DELAYED_WORK(&phba->idle_stat_delay_work,
7786 lpfc_idle_stat_delay_work);
7787 INIT_WORK(&phba->unblock_request_work, lpfc_unblock_requests_work);
7792 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
7793 * @phba: pointer to lpfc hba data structure.
7795 * This routine is invoked to set up the driver internal resources specific to
* support the SLI-3 HBA device it is attached to.
*
* Return codes
*	0 - successful
*	other values - error
7803 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
7808 * Initialize timers used by driver
7811 /* FCP polling mode timer */
7812 timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);
7814 /* Host attention work mask setup */
7815 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
7816 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
7818 /* Get all the module params for configuring this host */
7819 lpfc_get_cfgparam(phba);
7820 /* Set up phase-1 common device driver resources */
7822 rc = lpfc_setup_driver_resource_phase1(phba);
7826 if (!phba->sli.sli3_ring)
7827 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
7828 sizeof(struct lpfc_sli_ring),
7830 if (!phba->sli.sli3_ring)
* Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size
7835 * used to create the sg_dma_buf_pool must be dynamically calculated.
7838 if (phba->sli_rev == LPFC_SLI_REV4)
7839 entry_sz = sizeof(struct sli4_sge);
7841 entry_sz = sizeof(struct ulp_bde64);
7843 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
7844 if (phba->cfg_enable_bg) {
7846 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
* the FCP rsp, and a BDE for each. Since we have no control
7848 * over how many protection data segments the SCSI Layer
7849 * will hand us (ie: there could be one for every block
* in the IO), we just allocate enough BDEs to accommodate
7851 * our max amount and we need to limit lpfc_sg_seg_cnt to
7852 * minimize the risk of running out.
7854 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7855 sizeof(struct fcp_rsp) +
7856 (LPFC_MAX_SG_SEG_CNT * entry_sz);
7858 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
7859 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
7861 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
7862 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
7865 * The scsi_buf for a regular I/O will hold the FCP cmnd,
7866 * the FCP rsp, a BDE for each, and a BDE for up to
7867 * cfg_sg_seg_cnt data segments.
7869 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7870 sizeof(struct fcp_rsp) +
7871 ((phba->cfg_sg_seg_cnt + 2) * entry_sz);
7873 /* Total BDEs in BPL for scsi_sg_list */
7874 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
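/* Sizing sketch with assumed values (not from the source): with
 * cfg_sg_seg_cnt = 64 on SLI-3, entry_sz = sizeof(struct ulp_bde64)
 * = 12, so the BDE portion of each pool buffer is (64 + 2) * 12 =
 * 792 bytes on top of the FCP cmnd and rsp structures.
 */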
7877 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
7878 "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
7879 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
7880 phba->cfg_total_seg_cnt);
7882 phba->max_vpi = LPFC_MAX_VPI;
7883 /* This will be set to correct value after config_port mbox */
7884 phba->max_vports = 0;
7887 * Initialize the SLI Layer to run with lpfc HBAs.
7889 lpfc_sli_setup(phba);
7890 lpfc_sli_queue_init(phba);
7892 /* Allocate device driver memory */
7893 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
7896 phba->lpfc_sg_dma_buf_pool =
7897 dma_pool_create("lpfc_sg_dma_buf_pool",
7898 &phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
7901 if (!phba->lpfc_sg_dma_buf_pool)
7904 phba->lpfc_cmd_rsp_buf_pool =
7905 dma_pool_create("lpfc_cmd_rsp_buf_pool",
7907 sizeof(struct fcp_cmnd) +
7908 sizeof(struct fcp_rsp),
7911 if (!phba->lpfc_cmd_rsp_buf_pool)
7912 goto fail_free_dma_buf_pool;
7915 * Enable sr-iov virtual functions if supported and configured
7916 * through the module parameter.
7918 if (phba->cfg_sriov_nr_virtfn > 0) {
7919 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
7920 phba->cfg_sriov_nr_virtfn);
7922 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7923 "2808 Requested number of SR-IOV "
7924 "virtual functions (%d) is not "
7926 phba->cfg_sriov_nr_virtfn);
7927 phba->cfg_sriov_nr_virtfn = 0;
7933 fail_free_dma_buf_pool:
7934 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
7935 phba->lpfc_sg_dma_buf_pool = NULL;
7937 lpfc_mem_free(phba);
7942 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
7943 * @phba: pointer to lpfc hba data structure.
7945 * This routine is invoked to unset the driver internal resources set up
* specific for supporting the SLI-3 HBA device it is attached to.
7949 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
7951 /* Free device driver memory allocated */
7952 lpfc_mem_free_all(phba);
7958 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
7959 * @phba: pointer to lpfc hba data structure.
7961 * This routine is invoked to set up the driver internal resources specific to
* support the SLI-4 HBA device it is attached to.
*
* Return codes
*	0 - successful
*	other values - error
7969 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
7971 LPFC_MBOXQ_t *mboxq;
7973 int rc, i, max_buf_size;
7980 phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
7981 phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
7982 phba->sli4_hba.curr_disp_cpu = 0;
7984 /* Get all the module params for configuring this host */
7985 lpfc_get_cfgparam(phba);
7987 /* Set up phase-1 common device driver resources */
7988 rc = lpfc_setup_driver_resource_phase1(phba);
7992 /* Before proceed, wait for POST done and device ready */
7993 rc = lpfc_sli4_post_status_check(phba);
7997 /* Allocate all driver workqueues here */
7999 /* The lpfc_wq workqueue for deferred irq use */
8000 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
8005 * Initialize timers used by driver
8008 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);
8010 /* FCF rediscover timer */
8011 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
8013 /* CMF congestion timer */
8014 hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
8015 phba->cmf_timer.function = lpfc_cmf_timer;
8018 * Control structure for handling external multi-buffer mailbox
8019 * command pass-through.
8021 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
8022 sizeof(struct lpfc_mbox_ext_buf_ctx));
8023 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
8025 phba->max_vpi = LPFC_MAX_VPI;
8027 /* This will be set to correct value after the read_config mbox */
8028 phba->max_vports = 0;
8030 /* Program the default value of vlan_id and fc_map */
8031 phba->valid_vlan = 0;
8032 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
8033 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
8034 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
8037 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
8038 * we will associate a new ring, for each EQ/CQ/WQ tuple.
8039 * The WQ create will allocate the ring.
8042 /* Initialize buffer queue management fields */
8043 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
8044 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
8045 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
8047 /* for VMID idle timeout if VMID is enabled */
8048 if (lpfc_is_vmid_enabled(phba))
8049 timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0);
8052 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
8054 /* Initialize the Abort buffer list used by driver */
8055 spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
8056 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);
8058 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8059 /* Initialize the Abort nvme buffer list used by driver */
8060 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
8061 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
8062 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
8063 spin_lock_init(&phba->sli4_hba.t_active_list_lock);
8064 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
8067 /* This abort list used by worker thread */
8068 spin_lock_init(&phba->sli4_hba.sgl_list_lock);
8069 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
8070 spin_lock_init(&phba->sli4_hba.asynce_list_lock);
8071 spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock);
8074 * Initialize driver internal slow-path work queues
/* Driver internal slow-path CQ Event pool */
8078 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
8079 /* Response IOCB work queue list */
8080 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
8081 /* Asynchronous event CQ Event work queue list */
8082 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
8083 /* Slow-path XRI aborted CQ Event work queue list */
8084 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
8085 /* Receive queue CQ Event work queue list */
8086 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
8088 /* Initialize extent block lists. */
8089 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
8090 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
8091 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
8092 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
8094 /* Initialize mboxq lists now so that they are in a valid state
8095 * even if the early init routines fail.
8097 INIT_LIST_HEAD(&phba->sli.mboxq);
8098 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);
8100 /* initialize optic_state to 0xFF */
8101 phba->sli4_hba.lnk_info.optic_state = 0xff;
8103 /* Allocate device driver memory */
8104 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
8106 goto out_destroy_workqueue;
8108 /* IF Type 2 ports get initialized now. */
8109 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
8110 LPFC_SLI_INTF_IF_TYPE_2) {
8111 rc = lpfc_pci_function_reset(phba);
8116 phba->temp_sensor_support = 1;
8119 /* Create the bootstrap mailbox command */
8120 rc = lpfc_create_bootstrap_mbox(phba);
8124 /* Set up the host's endian order with the device. */
8125 rc = lpfc_setup_endian_order(phba);
8127 goto out_free_bsmbx;
8129 /* Set up the hba's configuration parameters. */
8130 rc = lpfc_sli4_read_config(phba);
8132 goto out_free_bsmbx;
8134 if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) {
8135 /* Right now the link is down; if FA-PWWN is configured, the
8136 * firmware will try FLOGI before the driver gets a link up.
8137 * If it fails, the driver should get a MISCONFIGURED async
8138 * event, which will clear this flag. The only notification
8139 * the driver gets is on failure; on success no notification
8140 * is given. Assume success.
8142 phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
8145 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
8147 goto out_free_bsmbx;
8149 /* IF Type 0 ports get initialized now. */
8150 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8151 LPFC_SLI_INTF_IF_TYPE_0) {
8152 rc = lpfc_pci_function_reset(phba);
8154 goto out_free_bsmbx;
8157 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
8161 goto out_free_bsmbx;
8164 /* Check for NVMET being configured */
8165 phba->nvmet_support = 0;
8166 if (lpfc_enable_nvmet_cnt) {
8168 /* First get WWN of HBA instance */
8169 lpfc_read_nv(phba, mboxq);
8170 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8171 if (rc != MBX_SUCCESS) {
8172 lpfc_printf_log(phba, KERN_ERR,
8174 "6016 Mailbox failed , mbxCmd x%x "
8175 "READ_NV, mbxStatus x%x\n",
8176 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8177 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
8178 mempool_free(mboxq, phba->mbox_mem_pool);
8180 goto out_free_bsmbx;
8183 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
8185 wwn = cpu_to_be64(wwn);
8186 phba->sli4_hba.wwnn.u.name = wwn;
8187 memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
8189 /* wwn is WWPN of HBA instance */
8190 wwn = cpu_to_be64(wwn);
8191 phba->sli4_hba.wwpn.u.name = wwn;
8193 /* Check to see if it matches any module parameter */
8194 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
8195 if (wwn == lpfc_enable_nvmet[i]) {
8196 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
8197 if (lpfc_nvmet_mem_alloc(phba))
8200 phba->nvmet_support = 1; /* a match */
8202 lpfc_printf_log(phba, KERN_ERR,
8204 "6017 NVME Target %016llx\n",
8207 lpfc_printf_log(phba, KERN_ERR,
8209 "6021 Can't enable NVME Target."
8210 " NVME_TARGET_FC infrastructure"
8211 " is not in kernel\n");
8213 /* Not supported for NVMET */
8214 phba->cfg_xri_rebalancing = 0;
8215 if (phba->irq_chann_mode == NHT_MODE) {
8216 phba->cfg_irq_chann =
8217 phba->sli4_hba.num_present_cpu;
8218 phba->cfg_hdw_queue =
8219 phba->sli4_hba.num_present_cpu;
8220 phba->irq_chann_mode = NORMAL_MODE;
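/* NHT mode (assumed to mean the no-hyperthread IRQ policy) is not
 * used with NVMET; revert to NORMAL_MODE with one IRQ channel and
 * hardware queue per present CPU.
 */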
8227 lpfc_nvme_mod_param_dep(phba);
8230 * Get sli4 parameters that override parameters from Port capabilities.
8231 * If this call fails, it isn't critical unless the SLI4 parameters come back in conflict.
8234 rc = lpfc_get_sli4_parameters(phba, mboxq);
8236 if_type = bf_get(lpfc_sli_intf_if_type,
8237 &phba->sli4_hba.sli_intf);
8238 if_fam = bf_get(lpfc_sli_intf_sli_family,
8239 &phba->sli4_hba.sli_intf);
8240 if (phba->sli4_hba.extents_in_use &&
8241 phba->sli4_hba.rpi_hdrs_in_use) {
8242 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8243 "2999 Unsupported SLI4 Parameters "
8244 "Extents and RPI headers enabled.\n");
8245 if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
8246 if_fam == LPFC_SLI_INTF_FAMILY_BE2) {
8247 mempool_free(mboxq, phba->mbox_mem_pool);
8249 goto out_free_bsmbx;
8252 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
8253 if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
8254 mempool_free(mboxq, phba->mbox_mem_pool);
8256 goto out_free_bsmbx;
8261 * 1 for cmd, 1 for rsp, NVME adds an extra one
8262 * for boundary conditions in its max_sgl_segment template.
8265 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
8269 * It doesn't matter what family our adapter is in, we are
8270 * limited to 2 Pages, 512 SGEs, for our SGL.
8271 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
8273 max_buf_size = (2 * SLI4_PAGE_SIZE);
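/* Sketch of the arithmetic, assuming 4KB SLI4 pages and 16-byte
 * sli4_sge entries: 2 * 4096 / 16 = 512 SGEs, matching the limit
 * described above.
 */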
8276 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
8277 * used to create the sg_dma_buf_pool must be calculated.
8279 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
8280 /* Both cfg_enable_bg and cfg_external_dif code paths */
8283 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
8284 * the FCP rsp, and a SGE. Since we have no control
8285 * over how many protection segments the SCSI layer
8286 * will hand us (i.e., there could be one for every block
8287 * in the IO), just allocate enough SGEs to accommodate
8288 * our max amount, and limit lpfc_sg_seg_cnt to minimize
8289 * the risk of running out.
8291 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
8292 sizeof(struct fcp_rsp) + max_buf_size;
8294 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
8295 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
8298 * If supporting DIF, reduce the seg count for scsi to
8299 * allow room for the DIF sges.
8301 if (phba->cfg_enable_bg &&
8302 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
8303 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
8305 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8309 * The scsi_buf for a regular I/O holds the FCP cmnd,
8310 * the FCP rsp, a SGE for each, and a SGE for up to
8311 * cfg_sg_seg_cnt data segments.
8313 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
8314 sizeof(struct fcp_rsp) +
8315 ((phba->cfg_sg_seg_cnt + extra) *
8316 sizeof(struct sli4_sge));
8318 /* Total SGEs for scsi_sg_list */
8319 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
8320 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8323 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
8324 * need to post 1 page for the SGL.
8328 if (phba->cfg_xpsgl && !phba->nvmet_support)
8329 phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
8330 else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
8331 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
8333 phba->cfg_sg_dma_buf_size =
8334 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
8336 phba->border_sge_num = phba->cfg_sg_dma_buf_size /
8337 sizeof(struct sli4_sge);
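/* border_sge_num is the number of SGEs that fit in one DMA buffer;
 * it is presumably used to detect when a scatter list crosses a
 * buffer boundary and must be chained.
 */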
8339 /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
8340 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8341 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
8342 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
8343 "6300 Reducing NVME sg segment "
8345 LPFC_MAX_NVME_SEG_CNT);
8346 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
8348 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
8351 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
8352 "9087 sg_seg_cnt:%d dmabuf_size:%d "
8353 "total:%d scsi:%d nvme:%d\n",
8354 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
8355 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
8356 phba->cfg_nvme_seg_cnt);
8358 if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
8359 i = phba->cfg_sg_dma_buf_size;
8363 phba->lpfc_sg_dma_buf_pool =
8364 dma_pool_create("lpfc_sg_dma_buf_pool",
8366 phba->cfg_sg_dma_buf_size,
8368 if (!phba->lpfc_sg_dma_buf_pool) {
8370 goto out_free_bsmbx;
8373 phba->lpfc_cmd_rsp_buf_pool =
8374 dma_pool_create("lpfc_cmd_rsp_buf_pool",
8376 sizeof(struct fcp_cmnd) +
8377 sizeof(struct fcp_rsp),
8379 if (!phba->lpfc_cmd_rsp_buf_pool) {
8381 goto out_free_sg_dma_buf;
8384 mempool_free(mboxq, phba->mbox_mem_pool);
8386 /* Verify OAS is supported */
8387 lpfc_sli4_oas_verify(phba);
8389 /* Verify RAS support on adapter */
8390 lpfc_sli4_ras_init(phba);
8392 /* Verify all the SLI4 queues */
8393 rc = lpfc_sli4_queue_verify(phba);
8395 goto out_free_cmd_rsp_buf;
8397 /* Create driver internal CQE event pool */
8398 rc = lpfc_sli4_cq_event_pool_create(phba);
8400 goto out_free_cmd_rsp_buf;
8402 /* Initialize sgl lists per host */
8403 lpfc_init_sgl_list(phba);
8405 /* Allocate and initialize active sgl array */
8406 rc = lpfc_init_active_sgl_array(phba);
8408 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8409 "1430 Failed to initialize sgl list.\n");
8410 goto out_destroy_cq_event_pool;
8412 rc = lpfc_sli4_init_rpi_hdrs(phba);
8414 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8415 "1432 Failed to initialize rpi headers.\n");
8416 goto out_free_active_sgl;
8419 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
8420 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
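/* e.g. assuming LPFC_SLI4_FCF_TBL_INDX_MAX is 64 and 64-bit longs,
 * this rounds up to a single unsigned long of bitmask storage.
 */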
8421 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
8423 if (!phba->fcf.fcf_rr_bmask) {
8424 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8425 "2759 Failed allocate memory for FCF round "
8426 "robin failover bmask\n");
8428 goto out_remove_rpi_hdrs;
8431 phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
8432 sizeof(struct lpfc_hba_eq_hdl),
8434 if (!phba->sli4_hba.hba_eq_hdl) {
8435 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8436 "2572 Failed allocate memory for "
8437 "fast-path per-EQ handle array\n");
8439 goto out_free_fcf_rr_bmask;
8442 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
8443 sizeof(struct lpfc_vector_map_info),
8445 if (!phba->sli4_hba.cpu_map) {
8446 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8447 "3327 Failed allocate memory for msi-x "
8448 "interrupt vector mapping\n");
8450 goto out_free_hba_eq_hdl;
8453 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
8454 if (!phba->sli4_hba.eq_info) {
8455 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8456 "3321 Failed allocation for per_cpu stats\n");
8458 goto out_free_hba_cpu_map;
8461 phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu,
8462 sizeof(*phba->sli4_hba.idle_stat),
8464 if (!phba->sli4_hba.idle_stat) {
8465 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8466 "3390 Failed allocation for idle_stat\n");
8468 goto out_free_hba_eq_info;
8471 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8472 phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
8473 if (!phba->sli4_hba.c_stat) {
8474 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8475 "3332 Failed allocating per cpu hdwq stats\n");
8477 goto out_free_hba_idle_stat;
8481 phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat);
8482 if (!phba->cmf_stat) {
8483 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8484 "3331 Failed allocating per cpu cgn stats\n");
8486 goto out_free_hba_hdwq_info;
8490 * Enable sr-iov virtual functions if supported and configured
8491 * through the module parameter.
8493 if (phba->cfg_sriov_nr_virtfn > 0) {
8494 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
8495 phba->cfg_sriov_nr_virtfn);
8497 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8498 "3020 Requested number of SR-IOV "
8499 "virtual functions (%d) is not "
8501 phba->cfg_sriov_nr_virtfn);
8502 phba->cfg_sriov_nr_virtfn = 0;
8508 out_free_hba_hdwq_info:
8509 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8510 free_percpu(phba->sli4_hba.c_stat);
8511 out_free_hba_idle_stat:
8513 kfree(phba->sli4_hba.idle_stat);
8514 out_free_hba_eq_info:
8515 free_percpu(phba->sli4_hba.eq_info);
8516 out_free_hba_cpu_map:
8517 kfree(phba->sli4_hba.cpu_map);
8518 out_free_hba_eq_hdl:
8519 kfree(phba->sli4_hba.hba_eq_hdl);
8520 out_free_fcf_rr_bmask:
8521 kfree(phba->fcf.fcf_rr_bmask);
8522 out_remove_rpi_hdrs:
8523 lpfc_sli4_remove_rpi_hdrs(phba);
8524 out_free_active_sgl:
8525 lpfc_free_active_sgl(phba);
8526 out_destroy_cq_event_pool:
8527 lpfc_sli4_cq_event_pool_destroy(phba);
8528 out_free_cmd_rsp_buf:
8529 dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
8530 phba->lpfc_cmd_rsp_buf_pool = NULL;
8531 out_free_sg_dma_buf:
8532 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
8533 phba->lpfc_sg_dma_buf_pool = NULL;
8535 lpfc_destroy_bootstrap_mbox(phba);
8537 lpfc_mem_free(phba);
8538 out_destroy_workqueue:
8539 destroy_workqueue(phba->wq);
8545 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
8546 * @phba: pointer to lpfc hba data structure.
8548 * This routine is invoked to unset the driver internal resources set up
8549 * specifically for supporting the SLI-4 HBA device it is attached to.
8552 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
8554 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
8556 free_percpu(phba->sli4_hba.eq_info);
8557 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8558 free_percpu(phba->sli4_hba.c_stat);
8560 free_percpu(phba->cmf_stat);
8561 kfree(phba->sli4_hba.idle_stat);
8563 /* Free memory allocated for msi-x interrupt vector to CPU mapping */
8564 kfree(phba->sli4_hba.cpu_map);
8565 phba->sli4_hba.num_possible_cpu = 0;
8566 phba->sli4_hba.num_present_cpu = 0;
8567 phba->sli4_hba.curr_disp_cpu = 0;
8568 cpumask_clear(&phba->sli4_hba.irq_aff_mask);
8570 /* Free memory allocated for fast-path work queue handles */
8571 kfree(phba->sli4_hba.hba_eq_hdl);
8573 /* Free the allocated rpi headers. */
8574 lpfc_sli4_remove_rpi_hdrs(phba);
8575 lpfc_sli4_remove_rpis(phba);
8577 /* Free eligible FCF index bmask */
8578 kfree(phba->fcf.fcf_rr_bmask);
8580 /* Free the ELS sgl list */
8581 lpfc_free_active_sgl(phba);
8582 lpfc_free_els_sgl_list(phba);
8583 lpfc_free_nvmet_sgl_list(phba);
8585 /* Free the completion queue EQ event pool */
8586 lpfc_sli4_cq_event_release_all(phba);
8587 lpfc_sli4_cq_event_pool_destroy(phba);
8589 /* Release resource identifiers. */
8590 lpfc_sli4_dealloc_resource_identifiers(phba);
8592 /* Free the bsmbx region. */
8593 lpfc_destroy_bootstrap_mbox(phba);
8595 /* Free the SLI Layer memory with SLI4 HBAs */
8596 lpfc_mem_free_all(phba);
8598 /* Free the current connect table */
8599 list_for_each_entry_safe(conn_entry, next_conn_entry,
8600 &phba->fcf_conn_rec_list, list) {
8601 list_del_init(&conn_entry->list);
8609 * lpfc_init_api_table_setup - Set up init api function jump table
8610 * @phba: The hba struct for which this call is being executed.
8611 * @dev_grp: The HBA PCI-Device group number.
8613 * This routine sets up the device INIT interface API function jump table
8616 * Returns: 0 - success, -ENODEV - failure.
8619 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8621 phba->lpfc_hba_init_link = lpfc_hba_init_link;
8622 phba->lpfc_hba_down_link = lpfc_hba_down_link;
8623 phba->lpfc_selective_reset = lpfc_selective_reset;
8625 case LPFC_PCI_DEV_LP:
8626 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
8627 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
8628 phba->lpfc_stop_port = lpfc_stop_port_s3;
8630 case LPFC_PCI_DEV_OC:
8631 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
8632 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
8633 phba->lpfc_stop_port = lpfc_stop_port_s4;
8636 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8637 "1431 Invalid HBA PCI-device group: 0x%x\n",
8645 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
8646 * @phba: pointer to lpfc hba data structure.
8648 * This routine is invoked to set up the driver internal resources after the
8649 * device specific resource setup to support the HBA device it is attached to.
8653 * other values - error
8656 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
8660 /* Startup the kernel thread for this host adapter. */
8661 phba->worker_thread = kthread_run(lpfc_do_work, phba,
8662 "lpfc_worker_%d", phba->brd_no);
8663 if (IS_ERR(phba->worker_thread)) {
8664 error = PTR_ERR(phba->worker_thread);
8672 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
8673 * @phba: pointer to lpfc hba data structure.
8675 * This routine is invoked to unset the driver internal resources set up after
8676 * the device specific resource setup for supporting the HBA device it is attached to.
8680 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
8683 destroy_workqueue(phba->wq);
8687 /* Stop kernel worker thread */
8688 if (phba->worker_thread)
8689 kthread_stop(phba->worker_thread);
8693 * lpfc_free_iocb_list - Free iocb list.
8694 * @phba: pointer to lpfc hba data structure.
8696 * This routine is invoked to free the driver's IOCB list and memory.
8699 lpfc_free_iocb_list(struct lpfc_hba *phba)
8701 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
8703 spin_lock_irq(&phba->hbalock);
8704 list_for_each_entry_safe(iocbq_entry, iocbq_next,
8705 &phba->lpfc_iocb_list, list) {
8706 list_del(&iocbq_entry->list);
8708 phba->total_iocbq_bufs--;
8710 spin_unlock_irq(&phba->hbalock);
8716 * lpfc_init_iocb_list - Allocate and initialize iocb list.
8717 * @phba: pointer to lpfc hba data structure.
8718 * @iocb_count: number of requested iocbs
8720 * This routine is invoked to allocate and initialize the driver's IOCB
8721 * list and set up the IOCB tag array accordingly.
8725 * other values - error
8728 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
8730 struct lpfc_iocbq *iocbq_entry = NULL;
8734 /* Initialize and populate the iocb list per host. */
8735 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
8736 for (i = 0; i < iocb_count; i++) {
8737 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
8738 if (iocbq_entry == NULL) {
8739 printk(KERN_ERR "%s: only allocated %d iocbs of "
8740 "expected %d count. Unloading driver.\n",
8741 __func__, i, iocb_count);
8742 goto out_free_iocbq;
8745 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
8748 printk(KERN_ERR "%s: failed to allocate IOTAG. "
8749 "Unloading driver.\n", __func__);
8750 goto out_free_iocbq;
8752 iocbq_entry->sli4_lxritag = NO_XRI;
8753 iocbq_entry->sli4_xritag = NO_XRI;
8755 spin_lock_irq(&phba->hbalock);
8756 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
8757 phba->total_iocbq_bufs++;
8758 spin_unlock_irq(&phba->hbalock);
8764 lpfc_free_iocb_list(phba);
8770 * lpfc_free_sgl_list - Free a given sgl list.
8771 * @phba: pointer to lpfc hba data structure.
8772 * @sglq_list: pointer to the head of sgl list.
8774 * This routine is invoked to free a given sgl list and its memory.
8777 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
8779 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
8781 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
8782 list_del(&sglq_entry->list);
8783 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
8789 * lpfc_free_els_sgl_list - Free els sgl list.
8790 * @phba: pointer to lpfc hba data structure.
8792 * This routine is invoked to free the driver's els sgl list and memory.
8795 lpfc_free_els_sgl_list(struct lpfc_hba *phba)
8797 LIST_HEAD(sglq_list);
8799 /* Retrieve all els sgls from driver list */
8800 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
8801 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
8802 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
8804 /* Now free the sgl list */
8805 lpfc_free_sgl_list(phba, &sglq_list);
8809 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
8810 * @phba: pointer to lpfc hba data structure.
8812 * This routine is invoked to free the driver's nvmet sgl list and memory.
8815 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
8817 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
8818 LIST_HEAD(sglq_list);
8820 /* Retrieve all nvmet sgls from driver list */
8821 spin_lock_irq(&phba->hbalock);
8822 spin_lock(&phba->sli4_hba.sgl_list_lock);
8823 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
8824 spin_unlock(&phba->sli4_hba.sgl_list_lock);
8825 spin_unlock_irq(&phba->hbalock);
8827 /* Now free the sgl list */
8828 list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
8829 list_del(&sglq_entry->list);
8830 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
8834 /* Update the nvmet_xri_cnt to reflect no current sgls.
8835 * The next initialization cycle sets the count and allocates
8836 * the sgls over again.
8838 phba->sli4_hba.nvmet_xri_cnt = 0;
8842 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
8843 * @phba: pointer to lpfc hba data structure.
8845 * This routine is invoked to allocate the driver's active sgl memory.
8846 * This array will hold the sglq_entry's for active IOs.
8849 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
8852 size = sizeof(struct lpfc_sglq *);
8853 size *= phba->sli4_hba.max_cfg_param.max_xri;
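/* One sglq pointer per XRI: an entry holds the sglq for an XRI
 * while an IO is active on it, and is NULL otherwise.
 */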
8855 phba->sli4_hba.lpfc_sglq_active_list =
8856 kzalloc(size, GFP_KERNEL);
8857 if (!phba->sli4_hba.lpfc_sglq_active_list)
8863 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
8864 * @phba: pointer to lpfc hba data structure.
8866 * This routine is invoked to walk through the array of active sglq entries
8867 * and free all of the resources.
8868 * This is just a placeholder for now.
8871 lpfc_free_active_sgl(struct lpfc_hba *phba)
8873 kfree(phba->sli4_hba.lpfc_sglq_active_list);
8877 * lpfc_init_sgl_list - Allocate and initialize sgl list.
8878 * @phba: pointer to lpfc hba data structure.
8880 * This routine is invoked to allocate and initialize the driver's sgl
8881 * list and set up the sgl xritag tag array accordingly.
8885 lpfc_init_sgl_list(struct lpfc_hba *phba)
8887 /* Initialize and populate the sglq list per host/VF. */
8888 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
8889 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
8890 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
8891 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
8893 /* els xri-sgl book keeping */
8894 phba->sli4_hba.els_xri_cnt = 0;
8896 /* nvme xri-buffer book keeping */
8897 phba->sli4_hba.io_xri_cnt = 0;
8901 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
8902 * @phba: pointer to lpfc hba data structure.
8904 * This routine is invoked to post rpi header templates to the
8905 * port for those SLI4 ports that do not support extents. This routine
8906 * posts a PAGE_SIZE memory region to the port to hold up to
8907 * PAGE_SIZE / 64 rpi context headers. This is an initialization routine
8908 * and should be called only when interrupts are disabled.
8912 * -ERROR - otherwise.
8915 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
8918 struct lpfc_rpi_hdr *rpi_hdr;
8920 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
8921 if (!phba->sli4_hba.rpi_hdrs_in_use)
8923 if (phba->sli4_hba.extents_in_use)
8926 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
8928 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8929 "0391 Error during rpi post operation\n");
8930 lpfc_sli4_remove_rpis(phba);
8938 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
8939 * @phba: pointer to lpfc hba data structure.
8941 * This routine is invoked to allocate a single 4KB memory region to
8942 * support rpis and stores them in the phba. This single region
8943 * provides support for up to 64 rpis. The region is used globally by the driver.
8947 * A valid rpi hdr on success.
8948 * A NULL pointer on any failure.
8950 struct lpfc_rpi_hdr *
8951 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
8953 uint16_t rpi_limit, curr_rpi_range;
8954 struct lpfc_dmabuf *dmabuf;
8955 struct lpfc_rpi_hdr *rpi_hdr;
8958 * If the SLI4 port supports extents, posting the rpi header isn't
8959 * required. Set the expected maximum count and let the actual value
8960 * get set when extents are fully allocated.
8962 if (!phba->sli4_hba.rpi_hdrs_in_use)
8964 if (phba->sli4_hba.extents_in_use)
8967 /* The limit on the logical index is just the max_rpi count. */
8968 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;
8970 spin_lock_irq(&phba->hbalock);
8972 * Establish the starting RPI in this header block. The starting
8973 * rpi is normalized to a zero base because the physical rpi is port based.
8976 curr_rpi_range = phba->sli4_hba.next_rpi;
8977 spin_unlock_irq(&phba->hbalock);
8979 /* Reached full RPI range */
8980 if (curr_rpi_range == rpi_limit)
8984 * First allocate the protocol header region for the port. The
8985 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
8987 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
8991 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
8992 LPFC_HDR_TEMPLATE_SIZE,
8993 &dmabuf->phys, GFP_KERNEL);
8994 if (!dmabuf->virt) {
8996 goto err_free_dmabuf;
8999 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
9001 goto err_free_coherent;
9004 /* Save the rpi header data for cleanup later. */
9005 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
9007 goto err_free_coherent;
9009 rpi_hdr->dmabuf = dmabuf;
9010 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
9011 rpi_hdr->page_count = 1;
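/* A single 4KB template region covers LPFC_RPI_HDR_COUNT rpi
 * contexts, which is why next_rpi advances by that count below.
 */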
9012 spin_lock_irq(&phba->hbalock);
9014 /* The rpi_hdr stores the logical index only. */
9015 rpi_hdr->start_rpi = curr_rpi_range;
9016 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
9017 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
9019 spin_unlock_irq(&phba->hbalock);
9023 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
9024 dmabuf->virt, dmabuf->phys);
9031 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
9032 * @phba: pointer to lpfc hba data structure.
9034 * This routine is invoked to remove all memory resources allocated
9035 * to support rpis for SLI4 ports not supporting extents. This routine
9036 * presumes the caller has released all rpis consumed by fabric or port
9037 * logins and is prepared to have the header pages removed.
9040 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
9042 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
9044 if (!phba->sli4_hba.rpi_hdrs_in_use)
9047 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
9048 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
9049 list_del(&rpi_hdr->list);
9050 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
9051 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
9052 kfree(rpi_hdr->dmabuf);
9056 /* There are no rpis available to the port now. */
9057 phba->sli4_hba.next_rpi = 0;
9061 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
9062 * @pdev: pointer to pci device data structure.
9064 * This routine is invoked to allocate the driver hba data structure for an
9065 * HBA device. If the allocation is successful, the phba reference to the
9066 * PCI device data structure is set.
9069 * pointer to @phba - successful
9072 static struct lpfc_hba *
9073 lpfc_hba_alloc(struct pci_dev *pdev)
9075 struct lpfc_hba *phba;
9077 /* Allocate memory for HBA structure */
9078 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
9080 dev_err(&pdev->dev, "failed to allocate hba struct\n");
9084 /* Set reference to PCI device in HBA structure */
9085 phba->pcidev = pdev;
9087 /* Assign an unused board number */
9088 phba->brd_no = lpfc_get_instance();
9089 if (phba->brd_no < 0) {
9093 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;
9095 spin_lock_init(&phba->ct_ev_lock);
9096 INIT_LIST_HEAD(&phba->ct_ev_waiters);
9102 * lpfc_hba_free - Free driver hba data structure with a device.
9103 * @phba: pointer to lpfc hba data structure.
9105 * This routine is invoked to free the driver hba data structure with an
9109 lpfc_hba_free(struct lpfc_hba *phba)
9111 if (phba->sli_rev == LPFC_SLI_REV4)
9112 kfree(phba->sli4_hba.hdwq);
9114 /* Release the driver assigned board number */
9115 idr_remove(&lpfc_hba_index, phba->brd_no);
9117 /* Free memory allocated with sli3 rings */
9118 kfree(phba->sli.sli3_ring);
9119 phba->sli.sli3_ring = NULL;
9126 * lpfc_setup_fdmi_mask - Setup initial FDMI mask for HBA and Port attributes
9127 * @vport: pointer to lpfc vport data structure.
9129 * This routine will set up the initial FDMI attribute masks for
9130 * FDMI2 or SmartSAN depending on module parameters. The driver will attempt
9131 * to get these attributes first before falling back; the attribute
9132 * fallback hierarchy is SmartSAN -> FDMI2 -> FDMI1
9135 lpfc_setup_fdmi_mask(struct lpfc_vport *vport)
9137 struct lpfc_hba *phba = vport->phba;
9139 vport->load_flag |= FC_ALLOW_FDMI;
9140 if (phba->cfg_enable_SmartSAN ||
9141 phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT) {
9142 /* Setup appropriate attribute masks */
9143 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
9144 if (phba->cfg_enable_SmartSAN)
9145 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
9147 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
9150 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
9151 "6077 Setup FDMI mask: hba x%x port x%x\n",
9152 vport->fdmi_hba_mask, vport->fdmi_port_mask);
9156 * lpfc_create_shost - Create hba physical port with associated scsi host.
9157 * @phba: pointer to lpfc hba data structure.
9159 * This routine is invoked to create HBA physical port and associate a SCSI
9164 * other values - error
9167 lpfc_create_shost(struct lpfc_hba *phba)
9169 struct lpfc_vport *vport;
9170 struct Scsi_Host *shost;
9172 /* Initialize HBA FC structure */
9173 phba->fc_edtov = FF_DEF_EDTOV;
9174 phba->fc_ratov = FF_DEF_RATOV;
9175 phba->fc_altov = FF_DEF_ALTOV;
9176 phba->fc_arbtov = FF_DEF_ARBTOV;
9178 atomic_set(&phba->sdev_cnt, 0);
9179 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
9183 shost = lpfc_shost_from_vport(vport);
9184 phba->pport = vport;
9186 if (phba->nvmet_support) {
9187 /* Only 1 vport (pport) will support NVME target */
9188 phba->targetport = NULL;
9189 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
9190 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC,
9191 "6076 NVME Target Found\n");
9194 lpfc_debugfs_initialize(vport);
9195 /* Put reference to SCSI host to driver's device private data */
9196 pci_set_drvdata(phba->pcidev, shost);
9198 lpfc_setup_fdmi_mask(vport);
9201 * At this point we are fully registered with PSA. In addition,
9202 * any initial discovery should be completed.
9208 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
9209 * @phba: pointer to lpfc hba data structure.
9211 * This routine is invoked to destroy HBA physical port and the associated
9215 lpfc_destroy_shost(struct lpfc_hba *phba)
9217 struct lpfc_vport *vport = phba->pport;
9219 /* Destroy physical port that associated with the SCSI host */
9220 destroy_port(vport);
9226 * lpfc_setup_bg - Setup Block guard structures and debug areas.
9227 * @phba: pointer to lpfc hba data structure.
9228 * @shost: the shost to be used to detect Block guard settings.
9230 * This routine sets up the local Block guard protocol settings for @shost.
9231 * This routine also allocates memory for debugging bg buffers.
9234 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
9239 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
9240 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9241 "1478 Registering BlockGuard with the "
9244 old_mask = phba->cfg_prot_mask;
9245 old_guard = phba->cfg_prot_guard;
9247 /* Only allow supported values */
9248 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
9249 SHOST_DIX_TYPE0_PROTECTION |
9250 SHOST_DIX_TYPE1_PROTECTION);
9251 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
9252 SHOST_DIX_GUARD_CRC);
9254 /* DIF Type 1 protection for profiles AST1/C1 is end to end */
9255 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
9256 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
9258 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
9259 if ((old_mask != phba->cfg_prot_mask) ||
9260 (old_guard != phba->cfg_prot_guard))
9261 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9262 "1475 Registering BlockGuard with the "
9263 "SCSI layer: mask %d guard %d\n",
9264 phba->cfg_prot_mask,
9265 phba->cfg_prot_guard);
9267 scsi_host_set_prot(shost, phba->cfg_prot_mask);
9268 scsi_host_set_guard(shost, phba->cfg_prot_guard);
9270 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9271 "1479 Not Registering BlockGuard with the SCSI "
9272 "layer, Bad protection parameters: %d %d\n",
9273 old_mask, old_guard);
9278 * lpfc_post_init_setup - Perform necessary device post initialization setup.
9279 * @phba: pointer to lpfc hba data structure.
9281 * This routine is invoked to perform all the necessary post initialization
9282 * setup for the device.
9285 lpfc_post_init_setup(struct lpfc_hba *phba)
9287 struct Scsi_Host *shost;
9288 struct lpfc_adapter_event_header adapter_event;
9290 /* Get the default values for Model Name and Description */
9291 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
9294 * hba setup may have changed the hba_queue_depth so we need to
9295 * adjust the value of can_queue.
9297 shost = pci_get_drvdata(phba->pcidev);
9298 shost->can_queue = phba->cfg_hba_queue_depth - 10;
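/* The -10 is assumed to hold back a few command slots from the
 * SCSI midlayer for driver-internal use.
 */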
9300 lpfc_host_attrib_init(shost);
9302 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9303 spin_lock_irq(shost->host_lock);
9304 lpfc_poll_start_timer(phba);
9305 spin_unlock_irq(shost->host_lock);
9308 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9309 "0428 Perform SCSI scan\n");
9310 /* Send board arrival event to upper layer */
9311 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
9312 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
9313 fc_host_post_vendor_event(shost, fc_get_event_number(),
9314 sizeof(adapter_event),
9315 (char *) &adapter_event,
9321 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
9322 * @phba: pointer to lpfc hba data structure.
9324 * This routine is invoked to set up the PCI device memory space for device
9325 * with SLI-3 interface spec.
9329 * other values - error
9332 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
9334 struct pci_dev *pdev = phba->pcidev;
9335 unsigned long bar0map_len, bar2map_len;
9343 /* Set the device DMA mask size */
9344 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9346 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9351 /* Get the bus address of Bar0 and Bar2 and the number of bytes
9352 * required by each mapping.
9354 phba->pci_bar0_map = pci_resource_start(pdev, 0);
9355 bar0map_len = pci_resource_len(pdev, 0);
9357 phba->pci_bar2_map = pci_resource_start(pdev, 2);
9358 bar2map_len = pci_resource_len(pdev, 2);
9360 /* Map HBA SLIM to a kernel virtual address. */
9361 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
9362 if (!phba->slim_memmap_p) {
9363 dev_printk(KERN_ERR, &pdev->dev,
9364 "ioremap failed for SLIM memory.\n");
9368 /* Map HBA Control Registers to a kernel virtual address. */
9369 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
9370 if (!phba->ctrl_regs_memmap_p) {
9371 dev_printk(KERN_ERR, &pdev->dev,
9372 "ioremap failed for HBA control registers.\n");
9373 goto out_iounmap_slim;
9376 /* Allocate memory for SLI-2 structures */
9377 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9378 &phba->slim2p.phys, GFP_KERNEL);
9379 if (!phba->slim2p.virt)
9382 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
9383 phba->mbox_ext = (phba->slim2p.virt +
9384 offsetof(struct lpfc_sli2_slim, mbx_ext_words));
9385 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
9386 phba->IOCBs = (phba->slim2p.virt +
9387 offsetof(struct lpfc_sli2_slim, IOCBs));
9389 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
9390 lpfc_sli_hbq_size(),
9391 &phba->hbqslimp.phys,
9393 if (!phba->hbqslimp.virt)
9396 hbq_count = lpfc_sli_hbq_count();
9397 ptr = phba->hbqslimp.virt;
9398 for (i = 0; i < hbq_count; ++i) {
9399 phba->hbqs[i].hbq_virt = ptr;
9400 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
9401 ptr += (lpfc_hbq_defs[i]->entry_count *
9402 sizeof(struct lpfc_hbq_entry));
9404 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
9405 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
9407 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
9409 phba->MBslimaddr = phba->slim_memmap_p;
9410 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
9411 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
9412 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
9413 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
9418 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9419 phba->slim2p.virt, phba->slim2p.phys);
9421 iounmap(phba->ctrl_regs_memmap_p);
9423 iounmap(phba->slim_memmap_p);
9429 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
9430 * @phba: pointer to lpfc hba data structure.
9432 * This routine is invoked to unset the PCI device memory space for device
9433 * with SLI-3 interface spec.
9436 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
9438 struct pci_dev *pdev;
9440 /* Obtain PCI device reference */
9444 pdev = phba->pcidev;
9446 /* Free coherent DMA memory allocated */
9447 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
9448 phba->hbqslimp.virt, phba->hbqslimp.phys);
9449 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9450 phba->slim2p.virt, phba->slim2p.phys);
9452 /* I/O memory unmap */
9453 iounmap(phba->ctrl_regs_memmap_p);
9454 iounmap(phba->slim_memmap_p);
9460 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
9461 * @phba: pointer to lpfc hba data structure.
9463 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
9464 * done and check status.
9466 * Return 0 if successful, otherwise -ENODEV.
9469 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
9471 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
9472 struct lpfc_register reg_data;
9473 int i, port_error = 0;
9476 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
9477 memset(&reg_data, 0, sizeof(reg_data));
9478 if (!phba->sli4_hba.PSMPHRregaddr)
9481 /* Wait up to 30 seconds for the SLI Port POST done and ready */
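/* The 30 second budget assumes roughly a 10 ms delay per poll
 * iteration (3000 * 10 ms).
 */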
9482 for (i = 0; i < 3000; i++) {
9483 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
9484 &portsmphr_reg.word0) ||
9485 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
9486 /* Port has a fatal POST error, break out */
9487 port_error = -ENODEV;
9490 if (LPFC_POST_STAGE_PORT_READY ==
9491 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
9497 * If there was a port error during POST, then don't proceed with
9498 * other register reads as the data may not be valid. Just exit.
9501 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9502 "1408 Port Failed POST - portsmphr=0x%x, "
9503 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
9504 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
9505 portsmphr_reg.word0,
9506 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
9507 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
9508 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
9509 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
9510 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
9511 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
9512 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
9513 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
9515 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9516 "2534 Device Info: SLIFamily=0x%x, "
9517 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
9518 "SLIHint_2=0x%x, FT=0x%x\n",
9519 bf_get(lpfc_sli_intf_sli_family,
9520 &phba->sli4_hba.sli_intf),
9521 bf_get(lpfc_sli_intf_slirev,
9522 &phba->sli4_hba.sli_intf),
9523 bf_get(lpfc_sli_intf_if_type,
9524 &phba->sli4_hba.sli_intf),
9525 bf_get(lpfc_sli_intf_sli_hint1,
9526 &phba->sli4_hba.sli_intf),
9527 bf_get(lpfc_sli_intf_sli_hint2,
9528 &phba->sli4_hba.sli_intf),
9529 bf_get(lpfc_sli_intf_func_type,
9530 &phba->sli4_hba.sli_intf));
9532 * Check for other Port errors during the initialization
9533 * process. Fail the load if the port did not come up
9536 if_type = bf_get(lpfc_sli_intf_if_type,
9537 &phba->sli4_hba.sli_intf);
9539 case LPFC_SLI_INTF_IF_TYPE_0:
9540 phba->sli4_hba.ue_mask_lo =
9541 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
9542 phba->sli4_hba.ue_mask_hi =
9543 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
9545 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
9547 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
9548 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
9549 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
9550 lpfc_printf_log(phba, KERN_ERR,
9552 "1422 Unrecoverable Error "
9553 "Detected during POST "
9554 "uerr_lo_reg=0x%x, "
9555 "uerr_hi_reg=0x%x, "
9556 "ue_mask_lo_reg=0x%x, "
9557 "ue_mask_hi_reg=0x%x\n",
9560 phba->sli4_hba.ue_mask_lo,
9561 phba->sli4_hba.ue_mask_hi);
9562 port_error = -ENODEV;
9565 case LPFC_SLI_INTF_IF_TYPE_2:
9566 case LPFC_SLI_INTF_IF_TYPE_6:
9567 /* Final checks. The port status should be clean. */
9568 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
9570 (bf_get(lpfc_sliport_status_err, &reg_data) &&
9571 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
9572 phba->work_status[0] =
9573 readl(phba->sli4_hba.u.if_type2.
9575 phba->work_status[1] =
9576 readl(phba->sli4_hba.u.if_type2.
9578 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9579 "2888 Unrecoverable port error "
9580 "following POST: port status reg "
9581 "0x%x, port_smphr reg 0x%x, "
9582 "error 1=0x%x, error 2=0x%x\n",
9584 portsmphr_reg.word0,
9585 phba->work_status[0],
9586 phba->work_status[1]);
9587 port_error = -ENODEV;
9591 if (lpfc_pldv_detect &&
9592 bf_get(lpfc_sli_intf_sli_family,
9593 &phba->sli4_hba.sli_intf) ==
9594 LPFC_SLI_INTF_FAMILY_G6)
9595 pci_write_config_byte(phba->pcidev,
9596 LPFC_SLI_INTF, CFG_PLD);
9598 case LPFC_SLI_INTF_IF_TYPE_1:
9607 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
9608 * @phba: pointer to lpfc hba data structure.
9609 * @if_type: The SLI4 interface type getting configured.
9611 * This routine is invoked to set up SLI4 BAR0 PCI config space register
9615 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
9618 case LPFC_SLI_INTF_IF_TYPE_0:
9619 phba->sli4_hba.u.if_type0.UERRLOregaddr =
9620 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
9621 phba->sli4_hba.u.if_type0.UERRHIregaddr =
9622 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
9623 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
9624 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
9625 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
9626 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
9627 phba->sli4_hba.SLIINTFregaddr =
9628 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9630 case LPFC_SLI_INTF_IF_TYPE_2:
9631 phba->sli4_hba.u.if_type2.EQDregaddr =
9632 phba->sli4_hba.conf_regs_memmap_p +
9633 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
9634 phba->sli4_hba.u.if_type2.ERR1regaddr =
9635 phba->sli4_hba.conf_regs_memmap_p +
9636 LPFC_CTL_PORT_ER1_OFFSET;
9637 phba->sli4_hba.u.if_type2.ERR2regaddr =
9638 phba->sli4_hba.conf_regs_memmap_p +
9639 LPFC_CTL_PORT_ER2_OFFSET;
9640 phba->sli4_hba.u.if_type2.CTRLregaddr =
9641 phba->sli4_hba.conf_regs_memmap_p +
9642 LPFC_CTL_PORT_CTL_OFFSET;
9643 phba->sli4_hba.u.if_type2.STATUSregaddr =
9644 phba->sli4_hba.conf_regs_memmap_p +
9645 LPFC_CTL_PORT_STA_OFFSET;
9646 phba->sli4_hba.SLIINTFregaddr =
9647 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9648 phba->sli4_hba.PSMPHRregaddr =
9649 phba->sli4_hba.conf_regs_memmap_p +
9650 LPFC_CTL_PORT_SEM_OFFSET;
9651 phba->sli4_hba.RQDBregaddr =
9652 phba->sli4_hba.conf_regs_memmap_p +
9653 LPFC_ULP0_RQ_DOORBELL;
9654 phba->sli4_hba.WQDBregaddr =
9655 phba->sli4_hba.conf_regs_memmap_p +
9656 LPFC_ULP0_WQ_DOORBELL;
9657 phba->sli4_hba.CQDBregaddr =
9658 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
9659 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9660 phba->sli4_hba.MQDBregaddr =
9661 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
9662 phba->sli4_hba.BMBXregaddr =
9663 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9665 case LPFC_SLI_INTF_IF_TYPE_6:
9666 phba->sli4_hba.u.if_type2.EQDregaddr =
9667 phba->sli4_hba.conf_regs_memmap_p +
9668 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
9669 phba->sli4_hba.u.if_type2.ERR1regaddr =
9670 phba->sli4_hba.conf_regs_memmap_p +
9671 LPFC_CTL_PORT_ER1_OFFSET;
9672 phba->sli4_hba.u.if_type2.ERR2regaddr =
9673 phba->sli4_hba.conf_regs_memmap_p +
9674 LPFC_CTL_PORT_ER2_OFFSET;
9675 phba->sli4_hba.u.if_type2.CTRLregaddr =
9676 phba->sli4_hba.conf_regs_memmap_p +
9677 LPFC_CTL_PORT_CTL_OFFSET;
9678 phba->sli4_hba.u.if_type2.STATUSregaddr =
9679 phba->sli4_hba.conf_regs_memmap_p +
9680 LPFC_CTL_PORT_STA_OFFSET;
9681 phba->sli4_hba.PSMPHRregaddr =
9682 phba->sli4_hba.conf_regs_memmap_p +
9683 LPFC_CTL_PORT_SEM_OFFSET;
9684 phba->sli4_hba.BMBXregaddr =
9685 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9687 case LPFC_SLI_INTF_IF_TYPE_1:
9689 dev_printk(KERN_ERR, &phba->pcidev->dev,
9690 "FATAL - unsupported SLI4 interface type - %d\n",
9697 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
9698 * @phba: pointer to lpfc hba data structure.
9699 * @if_type: sli if type to operate on.
9701 * This routine is invoked to set up SLI4 BAR1 register memory map.
9704 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
9707 case LPFC_SLI_INTF_IF_TYPE_0:
9708 phba->sli4_hba.PSMPHRregaddr =
9709 phba->sli4_hba.ctrl_regs_memmap_p +
9710 LPFC_SLIPORT_IF0_SMPHR;
9711 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9713 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9715 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9718 case LPFC_SLI_INTF_IF_TYPE_6:
9719 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9720 LPFC_IF6_RQ_DOORBELL;
9721 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9722 LPFC_IF6_WQ_DOORBELL;
9723 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9724 LPFC_IF6_CQ_DOORBELL;
9725 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9726 LPFC_IF6_EQ_DOORBELL;
9727 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9728 LPFC_IF6_MQ_DOORBELL;
9730 case LPFC_SLI_INTF_IF_TYPE_2:
9731 case LPFC_SLI_INTF_IF_TYPE_1:
9733 dev_err(&phba->pcidev->dev,
9734 "FATAL - unsupported SLI4 interface type - %d\n",
9741 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
9742 * @phba: pointer to lpfc hba data structure.
9743 * @vf: virtual function number
9745 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
9746 * based on the given virtual function number, @vf.
9748 * Return 0 if successful, otherwise -ENODEV.
9751 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
9753 if (vf > LPFC_VIR_FUNC_MAX)
9756 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9757 vf * LPFC_VFR_PAGE_SIZE +
9758 LPFC_ULP0_RQ_DOORBELL);
9759 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9760 vf * LPFC_VFR_PAGE_SIZE +
9761 LPFC_ULP0_WQ_DOORBELL);
9762 phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9763 vf * LPFC_VFR_PAGE_SIZE +
9764 LPFC_EQCQ_DOORBELL);
9765 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9766 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9767 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
9768 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9769 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
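/* Each virtual function owns an LPFC_VFR_PAGE_SIZE-sized doorbell
 * window; the same per-queue offsets apply within every window.
 */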
9774 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
9775 * @phba: pointer to lpfc hba data structure.
9777 * This routine is invoked to create the bootstrap mailbox
9778 * region consistent with the SLI-4 interface spec. This
9779 * routine allocates all memory necessary to communicate
9780 * mailbox commands to the port and sets up all alignment
9781 * needs. No locks are expected to be held when calling
9786 * -ENOMEM - could not allocate memory.
9789 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
9792 struct lpfc_dmabuf *dmabuf;
9793 struct dma_address *dma_address;
9797 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
9802 * The bootstrap mailbox region consists of 2 parts
9803 * plus an alignment restriction of 16 bytes.
9805 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
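/* Over-allocate by (LPFC_ALIGN_16_BYTE - 1) bytes so that a
 * 16-byte-aligned block can always be carved out of the region
 * with the PTR_ALIGN/ALIGN calls below.
 */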
9806 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
9807 &dmabuf->phys, GFP_KERNEL);
9808 if (!dmabuf->virt) {
9814 * Initialize the bootstrap mailbox pointers now so that the register
9815 * operations are simple later. The mailbox dma address is required
9816 * to be 16-byte aligned. Also align the virtual memory as each
9817 * mailbox is copied into the bmbx mailbox region before issuing the
9818 * command to the port.
9820 phba->sli4_hba.bmbx.dmabuf = dmabuf;
9821 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
9823 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
9824 LPFC_ALIGN_16_BYTE);
9825 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
9826 LPFC_ALIGN_16_BYTE);
9829 * Set the high and low physical addresses now. The SLI4 alignment
9830 * requirement is 16 bytes and the mailbox is posted to the port
9831 * as two 30-bit addresses. The other data is a bit marking whether
9832 * the 30-bit address is the high or low address.
9833 * Upcast bmbx aphys to 64 bits so the shift instruction compiles
9834 * cleanly on 32-bit machines.
9836 dma_address = &phba->sli4_hba.bmbx.dma_address;
9837 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
9838 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
9839 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
9840 LPFC_BMBX_BIT1_ADDR_HI);
9842 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
9843 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
9844 LPFC_BMBX_BIT1_ADDR_LO);
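/* Sketch of the resulting encoding (mirrors the code above):
 *   addr_hi = (((aphys >> 34) & 0x3fffffff) << 2) | LPFC_BMBX_BIT1_ADDR_HI
 *   addr_lo = (((aphys >>  4) & 0x3fffffff) << 2) | LPFC_BMBX_BIT1_ADDR_LO
 * Each register value thus carries 30 address bits plus a flag
 * marking whether it is the high or low half of the address.
 */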
9849 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
9850 * @phba: pointer to lpfc hba data structure.
9852 * This routine is invoked to teardown the bootstrap mailbox
9853 * region and release all host resources. This routine requires
9854 * the caller to ensure all mailbox commands are recovered, no
9855 * additional mailbox commands are sent, and interrupts are disabled
9856 * before calling this routine.
9860 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
9862 dma_free_coherent(&phba->pcidev->dev,
9863 phba->sli4_hba.bmbx.bmbx_size,
9864 phba->sli4_hba.bmbx.dmabuf->virt,
9865 phba->sli4_hba.bmbx.dmabuf->phys);
9867 kfree(phba->sli4_hba.bmbx.dmabuf);
9868 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
9871 static const char * const lpfc_topo_to_str[] = {
9881 #define LINK_FLAGS_DEF 0x0
9882 #define LINK_FLAGS_P2P 0x1
9883 #define LINK_FLAGS_LOOP 0x2
9885 * lpfc_map_topology - Map the topology read from READ_CONFIG
9886 * @phba: pointer to lpfc hba data structure.
9887 * @rd_config: pointer to read config data
9889 * This routine is invoked to map the topology values as read
9890 * from the read config mailbox command. If the persistent
9891 * topology feature is supported, the firmware will provide the
9892 * saved topology information to be used in INIT_LINK
9895 lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
9899 ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config);
9900 tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config);
9901 pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config);
9903 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9904 "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x",
9907 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9908 "2019 FW does not support persistent topology "
9909 "Using driver parameter defined value [%s]",
9910 lpfc_topo_to_str[phba->cfg_topology]);
9913 /* FW supports persistent topology - override module parameter value */
9914 phba->hba_flag |= HBA_PERSISTENT_TOPO;
9916 /* if ASIC_GEN_NUM >= 0xC */
9917 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
9918 LPFC_SLI_INTF_IF_TYPE_6) ||
9919 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
9920 LPFC_SLI_INTF_FAMILY_G6)) {
9922 phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
9923 ? FLAGS_TOPOLOGY_MODE_LOOP
9924 : FLAGS_TOPOLOGY_MODE_PT_PT);
9926 phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
9930 /* If topology failover set - pt is '0' or '1' */
9931 phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
9932 FLAGS_TOPOLOGY_MODE_LOOP_PT);
9934 phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
9935 ? FLAGS_TOPOLOGY_MODE_PT_PT
9936 : FLAGS_TOPOLOGY_MODE_LOOP);
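/* Summary of the mapping when the FW-provided value is honored:
 *   tf set:   pt selects the failover order (PT->LOOP vs LOOP->PT)
 *   tf clear: pt selects a fixed topology (P2P vs LOOP)
 */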
9939 if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
9940 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9941 "2020 Using persistent topology value [%s]",
9942 lpfc_topo_to_str[phba->cfg_topology]);
9944 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9945 "2021 Invalid topology values from FW "
9946 "Using driver parameter defined value [%s]",
9947 lpfc_topo_to_str[phba->cfg_topology]);
9952 * lpfc_sli4_read_config - Get the config parameters.
9953 * @phba: pointer to lpfc hba data structure.
9955 * This routine is invoked to read the configuration parameters from the HBA.
9956 * The configuration parameters are used to set the base and maximum values
9957 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
9958 * allocation for the port.
9962 * -ENOMEM - No available memory
9963 * -EIO - The mailbox failed to complete successfully.
9966 lpfc_sli4_read_config(struct lpfc_hba *phba)
9969 struct lpfc_mbx_read_config *rd_config;
9970 union lpfc_sli4_cfg_shdr *shdr;
9971 uint32_t shdr_status, shdr_add_status;
9972 struct lpfc_mbx_get_func_cfg *get_func_cfg;
9973 struct lpfc_rsrc_desc_fcfcoe *desc;
9975 uint16_t forced_link_speed;
9976 uint32_t if_type, qmin, fawwpn;
9977 int length, i, rc = 0, rc2;
9979 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9981 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9982 "2011 Unable to allocate memory for issuing "
9983 "SLI_CONFIG_SPECIAL mailbox command\n");
9987 lpfc_read_config(phba, pmb);
9989 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
9990 if (rc != MBX_SUCCESS) {
9991 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9992 "2012 Mailbox failed , mbxCmd x%x "
9993 "READ_CONFIG, mbxStatus x%x\n",
9994 bf_get(lpfc_mqe_command, &pmb->u.mqe),
9995 bf_get(lpfc_mqe_status, &pmb->u.mqe));
9998 rd_config = &pmb->u.mqe.un.rd_config;
9999 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
10000 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
10001 phba->sli4_hba.lnk_info.lnk_tp =
10002 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
10003 phba->sli4_hba.lnk_info.lnk_no =
10004 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
10005 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10006 "3081 lnk_type:%d, lnk_numb:%d\n",
10007 phba->sli4_hba.lnk_info.lnk_tp,
10008 phba->sli4_hba.lnk_info.lnk_no);
10010 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10011 "3082 Mailbox (x%x) returned ldv:x0\n",
10012 bf_get(lpfc_mqe_command, &pmb->u.mqe));
10013 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
10014 phba->bbcredit_support = 1;
10015 phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
10018 fawwpn = bf_get(lpfc_mbx_rd_conf_fawwpn, rd_config);
10021 lpfc_printf_log(phba, KERN_INFO,
10022 LOG_INIT | LOG_DISCOVERY,
10023 "2702 READ_CONFIG: FA-PWWN is "
10024 "configured on\n");
10025 phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_CONFIG;
10027 /* Clear FW configured flag, preserve driver flag */
10028 phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_CONFIG;
10031 phba->sli4_hba.conf_trunk =
10032 bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
10033 phba->sli4_hba.extents_in_use =
10034 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
10036 phba->sli4_hba.max_cfg_param.max_xri =
10037 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
10038 /* Reduce resource usage in kdump environment */
10039 if (is_kdump_kernel() &&
10040 phba->sli4_hba.max_cfg_param.max_xri > 512)
10041 phba->sli4_hba.max_cfg_param.max_xri = 512;
10042 phba->sli4_hba.max_cfg_param.xri_base =
10043 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
10044 phba->sli4_hba.max_cfg_param.max_vpi =
10045 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
10046 /* Limit the max we support */
10047 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
10048 phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
10049 phba->sli4_hba.max_cfg_param.vpi_base =
10050 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
10051 phba->sli4_hba.max_cfg_param.max_rpi =
10052 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
10053 phba->sli4_hba.max_cfg_param.rpi_base =
10054 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
10055 phba->sli4_hba.max_cfg_param.max_vfi =
10056 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
10057 phba->sli4_hba.max_cfg_param.vfi_base =
10058 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
10059 phba->sli4_hba.max_cfg_param.max_fcfi =
10060 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
10061 phba->sli4_hba.max_cfg_param.max_eq =
10062 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
10063 phba->sli4_hba.max_cfg_param.max_rq =
10064 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
10065 phba->sli4_hba.max_cfg_param.max_wq =
10066 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
10067 phba->sli4_hba.max_cfg_param.max_cq =
10068 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
10069 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
10070 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
10071 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
10072 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
10073 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
10074 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
10075 phba->max_vports = phba->max_vpi;
10077 /* Next decide on FPIN or Signal E2E CGN support
10078 * For congestion alarms and warnings, the valid combinations are:
10079 * 1. FPIN alarms / FPIN warnings
10080 * 2. Signal alarms / Signal warnings
10081 * 3. FPIN alarms / Signal warnings
10082 * 4. Signal alarms / FPIN warnings
10084 * Initialize the adapter frequency to 100 mSecs
10086 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
10087 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
10088 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
10090 if (lpfc_use_cgn_signal) {
10091 if (bf_get(lpfc_mbx_rd_conf_wcs, rd_config)) {
10092 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
10093 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
10095 if (bf_get(lpfc_mbx_rd_conf_acs, rd_config)) {
10096 /* MUST support both alarm and warning
10097 * because EDC does not support alarm alone.
10099 if (phba->cgn_reg_signal !=
10100 EDC_CG_SIG_WARN_ONLY) {
10101 /* Must support both or none */
10102 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
10103 phba->cgn_reg_signal =
10104 EDC_CG_SIG_NOTSUPPORTED;
10106 phba->cgn_reg_signal =
10107 EDC_CG_SIG_WARN_ALARM;
10108 phba->cgn_reg_fpin =
10109 LPFC_CGN_FPIN_NONE;
10114 /* Set the congestion initial signal and fpin values. */
10115 phba->cgn_init_reg_fpin = phba->cgn_reg_fpin;
10116 phba->cgn_init_reg_signal = phba->cgn_reg_signal;
10118 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
10119 "6446 READ_CONFIG reg_sig x%x reg_fpin:x%x\n",
10120 phba->cgn_reg_signal, phba->cgn_reg_fpin);
10122 lpfc_map_topology(phba, rd_config);
10123 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10124 "2003 cfg params Extents? %d "
10129 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n",
10130 phba->sli4_hba.extents_in_use,
10131 phba->sli4_hba.max_cfg_param.xri_base,
10132 phba->sli4_hba.max_cfg_param.max_xri,
10133 phba->sli4_hba.max_cfg_param.vpi_base,
10134 phba->sli4_hba.max_cfg_param.max_vpi,
10135 phba->sli4_hba.max_cfg_param.vfi_base,
10136 phba->sli4_hba.max_cfg_param.max_vfi,
10137 phba->sli4_hba.max_cfg_param.rpi_base,
10138 phba->sli4_hba.max_cfg_param.max_rpi,
10139 phba->sli4_hba.max_cfg_param.max_fcfi,
10140 phba->sli4_hba.max_cfg_param.max_eq,
10141 phba->sli4_hba.max_cfg_param.max_cq,
10142 phba->sli4_hba.max_cfg_param.max_wq,
10143 phba->sli4_hba.max_cfg_param.max_rq,
10147 * Calculate queue resources based on how
10148 * many WQ/CQ/EQs are available.
10150 qmin = phba->sli4_hba.max_cfg_param.max_wq;
10151 if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
10152 qmin = phba->sli4_hba.max_cfg_param.max_cq;
10154 * Reserve 4 (ELS, NVME LS, MBOX, plus one extra) and
10155 * the remainder can be used for NVME / FCP.
10158 if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
10159 qmin = phba->sli4_hba.max_cfg_param.max_eq;
10161 /* Check to see if there is enough for default cfg */
10162 if ((phba->cfg_irq_chann > qmin) ||
10163 (phba->cfg_hdw_queue > qmin)) {
10164 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10165 "2005 Reducing Queues - "
10166 "FW resource limitation: "
10167 "WQ %d CQ %d EQ %d: min %d: "
10168 "IRQ %d HDWQ %d\n",
10169 phba->sli4_hba.max_cfg_param.max_wq,
10170 phba->sli4_hba.max_cfg_param.max_cq,
10171 phba->sli4_hba.max_cfg_param.max_eq,
10172 qmin, phba->cfg_irq_chann,
10173 phba->cfg_hdw_queue);
10175 if (phba->cfg_irq_chann > qmin)
10176 phba->cfg_irq_chann = qmin;
10177 if (phba->cfg_hdw_queue > qmin)
10178 phba->cfg_hdw_queue = qmin;
10185 /* Update link speed if forced link speed is supported */
10186 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10187 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
10188 forced_link_speed =
10189 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
10190 if (forced_link_speed) {
10191 phba->hba_flag |= HBA_FORCED_LINK_SPEED;
10193 switch (forced_link_speed) {
10194 case LINK_SPEED_1G:
10195 phba->cfg_link_speed =
10196 LPFC_USER_LINK_SPEED_1G;
10198 case LINK_SPEED_2G:
10199 phba->cfg_link_speed =
10200 LPFC_USER_LINK_SPEED_2G;
10202 case LINK_SPEED_4G:
10203 phba->cfg_link_speed =
10204 LPFC_USER_LINK_SPEED_4G;
10206 case LINK_SPEED_8G:
10207 phba->cfg_link_speed =
10208 LPFC_USER_LINK_SPEED_8G;
10210 case LINK_SPEED_10G:
10211 phba->cfg_link_speed =
10212 LPFC_USER_LINK_SPEED_10G;
10214 case LINK_SPEED_16G:
10215 phba->cfg_link_speed =
10216 LPFC_USER_LINK_SPEED_16G;
10218 case LINK_SPEED_32G:
10219 phba->cfg_link_speed =
10220 LPFC_USER_LINK_SPEED_32G;
10222 case LINK_SPEED_64G:
10223 phba->cfg_link_speed =
10224 LPFC_USER_LINK_SPEED_64G;
10227 phba->cfg_link_speed =
10228 LPFC_USER_LINK_SPEED_AUTO;
10231 lpfc_printf_log(phba, KERN_ERR,
10233 "0047 Unrecognized link "
10235 forced_link_speed);
10236 phba->cfg_link_speed =
10237 LPFC_USER_LINK_SPEED_AUTO;
10242 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
10243 length = phba->sli4_hba.max_cfg_param.max_xri -
10244 lpfc_sli4_get_els_iocb_cnt(phba);
10245 if (phba->cfg_hba_queue_depth > length) {
10246 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10247 "3361 HBA queue depth changed from %d to %d\n",
10248 phba->cfg_hba_queue_depth, length);
10249 phba->cfg_hba_queue_depth = length;
10252 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
10253 LPFC_SLI_INTF_IF_TYPE_2)
10256 /* get the pf# and vf# for SLI4 if_type 2 port */
10257 length = (sizeof(struct lpfc_mbx_get_func_cfg) -
10258 sizeof(struct lpfc_sli4_cfg_mhdr));
10259 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
10260 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
10261 length, LPFC_SLI4_MBX_EMBED);
10263 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
10264 shdr = (union lpfc_sli4_cfg_shdr *)
10265 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
10266 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10267 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10268 if (rc2 || shdr_status || shdr_add_status) {
10269 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10270 "3026 Mailbox failed , mbxCmd x%x "
10271 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
10272 bf_get(lpfc_mqe_command, &pmb->u.mqe),
10273 bf_get(lpfc_mqe_status, &pmb->u.mqe));
10277 /* search for the fc_fcoe resource descriptor */
10278 get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
10280 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
10281 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
10282 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
10283 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
10284 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
10285 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
10288 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
10289 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
10290 if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
10291 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
10292 phba->sli4_hba.iov.pf_number =
10293 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
10294 phba->sli4_hba.iov.vf_number =
10295 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
10300 if (i < LPFC_RSRC_DESC_MAX_NUM)
10301 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10302 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
10303 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
10304 phba->sli4_hba.iov.vf_number);
10306 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10307 "3028 GET_FUNCTION_CONFIG: failed to find "
10308 "Resource Descriptor:x%x\n",
10309 LPFC_RSRC_DESC_TYPE_FCFCOE);
10312 mempool_free(pmb, phba->mbox_mem_pool);
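/*
 * Usage sketch (hypothetical caller, for illustration only): the
 * READ_CONFIG results gate nearly all later resource allocation, so a
 * port bring-up path would read the configuration before sizing or
 * creating any queues:
 *
 *	rc = lpfc_sli4_read_config(phba);
 *	if (rc)
 *		return rc;	-- -ENOMEM or -EIO from the mailbox
 *	rc = lpfc_sli4_queue_verify(phba);
 */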
10317 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
10318 * @phba: pointer to lpfc hba data structure.
10320 * This routine is invoked to setup the port-side endian order when
10321 * the port if_type is 0. This routine has no function for other
10326 * -ENOMEM - No available memory
10327 * -EIO - The mailbox failed to complete successfully.
10330 lpfc_setup_endian_order(struct lpfc_hba *phba)
10332 LPFC_MBOXQ_t *mboxq;
10333 uint32_t if_type, rc = 0;
10334 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
10335 HOST_ENDIAN_HIGH_WORD1};
10337 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10339 case LPFC_SLI_INTF_IF_TYPE_0:
10340 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
10343 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10344 "0492 Unable to allocate memory for "
10345 "issuing SLI_CONFIG_SPECIAL mailbox "
10351 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
10352 * two words to contain special data values and no other data.
10354 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
10355 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
10356 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10357 if (rc != MBX_SUCCESS) {
10358 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10359 "0493 SLI_CONFIG_SPECIAL mailbox "
10360 "failed with status x%x\n",
10364 mempool_free(mboxq, phba->mbox_mem_pool);
10366 case LPFC_SLI_INTF_IF_TYPE_6:
10367 case LPFC_SLI_INTF_IF_TYPE_2:
10368 case LPFC_SLI_INTF_IF_TYPE_1:
10376 * lpfc_sli4_queue_verify - Verify and update EQ counts
10377 * @phba: pointer to lpfc hba data structure.
10379 * This routine is invoked to check the user settable queue counts for EQs.
10380 * After this routine is called the counts will be set to valid values that
10381 * adhere to the constraints of the system's interrupt vectors and the port's
10386 * -ENOMEM - No available memory
10389 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
10392 * Sanity check for configured queue parameters against the run-time
10393 * device parameters
10396 if (phba->nvmet_support) {
10397 if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
10398 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
10399 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
10400 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
10403 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10404 "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
10405 phba->cfg_hdw_queue, phba->cfg_irq_chann,
10406 phba->cfg_nvmet_mrq);
10408 /* Get EQ depth from module parameter, fake the default for now */
10409 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
10410 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
10412 /* Get CQ depth from module parameter, fake the default for now */
10413 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
10414 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
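/*
 * The NVMET clamping above is equivalent to the following sketch
 * (assuming the cfg fields are u32; min_t() lives in <linux/minmax.h>
 * on current trees). Shown for clarity only, not a functional change:
 *
 *	phba->cfg_nvmet_mrq = min_t(uint32_t, phba->cfg_nvmet_mrq,
 *				    min_t(uint32_t, phba->cfg_hdw_queue,
 *					  LPFC_NVMET_MRQ_MAX));
 */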
10419 lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
10421 struct lpfc_queue *qdesc;
10425 cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
10426 /* Create Fast Path IO CQs */
10427 if (phba->enab_exp_wqcq_pages)
10428 /* Increase the CQ size when WQEs contain an embedded cdb */
10429 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
10430 phba->sli4_hba.cq_esize,
10431 LPFC_CQE_EXP_COUNT, cpu);
10434 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10435 phba->sli4_hba.cq_esize,
10436 phba->sli4_hba.cq_ecount, cpu);
10438 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10439 "0499 Failed allocate fast-path IO CQ (%d)\n",
10443 qdesc->qe_valid = 1;
10445 qdesc->chann = cpu;
10446 phba->sli4_hba.hdwq[idx].io_cq = qdesc;
10448 /* Create Fast Path IO WQs */
10449 if (phba->enab_exp_wqcq_pages) {
10450 /* Increase the WQ size when WQEs contain an embedded cdb */
10451 wqesize = (phba->fcp_embed_io) ?
10452 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
10453 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
10455 LPFC_WQE_EXP_COUNT, cpu);
10457 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10458 phba->sli4_hba.wq_esize,
10459 phba->sli4_hba.wq_ecount, cpu);
10462 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10463 "0503 Failed allocate fast-path IO WQ (%d)\n",
10468 qdesc->chann = cpu;
10469 phba->sli4_hba.hdwq[idx].io_wq = qdesc;
10470 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
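/*
 * lpfc_alloc_io_wq_cq() is invoked once per hardware queue index; the
 * loop in lpfc_sli4_queue_create() below shows the intended use (error
 * label illustrative):
 *
 *	for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
 *		if (lpfc_alloc_io_wq_cq(phba, idx))
 *			goto out_error;
 */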
10475 * lpfc_sli4_queue_create - Create all the SLI4 queues
10476 * @phba: pointer to lpfc hba data structure.
10478 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
10479 * operation. For each SLI4 queue type, the parameters such as queue entry
10480 * count (queue depth) shall be taken from the module parameter. For now,
10481 * we just use some constant number as a placeholder.
10485 * -ENOMEM - No available memory
10486 * -EIO - The mailbox failed to complete successfully.
10489 lpfc_sli4_queue_create(struct lpfc_hba *phba)
10491 struct lpfc_queue *qdesc;
10492 int idx, cpu, eqcpu;
10493 struct lpfc_sli4_hdw_queue *qp;
10494 struct lpfc_vector_map_info *cpup;
10495 struct lpfc_vector_map_info *eqcpup;
10496 struct lpfc_eq_intr_info *eqi;
10499 * Create HBA Record arrays.
10500 * Both NVME and FCP will share that same vectors / EQs
10502 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
10503 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
10504 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
10505 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
10506 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
10507 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
10508 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
10509 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
10510 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
10511 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
10513 if (!phba->sli4_hba.hdwq) {
10514 phba->sli4_hba.hdwq = kcalloc(
10515 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
10517 if (!phba->sli4_hba.hdwq) {
10518 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10519 "6427 Failed allocate memory for "
10520 "fast-path Hardware Queue array\n");
10523 /* Prepare hardware queues to take IO buffers */
10524 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10525 qp = &phba->sli4_hba.hdwq[idx];
10526 spin_lock_init(&qp->io_buf_list_get_lock);
10527 spin_lock_init(&qp->io_buf_list_put_lock);
10528 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
10529 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
10530 qp->get_io_bufs = 0;
10531 qp->put_io_bufs = 0;
10532 qp->total_io_bufs = 0;
10533 spin_lock_init(&qp->abts_io_buf_list_lock);
10534 INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list);
10535 qp->abts_scsi_io_bufs = 0;
10536 qp->abts_nvme_io_bufs = 0;
10537 INIT_LIST_HEAD(&qp->sgl_list);
10538 INIT_LIST_HEAD(&qp->cmd_rsp_buf_list);
10539 spin_lock_init(&qp->hdwq_lock);
10543 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10544 if (phba->nvmet_support) {
10545 phba->sli4_hba.nvmet_cqset = kcalloc(
10546 phba->cfg_nvmet_mrq,
10547 sizeof(struct lpfc_queue *),
10549 if (!phba->sli4_hba.nvmet_cqset) {
10550 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10551 "3121 Fail allocate memory for "
10552 "fast-path CQ set array\n");
10555 phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
10556 phba->cfg_nvmet_mrq,
10557 sizeof(struct lpfc_queue *),
10559 if (!phba->sli4_hba.nvmet_mrq_hdr) {
10560 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10561 "3122 Fail allocate memory for "
10562 "fast-path RQ set hdr array\n");
10565 phba->sli4_hba.nvmet_mrq_data = kcalloc(
10566 phba->cfg_nvmet_mrq,
10567 sizeof(struct lpfc_queue *),
10569 if (!phba->sli4_hba.nvmet_mrq_data) {
10570 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10571 "3124 Fail allocate memory for "
10572 "fast-path RQ set data array\n");
10578 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
10580 /* Create HBA Event Queues (EQs) */
10581 for_each_present_cpu(cpu) {
10582 /* We only want to create 1 EQ per vector, even though
10583 * multiple CPUs might be using that vector, so only
10584 * select the CPUs that have LPFC_CPU_FIRST_IRQ set.
10586 cpup = &phba->sli4_hba.cpu_map[cpu];
10587 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
10590 /* Get a ptr to the Hardware Queue associated with this CPU */
10591 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
10593 /* Allocate an EQ */
10594 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10595 phba->sli4_hba.eq_esize,
10596 phba->sli4_hba.eq_ecount, cpu);
10598 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10599 "0497 Failed allocate EQ (%d)\n",
10603 qdesc->qe_valid = 1;
10604 qdesc->hdwq = cpup->hdwq;
10605 qdesc->chann = cpu; /* First CPU this EQ is affinitized to */
10606 qdesc->last_cpu = qdesc->chann;
10608 /* Save the allocated EQ in the Hardware Queue */
10609 qp->hba_eq = qdesc;
10611 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
10612 list_add(&qdesc->cpu_list, &eqi->list);
10615 /* Now we need to populate the other Hardware Queues that share
10616 * an IRQ vector, with the associated EQ ptr.
10618 for_each_present_cpu(cpu) {
10619 cpup = &phba->sli4_hba.cpu_map[cpu];
10621 /* Check for EQ already allocated in previous loop */
10622 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
10625 /* Check for multiple CPUs per hdwq */
10626 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
10630 /* We need to share an EQ for this hdwq */
10631 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
10632 eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
10633 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
10636 /* Allocate IO Path SLI4 CQ/WQs */
10637 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10638 if (lpfc_alloc_io_wq_cq(phba, idx))
10642 if (phba->nvmet_support) {
10643 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
10644 cpu = lpfc_find_cpu_handle(phba, idx,
10645 LPFC_FIND_BY_HDWQ);
10646 qdesc = lpfc_sli4_queue_alloc(phba,
10647 LPFC_DEFAULT_PAGE_SIZE,
10648 phba->sli4_hba.cq_esize,
10649 phba->sli4_hba.cq_ecount,
10652 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10653 "3142 Failed allocate NVME "
10654 "CQ Set (%d)\n", idx);
10657 qdesc->qe_valid = 1;
10659 qdesc->chann = cpu;
10660 phba->sli4_hba.nvmet_cqset[idx] = qdesc;
10665 * Create Slow Path Completion Queues (CQs)
10668 cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
10669 /* Create slow-path Mailbox Command Complete Queue */
10670 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10671 phba->sli4_hba.cq_esize,
10672 phba->sli4_hba.cq_ecount, cpu);
10674 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10675 "0500 Failed allocate slow-path mailbox CQ\n");
10678 qdesc->qe_valid = 1;
10679 phba->sli4_hba.mbx_cq = qdesc;
10681 /* Create slow-path ELS Complete Queue */
10682 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10683 phba->sli4_hba.cq_esize,
10684 phba->sli4_hba.cq_ecount, cpu);
10686 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10687 "0501 Failed allocate slow-path ELS CQ\n");
10690 qdesc->qe_valid = 1;
10691 qdesc->chann = cpu;
10692 phba->sli4_hba.els_cq = qdesc;
10696 * Create Slow Path Work Queues (WQs)
10699 /* Create Mailbox Command Queue */
10701 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10702 phba->sli4_hba.mq_esize,
10703 phba->sli4_hba.mq_ecount, cpu);
10705 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10706 "0505 Failed allocate slow-path MQ\n");
10709 qdesc->chann = cpu;
10710 phba->sli4_hba.mbx_wq = qdesc;
10713 * Create ELS Work Queues
10716 /* Create slow-path ELS Work Queue */
10717 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10718 phba->sli4_hba.wq_esize,
10719 phba->sli4_hba.wq_ecount, cpu);
10721 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10722 "0504 Failed allocate slow-path ELS WQ\n");
10725 qdesc->chann = cpu;
10726 phba->sli4_hba.els_wq = qdesc;
10727 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10729 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10730 /* Create NVME LS Complete Queue */
10731 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10732 phba->sli4_hba.cq_esize,
10733 phba->sli4_hba.cq_ecount, cpu);
10735 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10736 "6079 Failed allocate NVME LS CQ\n");
10739 qdesc->chann = cpu;
10740 qdesc->qe_valid = 1;
10741 phba->sli4_hba.nvmels_cq = qdesc;
10743 /* Create NVME LS Work Queue */
10744 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10745 phba->sli4_hba.wq_esize,
10746 phba->sli4_hba.wq_ecount, cpu);
10748 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10749 "6080 Failed allocate NVME LS WQ\n");
10752 qdesc->chann = cpu;
10753 phba->sli4_hba.nvmels_wq = qdesc;
10754 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10758 * Create Receive Queue (RQ)
10761 /* Create Receive Queue for header */
10762 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10763 phba->sli4_hba.rq_esize,
10764 phba->sli4_hba.rq_ecount, cpu);
10766 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10767 "0506 Failed allocate receive HRQ\n");
10770 phba->sli4_hba.hdr_rq = qdesc;
10772 /* Create Receive Queue for data */
10773 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10774 phba->sli4_hba.rq_esize,
10775 phba->sli4_hba.rq_ecount, cpu);
10777 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10778 "0507 Failed allocate receive DRQ\n");
10781 phba->sli4_hba.dat_rq = qdesc;
10783 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
10784 phba->nvmet_support) {
10785 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
10786 cpu = lpfc_find_cpu_handle(phba, idx,
10787 LPFC_FIND_BY_HDWQ);
10788 /* Create NVMET Receive Queue for header */
10789 qdesc = lpfc_sli4_queue_alloc(phba,
10790 LPFC_DEFAULT_PAGE_SIZE,
10791 phba->sli4_hba.rq_esize,
10792 LPFC_NVMET_RQE_DEF_COUNT,
10795 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10796 "3146 Failed allocate "
10801 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
10803 /* Only needed for header of RQ pair */
10804 qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
10807 if (qdesc->rqbp == NULL) {
10808 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10809 "6131 Failed allocate "
10814 /* Put list in known state in case driver load fails. */
10815 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);
10817 /* Create NVMET Receive Queue for data */
10818 qdesc = lpfc_sli4_queue_alloc(phba,
10819 LPFC_DEFAULT_PAGE_SIZE,
10820 phba->sli4_hba.rq_esize,
10821 LPFC_NVMET_RQE_DEF_COUNT,
10824 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10825 "3156 Failed allocate "
10830 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
10834 /* Clear NVME stats */
10835 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10836 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10837 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
10838 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
10842 /* Clear SCSI stats */
10843 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
10844 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10845 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
10846 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
10853 lpfc_sli4_queue_destroy(phba);
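/*
 * Pairing note (sketch): lpfc_sli4_queue_create() only allocates the
 * host-side queue memory; the queues are posted to the port later by
 * lpfc_sli4_queue_setup(). On any allocation failure the routine
 * unwinds through lpfc_sli4_queue_destroy(), so a caller may treat it
 * as all-or-nothing:
 *
 *	if (lpfc_sli4_queue_create(phba))
 *		return -ENOMEM;
 */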
10858 __lpfc_sli4_release_queue(struct lpfc_queue **qp)
10861 lpfc_sli4_queue_free(*qp);
10867 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
10874 for (idx = 0; idx < max; idx++)
10875 __lpfc_sli4_release_queue(&(*qs)[idx]);
10882 lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
10884 struct lpfc_sli4_hdw_queue *hdwq;
10885 struct lpfc_queue *eq;
10888 hdwq = phba->sli4_hba.hdwq;
10890 /* Loop thru all Hardware Queues */
10891 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10892 /* Free the CQ/WQ corresponding to the Hardware Queue */
10893 lpfc_sli4_queue_free(hdwq[idx].io_cq);
10894 lpfc_sli4_queue_free(hdwq[idx].io_wq);
10895 hdwq[idx].hba_eq = NULL;
10896 hdwq[idx].io_cq = NULL;
10897 hdwq[idx].io_wq = NULL;
10898 if (phba->cfg_xpsgl && !phba->nvmet_support)
10899 lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
10900 lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
10902 /* Loop thru all IRQ vectors */
10903 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
10904 /* Free the EQ corresponding to the IRQ vector */
10905 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
10906 lpfc_sli4_queue_free(eq);
10907 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
10912 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
10913 * @phba: pointer to lpfc hba data structure.
10915 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
10920 * -ENOMEM - No available memory
10921 * -EIO - The mailbox failed to complete successfully.
10924 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
10927 * Set FREE_INIT before beginning to free the queues.
10928 * Wait until all users of the queues acknowledge the release
10929 * by clearing FREE_WAIT.
10931 spin_lock_irq(&phba->hbalock);
10932 phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
10933 while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
10934 spin_unlock_irq(&phba->hbalock);
10936 spin_lock_irq(&phba->hbalock);
10938 spin_unlock_irq(&phba->hbalock);
10940 lpfc_sli4_cleanup_poll_list(phba);
10942 /* Release HBA eqs */
10943 if (phba->sli4_hba.hdwq)
10944 lpfc_sli4_release_hdwq(phba);
10946 if (phba->nvmet_support) {
10947 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
10948 phba->cfg_nvmet_mrq);
10950 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
10951 phba->cfg_nvmet_mrq);
10952 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
10953 phba->cfg_nvmet_mrq);
10956 /* Release mailbox command work queue */
10957 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
10959 /* Release ELS work queue */
10960 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);
10962 /* Release NVME LS work queue */
10963 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);
10965 /* Release unsolicited receive queue */
10966 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
10967 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);
10969 /* Release ELS complete queue */
10970 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);
10972 /* Release NVME LS complete queue */
10973 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);
10975 /* Release mailbox command complete queue */
10976 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);
10978 /* Everything on this list has been freed */
10979 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
10981 /* Done with freeing the queues */
10982 spin_lock_irq(&phba->hbalock);
10983 phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
10984 spin_unlock_irq(&phba->hbalock);
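/*
 * Handshake sketch (hypothetical queue user): a path that must hold
 * off teardown sets LPFC_QUEUE_FREE_WAIT under hbalock and clears it
 * when finished; the loop above spins until the flag drops before any
 * queue memory is freed:
 *
 *	spin_lock_irq(&phba->hbalock);
 *	if (!(phba->sli.sli_flag & LPFC_QUEUE_FREE_INIT))
 *		phba->sli.sli_flag |= LPFC_QUEUE_FREE_WAIT;
 *	spin_unlock_irq(&phba->hbalock);
 */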
10988 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
10990 struct lpfc_rqb *rqbp;
10991 struct lpfc_dmabuf *h_buf;
10992 struct rqb_dmabuf *rqb_buffer;
10995 while (!list_empty(&rqbp->rqb_buffer_list)) {
10996 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
10997 struct lpfc_dmabuf, list);
10999 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
11000 (rqbp->rqb_free_buffer)(phba, rqb_buffer);
11001 rqbp->buffer_count--;
11007 lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
11008 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
11009 int qidx, uint32_t qtype)
11011 struct lpfc_sli_ring *pring;
11014 if (!eq || !cq || !wq) {
11015 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11016 "6085 Fast-path %s (%d) not allocated\n",
11017 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
11021 /* create the CQ first */
11022 rc = lpfc_cq_create(phba, cq, eq,
11023 (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
11025 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11026 "6086 Failed setup of CQ (%d), rc = 0x%x\n",
11027 qidx, (uint32_t)rc);
11031 if (qtype != LPFC_MBOX) {
11032 /* Setup cq_map for fast lookup */
11034 *cq_map = cq->queue_id;
11036 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11037 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
11038 qidx, cq->queue_id, qidx, eq->queue_id);
11040 /* create the WQ */
11041 rc = lpfc_wq_create(phba, wq, cq, qtype);
11043 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11044 "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n",
11045 qidx, (uint32_t)rc);
11046 /* no need to tear down cq - caller will do so */
11050 /* Bind this CQ/WQ to the NVME ring */
11052 pring->sli.sli4.wqp = (void *)wq;
11055 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11056 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
11057 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
11059 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
11061 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11062 "0539 Failed setup of slow-path MQ: "
11063 "rc = 0x%x\n", rc);
11064 /* no need to tear down cq - caller will do so */
11068 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11069 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
11070 phba->sli4_hba.mbx_wq->queue_id,
11071 phba->sli4_hba.mbx_cq->queue_id);
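/*
 * Example invocation (taken from lpfc_sli4_queue_setup() below): the
 * mailbox path passes qtype LPFC_MBOX, so an MQ rather than a WQ is
 * created on top of the CQ:
 *
 *	rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
 *			       phba->sli4_hba.mbx_cq,
 *			       phba->sli4_hba.mbx_wq,
 *			       NULL, 0, LPFC_MBOX);
 */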
11078 * lpfc_setup_cq_lookup - Setup the CQ lookup table
11079 * @phba: pointer to lpfc hba data structure.
11081 * This routine will populate the cq_lookup table by all
11082 * available CQ queue_id's.
11085 lpfc_setup_cq_lookup(struct lpfc_hba *phba)
11087 struct lpfc_queue *eq, *childq;
11090 memset(phba->sli4_hba.cq_lookup, 0,
11091 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
11092 /* Loop thru all IRQ vectors */
11093 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11094 /* Get the EQ corresponding to the IRQ vector */
11095 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
11098 /* Loop through all CQs associated with that EQ */
11099 list_for_each_entry(childq, &eq->child_list, list) {
11100 if (childq->queue_id > phba->sli4_hba.cq_max)
11102 if (childq->subtype == LPFC_IO)
11103 phba->sli4_hba.cq_lookup[childq->queue_id] =
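/*
 * Fast-path consumers can then resolve a CQ id straight to its queue
 * without walking the EQ child lists (sketch, assuming cqid has been
 * range-checked against cq_max):
 *
 *	struct lpfc_queue *cq = phba->sli4_hba.cq_lookup[cqid];
 *	if (cq)
 *		... process completions on cq ...
 */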
11110 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
11111 * @phba: pointer to lpfc hba data structure.
11113 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
11118 * -ENOMEM - No available memory
11119 * -EIO - The mailbox failed to complete successfully.
11122 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
11124 uint32_t shdr_status, shdr_add_status;
11125 union lpfc_sli4_cfg_shdr *shdr;
11126 struct lpfc_vector_map_info *cpup;
11127 struct lpfc_sli4_hdw_queue *qp;
11128 LPFC_MBOXQ_t *mboxq;
11130 uint32_t length, usdelay;
11133 /* Check for dual-ULP support */
11134 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11136 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11137 "3249 Unable to allocate memory for "
11138 "QUERY_FW_CFG mailbox command\n");
11141 length = (sizeof(struct lpfc_mbx_query_fw_config) -
11142 sizeof(struct lpfc_sli4_cfg_mhdr));
11143 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
11144 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
11145 length, LPFC_SLI4_MBX_EMBED);
11147 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11149 shdr = (union lpfc_sli4_cfg_shdr *)
11150 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
11151 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11152 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
11153 if (shdr_status || shdr_add_status || rc) {
11154 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11155 "3250 QUERY_FW_CFG mailbox failed with status "
11156 "x%x add_status x%x, mbx status x%x\n",
11157 shdr_status, shdr_add_status, rc);
11158 mempool_free(mboxq, phba->mbox_mem_pool);
11163 phba->sli4_hba.fw_func_mode =
11164 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
11165 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
11166 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
11167 phba->sli4_hba.physical_port =
11168 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
11169 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11170 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
11171 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
11172 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
11174 mempool_free(mboxq, phba->mbox_mem_pool);
11177 * Set up HBA Event Queues (EQs)
11179 qp = phba->sli4_hba.hdwq;
11181 /* Set up HBA event queue */
11183 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11184 "3147 Fast-path EQs not allocated\n");
11189 /* Loop thru all IRQ vectors */
11190 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11191 /* Create HBA Event Queues (EQs) in order */
11192 for_each_present_cpu(cpu) {
11193 cpup = &phba->sli4_hba.cpu_map[cpu];
11195 /* Look for the CPU that's using that vector with
11196 * LPFC_CPU_FIRST_IRQ set.
11198 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
11200 if (qidx != cpup->eq)
11203 /* Create an EQ for that vector */
11204 rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
11205 phba->cfg_fcp_imax);
11207 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11208 "0523 Failed setup of fast-path"
11209 " EQ (%d), rc = 0x%x\n",
11210 cpup->eq, (uint32_t)rc);
11214 /* Save the EQ for that vector in the hba_eq_hdl */
11215 phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
11216 qp[cpup->hdwq].hba_eq;
11218 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11219 "2584 HBA EQ setup: queue[%d]-id=%d\n",
11221 qp[cpup->hdwq].hba_eq->queue_id);
11225 /* Loop thru all Hardware Queues */
11226 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
11227 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
11228 cpup = &phba->sli4_hba.cpu_map[cpu];
11230 /* Create the CQ/WQ corresponding to the Hardware Queue */
11231 rc = lpfc_create_wq_cq(phba,
11232 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
11235 &phba->sli4_hba.hdwq[qidx].io_cq_map,
11239 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11240 "0535 Failed to setup fastpath "
11241 "IO WQ/CQ (%d), rc = 0x%x\n",
11242 qidx, (uint32_t)rc);
11248 * Set up Slow Path Complete Queues (CQs)
11251 /* Set up slow-path MBOX CQ/MQ */
11253 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
11254 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11255 "0528 %s not allocated\n",
11256 phba->sli4_hba.mbx_cq ?
11257 "Mailbox WQ" : "Mailbox CQ");
11262 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11263 phba->sli4_hba.mbx_cq,
11264 phba->sli4_hba.mbx_wq,
11265 NULL, 0, LPFC_MBOX);
11267 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11268 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
11272 if (phba->nvmet_support) {
11273 if (!phba->sli4_hba.nvmet_cqset) {
11274 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11275 "3165 Fast-path NVME CQ Set "
11276 "array not allocated\n");
11280 if (phba->cfg_nvmet_mrq > 1) {
11281 rc = lpfc_cq_create_set(phba,
11282 phba->sli4_hba.nvmet_cqset,
11284 LPFC_WCQ, LPFC_NVMET);
11286 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11287 "3164 Failed setup of NVME CQ "
11288 "Set, rc = 0x%x\n",
11293 /* Set up NVMET Receive Complete Queue */
11294 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
11296 LPFC_WCQ, LPFC_NVMET);
11298 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11299 "6089 Failed setup NVMET CQ: "
11300 "rc = 0x%x\n", (uint32_t)rc);
11303 phba->sli4_hba.nvmet_cqset[0]->chann = 0;
11305 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11306 "6090 NVMET CQ setup: cq-id=%d, "
11307 "parent eq-id=%d\n",
11308 phba->sli4_hba.nvmet_cqset[0]->queue_id,
11309 qp[0].hba_eq->queue_id);
11313 /* Set up slow-path ELS WQ/CQ */
11314 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
11315 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11316 "0530 ELS %s not allocated\n",
11317 phba->sli4_hba.els_cq ? "WQ" : "CQ");
11321 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11322 phba->sli4_hba.els_cq,
11323 phba->sli4_hba.els_wq,
11324 NULL, 0, LPFC_ELS);
11326 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11327 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
11331 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11332 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
11333 phba->sli4_hba.els_wq->queue_id,
11334 phba->sli4_hba.els_cq->queue_id);
11336 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11337 /* Set up NVME LS Complete Queue */
11338 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
11339 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11340 "6091 LS %s not allocated\n",
11341 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
11345 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11346 phba->sli4_hba.nvmels_cq,
11347 phba->sli4_hba.nvmels_wq,
11348 NULL, 0, LPFC_NVME_LS);
11350 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11351 "0526 Failed setup of NVVME LS WQ/CQ: "
11352 "rc = 0x%x\n", (uint32_t)rc);
11356 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11357 "6096 ELS WQ setup: wq-id=%d, "
11358 "parent cq-id=%d\n",
11359 phba->sli4_hba.nvmels_wq->queue_id,
11360 phba->sli4_hba.nvmels_cq->queue_id);
11364 * Create NVMET Receive Queue (RQ)
11366 if (phba->nvmet_support) {
11367 if ((!phba->sli4_hba.nvmet_cqset) ||
11368 (!phba->sli4_hba.nvmet_mrq_hdr) ||
11369 (!phba->sli4_hba.nvmet_mrq_data)) {
11370 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11371 "6130 MRQ CQ Queues not "
11376 if (phba->cfg_nvmet_mrq > 1) {
11377 rc = lpfc_mrq_create(phba,
11378 phba->sli4_hba.nvmet_mrq_hdr,
11379 phba->sli4_hba.nvmet_mrq_data,
11380 phba->sli4_hba.nvmet_cqset,
11383 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11384 "6098 Failed setup of NVMET "
11385 "MRQ: rc = 0x%x\n",
11391 rc = lpfc_rq_create(phba,
11392 phba->sli4_hba.nvmet_mrq_hdr[0],
11393 phba->sli4_hba.nvmet_mrq_data[0],
11394 phba->sli4_hba.nvmet_cqset[0],
11397 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11398 "6057 Failed setup of NVMET "
11399 "Receive Queue: rc = 0x%x\n",
11405 phba, KERN_INFO, LOG_INIT,
11406 "6099 NVMET RQ setup: hdr-rq-id=%d, "
11407 "dat-rq-id=%d parent cq-id=%d\n",
11408 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
11409 phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
11410 phba->sli4_hba.nvmet_cqset[0]->queue_id);
11415 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
11416 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11417 "0540 Receive Queue not allocated\n");
11422 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
11423 phba->sli4_hba.els_cq, LPFC_USOL);
11425 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11426 "0541 Failed setup of Receive Queue: "
11427 "rc = 0x%x\n", (uint32_t)rc);
11431 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11432 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
11433 "parent cq-id=%d\n",
11434 phba->sli4_hba.hdr_rq->queue_id,
11435 phba->sli4_hba.dat_rq->queue_id,
11436 phba->sli4_hba.els_cq->queue_id);
11438 if (phba->cfg_fcp_imax)
11439 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
11443 for (qidx = 0; qidx < phba->cfg_irq_chann;
11444 qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
11445 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
11448 if (phba->sli4_hba.cq_max) {
11449 kfree(phba->sli4_hba.cq_lookup);
11450 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
11451 sizeof(struct lpfc_queue *), GFP_KERNEL);
11452 if (!phba->sli4_hba.cq_lookup) {
11453 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11454 "0549 Failed setup of CQ Lookup table: "
11455 "size 0x%x\n", phba->sli4_hba.cq_max);
11459 lpfc_setup_cq_lookup(phba);
11464 lpfc_sli4_queue_unset(phba);
11470 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
11471 * @phba: pointer to lpfc hba data structure.
11473 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
11478 * -ENOMEM - No available memory
11479 * -EIO - The mailbox failed to complete successfully.
11482 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
11484 struct lpfc_sli4_hdw_queue *qp;
11485 struct lpfc_queue *eq;
11488 /* Unset mailbox command work queue */
11489 if (phba->sli4_hba.mbx_wq)
11490 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
11492 /* Unset NVME LS work queue */
11493 if (phba->sli4_hba.nvmels_wq)
11494 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
11496 /* Unset ELS work queue */
11497 if (phba->sli4_hba.els_wq)
11498 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
11500 /* Unset unsolicited receive queue */
11501 if (phba->sli4_hba.hdr_rq)
11502 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
11503 phba->sli4_hba.dat_rq);
11505 /* Unset mailbox command complete queue */
11506 if (phba->sli4_hba.mbx_cq)
11507 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
11509 /* Unset ELS complete queue */
11510 if (phba->sli4_hba.els_cq)
11511 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
11513 /* Unset NVME LS complete queue */
11514 if (phba->sli4_hba.nvmels_cq)
11515 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
11517 if (phba->nvmet_support) {
11518 /* Unset NVMET MRQ queue */
11519 if (phba->sli4_hba.nvmet_mrq_hdr) {
11520 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
11523 phba->sli4_hba.nvmet_mrq_hdr[qidx],
11524 phba->sli4_hba.nvmet_mrq_data[qidx]);
11527 /* Unset NVMET CQ Set complete queue */
11528 if (phba->sli4_hba.nvmet_cqset) {
11529 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
11531 phba, phba->sli4_hba.nvmet_cqset[qidx]);
11535 /* Unset fast-path SLI4 queues */
11536 if (phba->sli4_hba.hdwq) {
11537 /* Loop thru all Hardware Queues */
11538 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
11539 /* Destroy the CQ/WQ corresponding to Hardware Queue */
11540 qp = &phba->sli4_hba.hdwq[qidx];
11541 lpfc_wq_destroy(phba, qp->io_wq);
11542 lpfc_cq_destroy(phba, qp->io_cq);
11544 /* Loop thru all IRQ vectors */
11545 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11546 /* Destroy the EQ corresponding to the IRQ vector */
11547 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
11548 lpfc_eq_destroy(phba, eq);
11552 kfree(phba->sli4_hba.cq_lookup);
11553 phba->sli4_hba.cq_lookup = NULL;
11554 phba->sli4_hba.cq_max = 0;
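/*
 * Teardown ordering note (sketch): lpfc_sli4_queue_unset() issues the
 * mailbox DESTROY commands that remove the queues from the port but
 * leaves the host-side structures intact; lpfc_sli4_queue_destroy()
 * then frees the host memory. Teardown therefore mirrors bring-up:
 *
 *	lpfc_sli4_queue_unset(phba);	-- undo lpfc_sli4_queue_setup()
 *	lpfc_sli4_queue_destroy(phba);	-- undo lpfc_sli4_queue_create()
 */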
11558 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
11559 * @phba: pointer to lpfc hba data structure.
11561 * This routine is invoked to allocate and set up a pool of completion queue
11562 * events. The body of a completion queue event is a completion queue entry
11563 * (CQE). For now, this pool is used by the interrupt service routine to queue
11564 * the following HBA completion queue events for the worker thread to process:
11565 * - Mailbox asynchronous events
11566 * - Receive queue completion unsolicited events
11567 * Later, this can be used for all the slow-path events.
11571 * -ENOMEM - No available memory
11574 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
11576 struct lpfc_cq_event *cq_event;
11579 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
11580 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
11582 goto out_pool_create_fail;
11583 list_add_tail(&cq_event->list,
11584 &phba->sli4_hba.sp_cqe_event_pool);
11588 out_pool_create_fail:
11589 lpfc_sli4_cq_event_pool_destroy(phba);
11594 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
11595 * @phba: pointer to lpfc hba data structure.
11597 * This routine is invoked to free the pool of completion queue events at
11598 * driver unload time. Note that it is the responsibility of the driver
11599 * cleanup routine to free all the outstanding completion-queue events
11600 * allocated from this pool back into the pool before invoking this routine
11601 * to destroy the pool.
11604 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
11606 struct lpfc_cq_event *cq_event, *next_cq_event;
11608 list_for_each_entry_safe(cq_event, next_cq_event,
11609 &phba->sli4_hba.sp_cqe_event_pool, list) {
11610 list_del(&cq_event->list);
11616 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
11617 * @phba: pointer to lpfc hba data structure.
11619 * This routine is the lock-free version of the API invoked to allocate a
11620 * completion-queue event from the free pool.
11622 * Return: Pointer to the newly allocated completion-queue event if successful
11625 struct lpfc_cq_event *
11626 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
11628 struct lpfc_cq_event *cq_event = NULL;
11630 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
11631 struct lpfc_cq_event, list);
11636 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
11637 * @phba: pointer to lpfc hba data structure.
11639 * This routine is the locked version of the API invoked to allocate a
11640 * completion-queue event from the free pool.
11642 * Return: Pointer to the newly allocated completion-queue event if successful
11645 struct lpfc_cq_event *
11646 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
11648 struct lpfc_cq_event *cq_event;
11649 unsigned long iflags;
11651 spin_lock_irqsave(&phba->hbalock, iflags);
11652 cq_event = __lpfc_sli4_cq_event_alloc(phba);
11653 spin_unlock_irqrestore(&phba->hbalock, iflags);
11658 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
11659 * @phba: pointer to lpfc hba data structure.
11660 * @cq_event: pointer to the completion queue event to be freed.
11662 * This routine is the lock-free version of the API invoked to release a
11663 * completion-queue event back into the free pool.
11666 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
11667 struct lpfc_cq_event *cq_event)
11669 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
11673 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
11674 * @phba: pointer to lpfc hba data structure.
11675 * @cq_event: pointer to the completion queue event to be freed.
11677 * This routine is the locked version of the API invoked to release a
11678 * completion-queue event back into the free pool.
11681 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
11682 struct lpfc_cq_event *cq_event)
11684 unsigned long iflags;
11685 spin_lock_irqsave(&phba->hbalock, iflags);
11686 __lpfc_sli4_cq_event_release(phba, cq_event);
11687 spin_unlock_irqrestore(&phba->hbalock, iflags);
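/*
 * Typical producer/consumer pairing (hypothetical, for illustration):
 * the interrupt path takes an event from the pool with the locked
 * variant and the worker thread returns it once processing completes:
 *
 *	cq_event = lpfc_sli4_cq_event_alloc(phba);
 *	if (!cq_event)
 *		return false;	-- pool exhausted
 *	... queue cq_event for the worker thread ...
 *	lpfc_sli4_cq_event_release(phba, cq_event);
 *
 * The __lpfc_sli4_cq_event_* variants are for callers already holding
 * hbalock.
 */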
11691 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
11692 * @phba: pointer to lpfc hba data structure.
11694 * This routine frees all the pending completion-queue events back
11695 * into the free pool for device reset.
11698 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
11700 LIST_HEAD(cq_event_list);
11701 struct lpfc_cq_event *cq_event;
11702 unsigned long iflags;
11704 /* Retrieve all the pending WCQEs from pending WCQE lists */
11706 /* Pending ELS XRI abort events */
11707 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
11708 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
11710 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
11712 /* Pending async events */
11713 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
11714 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
11716 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
11718 while (!list_empty(&cq_event_list)) {
11719 list_remove_head(&cq_event_list, cq_event,
11720 struct lpfc_cq_event, list);
11721 lpfc_sli4_cq_event_release(phba, cq_event);
11726 * lpfc_pci_function_reset - Reset pci function.
11727 * @phba: pointer to lpfc hba data structure.
11729 * This routine is invoked to request a PCI function reset. It will destroy
11730 * all resources assigned to the PCI function that originates this request.
11734 * -ENOMEM - No available memory
11735 * -EIO - The mailbox failed to complete successfully.
11738 lpfc_pci_function_reset(struct lpfc_hba *phba)
11740 LPFC_MBOXQ_t *mboxq;
11741 uint32_t rc = 0, if_type;
11742 uint32_t shdr_status, shdr_add_status;
11744 uint32_t port_reset = 0;
11745 union lpfc_sli4_cfg_shdr *shdr;
11746 struct lpfc_register reg_data;
11749 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11751 case LPFC_SLI_INTF_IF_TYPE_0:
11752 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
11755 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11756 "0494 Unable to allocate memory for "
11757 "issuing SLI_FUNCTION_RESET mailbox "
11762 /* Setup PCI function reset mailbox-ioctl command */
11763 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
11764 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
11765 LPFC_SLI4_MBX_EMBED);
11766 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11767 shdr = (union lpfc_sli4_cfg_shdr *)
11768 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
11769 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11770 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
11772 mempool_free(mboxq, phba->mbox_mem_pool);
11773 if (shdr_status || shdr_add_status || rc) {
11774 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11775 "0495 SLI_FUNCTION_RESET mailbox "
11776 "failed with status x%x add_status x%x,"
11777 " mbx status x%x\n",
11778 shdr_status, shdr_add_status, rc);
11782 case LPFC_SLI_INTF_IF_TYPE_2:
11783 case LPFC_SLI_INTF_IF_TYPE_6:
11786 * Poll the Port Status Register and wait for RDY for
11787 * up to 30 seconds. If the port doesn't respond, treat
11790 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
11791 if (lpfc_readl(phba->sli4_hba.u.if_type2.
11792 STATUSregaddr, &reg_data.word0)) {
11796 if (bf_get(lpfc_sliport_status_rdy, &reg_data))
11801 if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
11802 phba->work_status[0] = readl(
11803 phba->sli4_hba.u.if_type2.ERR1regaddr);
11804 phba->work_status[1] = readl(
11805 phba->sli4_hba.u.if_type2.ERR2regaddr);
11806 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11807 "2890 Port not ready, port status reg "
11808 "0x%x error 1=0x%x, error 2=0x%x\n",
11810 phba->work_status[0],
11811 phba->work_status[1]);
11816 if (bf_get(lpfc_sliport_status_pldv, &reg_data))
11817 lpfc_pldv_detect = true;
11821 * Reset the port now
11823 reg_data.word0 = 0;
11824 bf_set(lpfc_sliport_ctrl_end, &reg_data,
11825 LPFC_SLIPORT_LITTLE_ENDIAN);
11826 bf_set(lpfc_sliport_ctrl_ip, &reg_data,
11827 LPFC_SLIPORT_INIT_PORT);
11828 writel(reg_data.word0, phba->sli4_hba.u.if_type2.
11831 pci_read_config_word(phba->pcidev,
11832 PCI_DEVICE_ID, &devid);
11837 } else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
11843 case LPFC_SLI_INTF_IF_TYPE_1:
11849 /* Catch the not-ready port failure after a port reset. */
11851 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11852 "3317 HBA not functional: IP Reset Failed "
11853 "try: echo fw_reset > board_mode\n");
11861 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
11862 * @phba: pointer to lpfc hba data structure.
11864 * This routine is invoked to set up the PCI device memory space for device
11865 * with SLI-4 interface spec.
11869 * other values - error
11872 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
11874 struct pci_dev *pdev = phba->pcidev;
11875 unsigned long bar0map_len, bar1map_len, bar2map_len;
11882 /* Set the device DMA mask size */
11883 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11885 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11890 * The BARs and register set definitions and offset locations are
11891 * dependent on the if_type.
11893 if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
11894 &phba->sli4_hba.sli_intf.word0)) {
11898 /* There is no SLI3 fallback for SLI4 devices. */
11899 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
11900 LPFC_SLI_INTF_VALID) {
11901 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11902 "2894 SLI_INTF reg contents invalid "
11903 "sli_intf reg 0x%x\n",
11904 phba->sli4_hba.sli_intf.word0);
11908 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11910 * Get the bus address of the SLI4 device BAR regions and the
11911 * number of bytes required by each mapping. The mapping of the
11912 * particular PCI BAR regions is dependent on the type of
11915 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
11916 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
11917 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
11920 * Map SLI4 PCI Config Space Register base to a kernel virtual
11923 phba->sli4_hba.conf_regs_memmap_p =
11924 ioremap(phba->pci_bar0_map, bar0map_len);
11925 if (!phba->sli4_hba.conf_regs_memmap_p) {
11926 dev_printk(KERN_ERR, &pdev->dev,
11927 "ioremap failed for SLI4 PCI config "
11931 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
11932 /* Set up BAR0 PCI config space register memory map */
11933 lpfc_sli4_bar0_register_memmap(phba, if_type);
11935 phba->pci_bar0_map = pci_resource_start(pdev, 1);
11936 bar0map_len = pci_resource_len(pdev, 1);
11937 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
11938 dev_printk(KERN_ERR, &pdev->dev,
11939 "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
11942 phba->sli4_hba.conf_regs_memmap_p =
11943 ioremap(phba->pci_bar0_map, bar0map_len);
11944 if (!phba->sli4_hba.conf_regs_memmap_p) {
11945 dev_printk(KERN_ERR, &pdev->dev,
11946 "ioremap failed for SLI4 PCI config "
11950 lpfc_sli4_bar0_register_memmap(phba, if_type);
11953 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
11954 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
11956 * Map SLI4 if type 0 HBA Control Register base to a
11957 * kernel virtual address and setup the registers.
11959 phba->pci_bar1_map = pci_resource_start(pdev,
11961 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
11962 phba->sli4_hba.ctrl_regs_memmap_p =
11963 ioremap(phba->pci_bar1_map,
11965 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
11966 dev_err(&pdev->dev,
11967 "ioremap failed for SLI4 HBA "
11968 "control registers.\n");
11970 goto out_iounmap_conf;
11972 phba->pci_bar2_memmap_p =
11973 phba->sli4_hba.ctrl_regs_memmap_p;
11974 lpfc_sli4_bar1_register_memmap(phba, if_type);
11977 goto out_iounmap_conf;
11981 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
11982 (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
11984 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel
11985 * virtual address and setup the registers.
11987 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
11988 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
11989 phba->sli4_hba.drbl_regs_memmap_p =
11990 ioremap(phba->pci_bar1_map, bar1map_len);
11991 if (!phba->sli4_hba.drbl_regs_memmap_p) {
11992 dev_err(&pdev->dev,
11993 "ioremap failed for SLI4 HBA doorbell registers.\n");
11995 goto out_iounmap_conf;
11997 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
11998 lpfc_sli4_bar1_register_memmap(phba, if_type);
12001 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
12002 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
12004 * Map SLI4 if type 0 HBA Doorbell Register base to
12005 * a kernel virtual address and setup the registers.
12007 phba->pci_bar2_map = pci_resource_start(pdev,
12009 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
12010 phba->sli4_hba.drbl_regs_memmap_p =
12011 ioremap(phba->pci_bar2_map,
12013 if (!phba->sli4_hba.drbl_regs_memmap_p) {
12014 dev_err(&pdev->dev,
12015 "ioremap failed for SLI4 HBA"
12016 " doorbell registers.\n");
12018 goto out_iounmap_ctrl;
12020 phba->pci_bar4_memmap_p =
12021 phba->sli4_hba.drbl_regs_memmap_p;
12022 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
12024 goto out_iounmap_all;
12027 goto out_iounmap_all;
12031 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
12032 pci_resource_start(pdev, PCI_64BIT_BAR4)) {
12034 * Map SLI4 if type 6 HBA DPP Register base to a kernel
12035 * virtual address and setup the registers.
12037 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
12038 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
12039 phba->sli4_hba.dpp_regs_memmap_p =
12040 ioremap(phba->pci_bar2_map, bar2map_len);
12041 if (!phba->sli4_hba.dpp_regs_memmap_p) {
12042 dev_err(&pdev->dev,
12043 "ioremap failed for SLI4 HBA dpp registers.\n");
12045 goto out_iounmap_ctrl;
12047 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
12050 /* Set up the EQ/CQ register handling functions now */
12052 case LPFC_SLI_INTF_IF_TYPE_0:
12053 case LPFC_SLI_INTF_IF_TYPE_2:
12054 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
12055 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
12056 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
12058 case LPFC_SLI_INTF_IF_TYPE_6:
12059 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
12060 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
12061 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
12070 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
12072 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
12074 iounmap(phba->sli4_hba.conf_regs_memmap_p);
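/*
 * BAR usage as mapped above (summary sketch):
 *
 *	if_type 0: PCI_64BIT_BAR0 config regs, PCI_64BIT_BAR2 control
 *		   regs, PCI_64BIT_BAR4 doorbells
 *	if_type 2: PCI_64BIT_BAR0 config regs only
 *	if_type 6: PCI_64BIT_BAR0 config regs, PCI_64BIT_BAR2
 *		   doorbells, PCI_64BIT_BAR4 DPP regs
 */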
12080 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
12081 * @phba: pointer to lpfc hba data structure.
12083 * This routine is invoked to unset the PCI device memory space for device
12084 * with SLI-4 interface spec.
12087 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
12090 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
12093 case LPFC_SLI_INTF_IF_TYPE_0:
12094 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
12095 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
12096 iounmap(phba->sli4_hba.conf_regs_memmap_p);
12098 case LPFC_SLI_INTF_IF_TYPE_2:
12099 iounmap(phba->sli4_hba.conf_regs_memmap_p);
12101 case LPFC_SLI_INTF_IF_TYPE_6:
12102 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
12103 iounmap(phba->sli4_hba.conf_regs_memmap_p);
12104 if (phba->sli4_hba.dpp_regs_memmap_p)
12105 iounmap(phba->sli4_hba.dpp_regs_memmap_p);
12107 case LPFC_SLI_INTF_IF_TYPE_1:
12109 dev_printk(KERN_ERR, &phba->pcidev->dev,
12110 "FATAL - unsupported SLI4 interface type - %d\n",
12117 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
12118 * @phba: pointer to lpfc hba data structure.
12120 * This routine is invoked to enable the MSI-X interrupt vectors to device
12121 * with SLI-3 interface specs.
12125 * other values - error
12128 lpfc_sli_enable_msix(struct lpfc_hba *phba)
12133 /* Set up MSI-X multi-message vectors */
12134 rc = pci_alloc_irq_vectors(phba->pcidev,
12135 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
12137 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12138 "0420 PCI enable MSI-X failed (%d)\n", rc);
12143 * Assign MSI-X vectors to interrupt handlers
12146 /* vector-0 is associated to slow-path handler */
12147 rc = request_irq(pci_irq_vector(phba->pcidev, 0),
12148 &lpfc_sli_sp_intr_handler, 0,
12149 LPFC_SP_DRIVER_HANDLER_NAME, phba);
12151 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12152 "0421 MSI-X slow-path request_irq failed "
12157 /* vector-1 is associated to fast-path handler */
12158 rc = request_irq(pci_irq_vector(phba->pcidev, 1),
12159 &lpfc_sli_fp_intr_handler, 0,
12160 LPFC_FP_DRIVER_HANDLER_NAME, phba);
12163 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12164 "0429 MSI-X fast-path request_irq failed "
12170 * Configure HBA MSI-X attention conditions to messages
12172 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12176 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12177 "0474 Unable to allocate memory for issuing "
12178 "MBOX_CONFIG_MSI command\n");
12181 rc = lpfc_config_msi(phba, pmb);
12184 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
12185 if (rc != MBX_SUCCESS) {
12186 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
12187 "0351 Config MSI mailbox command failed, "
12188 "mbxCmd x%x, mbxStatus x%x\n",
12189 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
12193 /* Free memory allocated for mailbox command */
12194 mempool_free(pmb, phba->mbox_mem_pool);
12198 /* Free memory allocated for mailbox command */
12199 mempool_free(pmb, phba->mbox_mem_pool);
12202 /* free the irq already requested */
12203 free_irq(pci_irq_vector(phba->pcidev, 1), phba);
12206 /* free the irq already requested */
12207 free_irq(pci_irq_vector(phba->pcidev, 0), phba);
12210 /* Unconfigure MSI-X capability structure */
12211 pci_free_irq_vectors(phba->pcidev);
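/*
 * Illustrative sketch of the two-vector MSI-X pattern implemented
 * above; pdev, drv and the handler names are generic placeholders,
 * not lpfc symbols:
 *
 *	rc = pci_alloc_irq_vectors(pdev, 2, 2, PCI_IRQ_MSIX);
 *	if (rc < 0)
 *		return rc;
 *	rc = request_irq(pci_irq_vector(pdev, 0), slow_handler, 0,
 *			 "drv-sp", drv);
 *	if (rc)
 *		goto free_vectors;
 *	rc = request_irq(pci_irq_vector(pdev, 1), fast_handler, 0,
 *			 "drv-fp", drv);
 *	if (rc)
 *		goto free_irq0;
 *	return 0;
 * free_irq0:
 *	free_irq(pci_irq_vector(pdev, 0), drv);
 * free_vectors:
 *	pci_free_irq_vectors(pdev);
 *	return rc;
 */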
12218 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
12219 * @phba: pointer to lpfc hba data structure.
12221 * This routine is invoked to enable the MSI interrupt mode to device with
12222 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
12223 * enable the MSI vector. The device driver is responsible for calling
12224 * request_irq() to register the MSI vector with an interrupt handler,
12225 * which is done in this function.
12229 * other values - error
12232 lpfc_sli_enable_msi(struct lpfc_hba *phba)
12236 rc = pci_enable_msi(phba->pcidev);
12238 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12239 "0012 PCI enable MSI mode success.\n");
12241 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12242 "0471 PCI enable MSI mode failed (%d)\n", rc);
12246 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
12247 0, LPFC_DRIVER_NAME, phba);
12249 pci_disable_msi(phba->pcidev);
12250 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12251 "0478 MSI request_irq failed (%d)\n", rc);
12257 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
12258 * @phba: pointer to lpfc hba data structure.
12259 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
12261 * This routine is invoked to enable device interrupt and associate driver's
12262 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
12263 * spec. Depending on the interrupt mode configured for the driver, the
12264 * driver will try to fall back from the configured interrupt mode to an
12265 * interrupt mode which is supported by the platform, kernel, and device,
12267 * in the order of: MSI-X -> MSI -> IRQ.
12271 * other values - error
12274 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
12276 uint32_t intr_mode = LPFC_INTR_ERROR;
12279 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
12280 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
12283 phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
12285 if (cfg_mode == 2) {
12286 /* Now, try to enable MSI-X interrupt mode */
12287 retval = lpfc_sli_enable_msix(phba);
12289 /* Indicate initialization to MSI-X mode */
12290 phba->intr_type = MSIX;
12295 /* Fall back to MSI if MSI-X initialization failed */
12296 if (cfg_mode >= 1 && phba->intr_type == NONE) {
12297 retval = lpfc_sli_enable_msi(phba);
12299 /* Indicate initialization to MSI mode */
12300 phba->intr_type = MSI;
12305 /* Fall back to INTx if both MSI-X/MSI initialization failed */
12306 if (phba->intr_type == NONE) {
12307 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
12308 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
12310 /* Indicate initialization to INTx mode */
12311 phba->intr_type = INTx;
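/*
 * The fallback ladder above reduces to the following sketch, where
 * enable_msix()/enable_msi() stand in for lpfc_sli_enable_msix() and
 * lpfc_sli_enable_msi() (illustrative only):
 *
 *	if (cfg_mode == 2 && !enable_msix(phba))
 *		phba->intr_type = MSIX;
 *	if (cfg_mode >= 1 && phba->intr_type == NONE && !enable_msi(phba))
 *		phba->intr_type = MSI;
 *	if (phba->intr_type == NONE &&
 *	    !request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
 *			 IRQF_SHARED, LPFC_DRIVER_NAME, phba))
 *		phba->intr_type = INTx;
 */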
12319 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
12320 * @phba: pointer to lpfc hba data structure.
12322 * This routine is invoked to disable device interrupt and disassociate the
12323 * driver's interrupt handler(s) from interrupt vector(s) to device with
12324 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
12325 * release the interrupt vector(s) for the message signaled interrupt.
12328 lpfc_sli_disable_intr(struct lpfc_hba *phba)
12332 if (phba->intr_type == MSIX)
12333 nr_irqs = LPFC_MSIX_VECTORS;
12337 for (i = 0; i < nr_irqs; i++)
12338 free_irq(pci_irq_vector(phba->pcidev, i), phba);
12339 pci_free_irq_vectors(phba->pcidev);
12341 /* Reset interrupt management states */
12342 phba->intr_type = NONE;
12343 phba->sli.slistat.sli_intr = 0;
12347 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue
12348 * @phba: pointer to lpfc hba data structure.
12349 * @id: EQ vector index or Hardware Queue index
12350 * @match: LPFC_FIND_BY_EQ = match by EQ
12351 * LPFC_FIND_BY_HDWQ = match by Hardware Queue
12352 * Return the CPU that matches the selection criteria
12355 lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
12357 struct lpfc_vector_map_info *cpup;
12360 /* Loop through all CPUs */
12361 for_each_present_cpu(cpu) {
12362 cpup = &phba->sli4_hba.cpu_map[cpu];
12364 /* If we are matching by EQ, there may be multiple CPUs
12365 * using the same vector, so select the one with
12366 * LPFC_CPU_FIRST_IRQ set.
12368 if ((match == LPFC_FIND_BY_EQ) &&
12369 (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
12373 /* If matching by HDWQ, select the first CPU that matches */
12374 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
12382 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded
12383 * @phba: pointer to lpfc hba data structure.
12384 * @cpu: CPU map index
12385 * @phys_id: CPU package physical id
12386 * @core_id: CPU core id
12389 lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
12390 uint16_t phys_id, uint16_t core_id)
12392 struct lpfc_vector_map_info *cpup;
12395 for_each_present_cpu(idx) {
12396 cpup = &phba->sli4_hba.cpu_map[idx];
12397 /* Does the cpup match the one we are looking for */
12398 if ((cpup->phys_id == phys_id) &&
12399 (cpup->core_id == core_id) &&
12408 * lpfc_assign_eq_map_info - Assigns eq for vector_map structure
12409 * @phba: pointer to lpfc hba data structure.
12410 * @eqidx: index for eq and irq vector
12411 * @flag: flags to set for vector_map structure
12412 * @cpu: cpu used to index vector_map structure
12414 * The routine assigns eq info into vector_map structure
12417 lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
12420 struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
12421 struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);
12424 cpup->flag |= flag;
12426 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12427 "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n",
12428 cpu, eqhdl->irq, cpup->eq, cpup->flag);
12432 * lpfc_cpu_map_array_init - Initialize cpu_map structure
12433 * @phba: pointer to lpfc hba data structure.
12435 * The routine initializes the cpu_map array structure
12438 lpfc_cpu_map_array_init(struct lpfc_hba *phba)
12440 struct lpfc_vector_map_info *cpup;
12441 struct lpfc_eq_intr_info *eqi;
12444 for_each_possible_cpu(cpu) {
12445 cpup = &phba->sli4_hba.cpu_map[cpu];
12446 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
12447 cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
12448 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
12449 cpup->eq = LPFC_VECTOR_MAP_EMPTY;
12451 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
12452 INIT_LIST_HEAD(&eqi->list);
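/*
 * Generic sketch of the for_each_possible_cpu()/per_cpu_ptr()
 * initialization idiom used above (my_stat is illustrative, not an
 * lpfc type):
 *
 *	struct my_stat { struct list_head list; };
 *	struct my_stat __percpu *stats;
 *	int cpu;
 *
 *	stats = alloc_percpu(struct my_stat);
 *	if (!stats)
 *		return -ENOMEM;
 *	for_each_possible_cpu(cpu)
 *		INIT_LIST_HEAD(&per_cpu_ptr(stats, cpu)->list);
 */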
12458 * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure
12459 * @phba: pointer to lpfc hba data structure.
12461 * The routine initializes the hba_eq_hdl array structure
12464 lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
12466 struct lpfc_hba_eq_hdl *eqhdl;
12469 for (i = 0; i < phba->cfg_irq_chann; i++) {
12470 eqhdl = lpfc_get_eq_hdl(i);
12471 eqhdl->irq = LPFC_IRQ_EMPTY;
12472 eqhdl->phba = phba;
12477 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
12478 * @phba: pointer to lpfc hba data structure.
12479 * @vectors: number of msix vectors allocated.
12481 * The routine will figure out the CPU affinity assignment for every
12482 * MSI-X vector allocated for the HBA.
12483 * In addition, the CPU to IO channel mapping will be calculated
12484 * and the phba->sli4_hba.cpu_map array will reflect this.
12487 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
12489 int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
12490 int max_phys_id, min_phys_id;
12491 int max_core_id, min_core_id;
12492 struct lpfc_vector_map_info *cpup;
12493 struct lpfc_vector_map_info *new_cpup;
12495 struct cpuinfo_x86 *cpuinfo;
12497 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12498 struct lpfc_hdwq_stat *c_stat;
12502 min_phys_id = LPFC_VECTOR_MAP_EMPTY;
12504 min_core_id = LPFC_VECTOR_MAP_EMPTY;
12506 /* Update CPU map with physical id and core id of each CPU */
12507 for_each_present_cpu(cpu) {
12508 cpup = &phba->sli4_hba.cpu_map[cpu];
12510 cpuinfo = &cpu_data(cpu);
12511 cpup->phys_id = cpuinfo->phys_proc_id;
12512 cpup->core_id = cpuinfo->cpu_core_id;
12513 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
12514 cpup->flag |= LPFC_CPU_MAP_HYPER;
12516 /* No distinction between CPUs for other platforms */
12518 cpup->core_id = cpu;
12521 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12522 "3328 CPU %d physid %d coreid %d flag x%x\n",
12523 cpu, cpup->phys_id, cpup->core_id, cpup->flag);
12525 if (cpup->phys_id > max_phys_id)
12526 max_phys_id = cpup->phys_id;
12527 if (cpup->phys_id < min_phys_id)
12528 min_phys_id = cpup->phys_id;
12530 if (cpup->core_id > max_core_id)
12531 max_core_id = cpup->core_id;
12532 if (cpup->core_id < min_core_id)
12533 min_core_id = cpup->core_id;
12536 /* After looking at each irq vector assigned to this pcidev, it's
12537 * possible to see that not ALL CPUs have been accounted for.
12538 * Next we will set any unassigned (unaffinitized) cpu map
12539 * entries to an IRQ on the same phys_id.
12541 first_cpu = cpumask_first(cpu_present_mask);
12542 start_cpu = first_cpu;
12544 for_each_present_cpu(cpu) {
12545 cpup = &phba->sli4_hba.cpu_map[cpu];
12547 /* Is this CPU entry unassigned */
12548 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
12549 /* Mark CPU as IRQ not assigned by the kernel */
12550 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
12552 /* If so, find a new_cpup that is on the SAME
12553 * phys_id as cpup. start_cpu will start where we
12554 * left off so all unassigned entries don't get assigned
12555 * the IRQ of the first entry.
12557 new_cpu = start_cpu;
12558 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12559 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12560 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
12561 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
12562 (new_cpup->phys_id == cpup->phys_id))
12564 new_cpu = cpumask_next(
12565 new_cpu, cpu_present_mask);
12566 if (new_cpu == nr_cpumask_bits)
12567 new_cpu = first_cpu;
12569 /* At this point, we leave the CPU as unassigned */
12572 /* We found a matching phys_id, so copy the IRQ info */
12573 cpup->eq = new_cpup->eq;
12575 /* Bump start_cpu to the next slot to minimize the
12576 * chance of having multiple unassigned CPU entries
12577 * selecting the same IRQ.
12579 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12580 if (start_cpu == nr_cpumask_bits)
12581 start_cpu = first_cpu;
12583 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12584 "3337 Set Affinity: CPU %d "
12585 "eq %d from peer cpu %d same "
12587 cpu, cpup->eq, new_cpu,
12592 /* Set any unassigned cpu map entries to an IRQ on any phys_id */
12593 start_cpu = first_cpu;
12595 for_each_present_cpu(cpu) {
12596 cpup = &phba->sli4_hba.cpu_map[cpu];
12598 /* Is this entry unassigned */
12599 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
12600 /* Mark it as IRQ not assigned by the kernel */
12601 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
12603 /* If so, find a new_cpup that's on ANY phys_id
12604 * as the cpup. start_cpu will start where we
12605 * left off so all unassigned entries don't get
12606 * assigned the IRQ of the first entry.
12608 new_cpu = start_cpu;
12609 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12610 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12611 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
12612 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
12614 new_cpu = cpumask_next(
12615 new_cpu, cpu_present_mask);
12616 if (new_cpu == nr_cpumask_bits)
12617 new_cpu = first_cpu;
12619 /* We should never leave an entry unassigned */
12620 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12621 "3339 Set Affinity: CPU %d "
12622 "eq %d UNASSIGNED\n",
12623 cpup->hdwq, cpup->eq);
12626 /* We found an available entry, copy the IRQ info */
12627 cpup->eq = new_cpup->eq;
12629 /* Bump start_cpu to the next slot to minimize the
12630 * chance of having multiple unassigned CPU entries
12631 * selecting the same IRQ.
12633 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12634 if (start_cpu == nr_cpumask_bits)
12635 start_cpu = first_cpu;
12637 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12638 "3338 Set Affinity: CPU %d "
12639 "eq %d from peer cpu %d (%d/%d)\n",
12640 cpu, cpup->eq, new_cpu,
12641 new_cpup->phys_id, new_cpup->core_id);
12645 /* Assign hdwq indices that are unique across all cpus in the map
12646 * that are also FIRST_CPUs.
12649 for_each_present_cpu(cpu) {
12650 cpup = &phba->sli4_hba.cpu_map[cpu];
12652 /* Only FIRST IRQs get a hdwq index assignment. */
12653 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
12656 /* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
12659 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12660 "3333 Set Affinity: CPU %d (phys %d core %d): "
12661 "hdwq %d eq %d flg x%x\n",
12662 cpu, cpup->phys_id, cpup->core_id,
12663 cpup->hdwq, cpup->eq, cpup->flag);
12665 /* Associate a hdwq with each cpu_map entry
12666 * This will be 1 to 1 - hdwq to cpu, unless there are fewer
12667 * hardware queues than CPUs. In that case we will just round-robin
12668 * the available hardware queues as they get assigned to CPUs.
12669 * The next_idx is the idx from the FIRST_CPU loop above to account
12670 * for irq_chann < hdwq. The idx is used for round-robin assignments
12671 * and needs to start at 0.
12676 for_each_present_cpu(cpu) {
12677 cpup = &phba->sli4_hba.cpu_map[cpu];
12679 /* FIRST cpus are already mapped. */
12680 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
12683 /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq
12684 * of the unassigned cpus to the next idx so that all
12685 * hdw queues are fully utilized.
12687 if (next_idx < phba->cfg_hdw_queue) {
12688 cpup->hdwq = next_idx;
12693 /* Not a First CPU and all hdw_queues are used. Reuse a
12694 * Hardware Queue for another CPU, so be smart about it
12695 * and pick one that has its IRQ/EQ mapped to the same phys_id
12696 * (CPU package) and core_id.
12698 new_cpu = start_cpu;
12699 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12700 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12701 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
12702 new_cpup->phys_id == cpup->phys_id &&
12703 new_cpup->core_id == cpup->core_id) {
12706 new_cpu = cpumask_next(new_cpu, cpu_present_mask);
12707 if (new_cpu == nr_cpumask_bits)
12708 new_cpu = first_cpu;
12711 /* If we can't match both phys_id and core_id,
12712 * settle for just a phys_id match.
12714 new_cpu = start_cpu;
12715 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12716 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12717 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
12718 new_cpup->phys_id == cpup->phys_id)
12721 new_cpu = cpumask_next(new_cpu, cpu_present_mask);
12722 if (new_cpu == nr_cpumask_bits)
12723 new_cpu = first_cpu;
12726 /* Otherwise just round robin on cfg_hdw_queue */
12727 cpup->hdwq = idx % phba->cfg_hdw_queue;
12731 /* We found an available entry, copy the IRQ info */
12732 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12733 if (start_cpu == nr_cpumask_bits)
12734 start_cpu = first_cpu;
12735 cpup->hdwq = new_cpup->hdwq;
12737 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12738 "3335 Set Affinity: CPU %d (phys %d core %d): "
12739 "hdwq %d eq %d flg x%x\n",
12740 cpu, cpup->phys_id, cpup->core_id,
12741 cpup->hdwq, cpup->eq, cpup->flag);
12745 * Initialize the cpu_map slots for not-present cpus in case
12746 * a cpu is hot-added. Perform a simple hdwq round robin assignment.
12749 for_each_possible_cpu(cpu) {
12750 cpup = &phba->sli4_hba.cpu_map[cpu];
12751 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12752 c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
12753 c_stat->hdwq_no = cpup->hdwq;
12755 if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
12758 cpup->hdwq = idx++ % phba->cfg_hdw_queue;
12759 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12760 c_stat->hdwq_no = cpup->hdwq;
12762 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12763 "3340 Set Affinity: not present "
12764 "CPU %d hdwq %d\n",
12768 /* The cpu_map array will be used later during initialization
12769 * when EQ / CQ / WQs are allocated and configured.
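/*
 * The hdwq spill logic above reduces to a modulo round-robin; a
 * hedged sketch for N present CPUs and Q hardware queues (Q < N),
 * with cpu_map[], MAP_EMPTY, N and Q as illustrative stand-ins:
 *
 *	for (cpu = 0, idx = 0; cpu < N; cpu++)
 *		if (cpu_map[cpu].hdwq == MAP_EMPTY)
 *			cpu_map[cpu].hdwq = idx++ % Q;
 */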
12775 * lpfc_cpuhp_get_eq
12777 * @phba: pointer to lpfc hba data structure.
12778 * @cpu: cpu going offline
12779 * @eqlist: eq list to append to
12782 lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
12783 struct list_head *eqlist)
12785 const struct cpumask *maskp;
12786 struct lpfc_queue *eq;
12787 struct cpumask *tmp;
12790 tmp = kzalloc(cpumask_size(), GFP_KERNEL);
12794 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
12795 maskp = pci_irq_get_affinity(phba->pcidev, idx);
12799 * if irq is not affinitized to the cpu going offline,
12800 * then we don't need to poll the eq attached to it.
12803 if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
12805 /* get the cpus that are online and are
12806 * affinitized to this irq vector. If the count
12807 * is more than 1 then cpuhp is not going to
12808 * shut down this vector. Since this cpu has
12809 * not gone offline yet, we need >1.
12811 cpumask_and(tmp, maskp, cpu_online_mask);
12812 if (cpumask_weight(tmp) > 1)
12815 /* Now that we have an irq to shut down, get the eq
12816 * mapped to this irq. Note: multiple hdwq's in
12817 * the software can share an eq, but eventually
12818 * only one eq will be mapped to this vector.
12820 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
12821 list_add(&eq->_poll_list, eqlist);
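/*
 * Hedged sketch of the "will this vector survive the hot-unplug?"
 * test performed above: the vector only needs driver polling when
 * the offlining cpu is the last online cpu in its affinity mask.
 *
 *	cpumask_and(tmp, irq_affinity_mask, cpu_online_mask);
 *	if (cpumask_weight(tmp) > 1)
 *		continue;	(another online cpu still services it)
 */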
12827 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
12829 if (phba->sli_rev != LPFC_SLI_REV4)
12832 cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
12835 * unregistering the instance doesn't stop the polling
12836 * timer. Wait for the poll timer to retire.
12839 del_timer_sync(&phba->cpuhp_poll_timer);
12842 static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
12844 if (phba->pport && (phba->pport->fc_flag & FC_OFFLINE_MODE))
12847 __lpfc_cpuhp_remove(phba);
12850 static void lpfc_cpuhp_add(struct lpfc_hba *phba)
12852 if (phba->sli_rev != LPFC_SLI_REV4)
12857 if (!list_empty(&phba->poll_list))
12858 mod_timer(&phba->cpuhp_poll_timer,
12859 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
12863 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
12867 static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
12869 if (phba->pport->load_flag & FC_UNLOADING) {
12874 if (phba->sli_rev != LPFC_SLI_REV4) {
12879 /* proceed with the hotplug */
12884 * lpfc_irq_set_aff - set IRQ affinity
12885 * @eqhdl: EQ handle
12886 * @cpu: cpu to set affinity
12890 lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
12892 cpumask_clear(&eqhdl->aff_mask);
12893 cpumask_set_cpu(cpu, &eqhdl->aff_mask);
12894 irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
12895 irq_set_affinity(eqhdl->irq, &eqhdl->aff_mask);
12899 * lpfc_irq_clear_aff - clear IRQ affinity
12900 * @eqhdl: EQ handle
12904 lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
12906 cpumask_clear(&eqhdl->aff_mask);
12907 irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
12911 * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event
12912 * @phba: pointer to HBA context object.
12913 * @cpu: cpu going offline/online
12914 * @offline: true, cpu is going offline. false, cpu is coming online.
12916 * If cpu is going offline, we'll try our best effort to find the next
12917 * online cpu on the phba's original_mask and migrate all offlining IRQ
12918 * affinities.
12920 * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
12922 * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on
12923 * PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
12927 lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
12929 struct lpfc_vector_map_info *cpup;
12930 struct cpumask *aff_mask;
12931 unsigned int cpu_select, cpu_next, idx;
12932 const struct cpumask *orig_mask;
12934 if (phba->irq_chann_mode == NORMAL_MODE)
12937 orig_mask = &phba->sli4_hba.irq_aff_mask;
12939 if (!cpumask_test_cpu(cpu, orig_mask))
12942 cpup = &phba->sli4_hba.cpu_map[cpu];
12944 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
12948 /* Find next online CPU on original mask */
12949 cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
12950 cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
12952 /* Found a valid CPU */
12953 if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
12954 /* Go through each eqhdl and ensure offlining
12955 * cpu aff_mask is migrated
12957 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
12958 aff_mask = lpfc_get_aff_mask(idx);
12960 /* Migrate affinity */
12961 if (cpumask_test_cpu(cpu, aff_mask))
12962 lpfc_irq_set_aff(lpfc_get_eq_hdl(idx),
12966 /* Rely on irqbalance if no online CPUs left on NUMA */
12967 for (idx = 0; idx < phba->cfg_irq_chann; idx++)
12968 lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx));
12971 /* Migrate affinity back to this CPU */
12972 lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
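/*
 * Generic sketch of "find the next online cpu in a mask, wrapping
 * around", which is what the rebalance path above asks the driver
 * helper lpfc_next_online_cpu() to provide (illustrative only):
 *
 *	unsigned int next;
 *
 *	for_each_cpu_wrap(next, mask, cpu + 1)
 *		if (cpu_online(next))
 *			break;
 *	(if the loop completes without a break, next >= nr_cpu_ids and
 *	 no online cpu remains in the mask)
 */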
12976 static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
12978 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
12979 struct lpfc_queue *eq, *next;
12984 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
12988 if (__lpfc_cpuhp_checks(phba, &retval))
12991 lpfc_irq_rebalance(phba, cpu, true);
12993 retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
12997 /* start polling on these eq's */
12998 list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
12999 list_del_init(&eq->_poll_list);
13000 lpfc_sli4_start_polling(eq);
13006 static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
13008 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
13009 struct lpfc_queue *eq, *next;
13014 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
13018 if (__lpfc_cpuhp_checks(phba, &retval))
13021 lpfc_irq_rebalance(phba, cpu, false);
13023 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
13024 n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
13026 lpfc_sli4_stop_polling(eq);
13033 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
13034 * @phba: pointer to lpfc hba data structure.
13036 * This routine is invoked to enable the MSI-X interrupt vectors to device
13037 * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them
13038 * to cpus on the system.
13040 * When cfg_irq_numa is enabled, the adapter will only allocate vectors for
13041 * the number of cpus on the same numa node as this adapter. The vectors are
13042 * allocated without requesting OS affinity mapping. A vector will be
13043 * allocated and assigned to each online and offline cpu. If the cpu is
13044 * online, then affinity will be set to that cpu. If the cpu is offline, then
13045 * affinity will be set to the nearest peer cpu within the numa node that is
13046 * online. If there are no online cpus within the numa node, affinity is not
13047 * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping
13048 * is consistent with the way cpu online/offline is handled when cfg_irq_numa
13049 * is enabled.
13051 * If numa mode is not enabled and there is more than 1 vector allocated, then
13052 * the driver relies on the managed irq interface where the OS assigns vector to
13053 * cpu affinity. The driver will then use that affinity mapping to setup its
13054 * cpu mapping table.
13058 * other values - error
13061 lpfc_sli4_enable_msix(struct lpfc_hba *phba)
13063 int vectors, rc, index;
13065 const struct cpumask *aff_mask = NULL;
13066 unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
13067 struct lpfc_vector_map_info *cpup;
13068 struct lpfc_hba_eq_hdl *eqhdl;
13069 const struct cpumask *maskp;
13070 unsigned int flags = PCI_IRQ_MSIX;
13072 /* Set up MSI-X multi-message vectors */
13073 vectors = phba->cfg_irq_chann;
13075 if (phba->irq_chann_mode != NORMAL_MODE)
13076 aff_mask = &phba->sli4_hba.irq_aff_mask;
13079 cpu_cnt = cpumask_weight(aff_mask);
13080 vectors = min(phba->cfg_irq_chann, cpu_cnt);
13082 /* cpu: iterates over aff_mask including offline or online
13083 * cpu_select: iterates over online aff_mask to set affinity
13085 cpu = cpumask_first(aff_mask);
13086 cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
13088 flags |= PCI_IRQ_AFFINITY;
13091 rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
13093 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13094 "0484 PCI enable MSI-X failed (%d)\n", rc);
13099 /* Assign MSI-X vectors to interrupt handlers */
13100 for (index = 0; index < vectors; index++) {
13101 eqhdl = lpfc_get_eq_hdl(index);
13102 name = eqhdl->handler_name;
13103 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
13104 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
13105 LPFC_DRIVER_HANDLER_NAME"%d", index);
13107 eqhdl->idx = index;
13108 rc = pci_irq_vector(phba->pcidev, index);
13110 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13111 "0489 MSI-X fast-path (%d) "
13112 "pci_irq_vec failed (%d)\n", index, rc);
13117 rc = request_irq(eqhdl->irq, &lpfc_sli4_hba_intr_handler, 0,
13120 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13121 "0486 MSI-X fast-path (%d) "
13122 "request_irq failed (%d)\n", index, rc);
13127 /* If found a neighboring online cpu, set affinity */
13128 if (cpu_select < nr_cpu_ids)
13129 lpfc_irq_set_aff(eqhdl, cpu_select);
13131 /* Assign EQ to cpu_map */
13132 lpfc_assign_eq_map_info(phba, index,
13133 LPFC_CPU_FIRST_IRQ,
13136 /* Iterate to next offline or online cpu in aff_mask */
13137 cpu = cpumask_next(cpu, aff_mask);
13139 /* Find next online cpu in aff_mask to set affinity */
13140 cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
13141 } else if (vectors == 1) {
13142 cpu = cpumask_first(cpu_present_mask);
13143 lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
13146 maskp = pci_irq_get_affinity(phba->pcidev, index);
13148 /* Loop through all CPUs associated with vector index */
13149 for_each_cpu_and(cpu, maskp, cpu_present_mask) {
13150 cpup = &phba->sli4_hba.cpu_map[cpu];
13152 /* If this is the first CPU that's assigned to
13153 * this vector, set LPFC_CPU_FIRST_IRQ.
13155 * With certain platforms it's possible that irq
13156 * vectors are affinitized to all the cpus.
13157 * This can result in each cpu_map.eq being set
13158 * to the last vector, resulting in overwrite
13159 * of all the previous cpu_map.eq. Ensure that
13160 * each vector receives a place in cpu_map.
13161 * Later call to lpfc_cpu_affinity_check will
13162 * ensure we are nicely balanced out.
13164 if (cpup->eq != LPFC_VECTOR_MAP_EMPTY)
13166 lpfc_assign_eq_map_info(phba, index,
13167 LPFC_CPU_FIRST_IRQ,
13174 if (vectors != phba->cfg_irq_chann) {
13175 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13176 "3238 Reducing IO channels to match number of "
13177 "MSI-X vectors, requested %d got %d\n",
13178 phba->cfg_irq_chann, vectors);
13179 if (phba->cfg_irq_chann > vectors)
13180 phba->cfg_irq_chann = vectors;
13186 /* free the irq already requested */
13187 for (--index; index >= 0; index--) {
13188 eqhdl = lpfc_get_eq_hdl(index);
13189 lpfc_irq_clear_aff(eqhdl);
13190 free_irq(eqhdl->irq, eqhdl);
13193 /* Unconfigure MSI-X capability structure */
13194 pci_free_irq_vectors(phba->pcidev);
13201 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
13202 * @phba: pointer to lpfc hba data structure.
13204 * This routine is invoked to enable the MSI interrupt mode to device with
13205 * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is
13206 * called to enable the MSI vector. The device driver is responsible for
13207 * calling request_irq() to register the MSI vector with an interrupt
13208 * handler, which is done in this function.
13212 * other values - error
13215 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
13219 struct lpfc_hba_eq_hdl *eqhdl;
13221 rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
13222 PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
13224 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13225 "0487 PCI enable MSI mode success.\n");
13227 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13228 "0488 PCI enable MSI mode failed (%d)\n", rc);
13229 return rc ? rc : -1;
13232 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
13233 0, LPFC_DRIVER_NAME, phba);
13235 pci_free_irq_vectors(phba->pcidev);
13236 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13237 "0490 MSI request_irq failed (%d)\n", rc);
13241 eqhdl = lpfc_get_eq_hdl(0);
13242 rc = pci_irq_vector(phba->pcidev, 0);
13244 pci_free_irq_vectors(phba->pcidev);
13245 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13246 "0496 MSI pci_irq_vec failed (%d)\n", rc);
13251 cpu = cpumask_first(cpu_present_mask);
13252 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
13254 for (index = 0; index < phba->cfg_irq_chann; index++) {
13255 eqhdl = lpfc_get_eq_hdl(index);
13256 eqhdl->idx = index;
13263 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
13264 * @phba: pointer to lpfc hba data structure.
13265 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
13267 * This routine is invoked to enable device interrupt and associate driver's
13268 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
13269 * interface spec. Depending on the interrupt mode configured for the
13270 * driver, the driver will try to fall back from the configured interrupt
13271 * mode to an interrupt mode supported by the platform, kernel, and device,
13273 * in the order of: MSI-X -> MSI -> IRQ.
13276 * Interrupt mode (2, 1, 0) - successful
13277 * LPFC_INTR_ERROR - error
13280 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
13282 uint32_t intr_mode = LPFC_INTR_ERROR;
13285 if (cfg_mode == 2) {
13286 /* Preparation before conf_msi mbox cmd */
13289 /* Now, try to enable MSI-X interrupt mode */
13290 retval = lpfc_sli4_enable_msix(phba);
13292 /* Indicate initialization to MSI-X mode */
13293 phba->intr_type = MSIX;
13299 /* Fall back to MSI if MSI-X initialization failed */
13300 if (cfg_mode >= 1 && phba->intr_type == NONE) {
13301 retval = lpfc_sli4_enable_msi(phba);
13303 /* Indicate initialization to MSI mode */
13304 phba->intr_type = MSI;
13309 /* Fall back to INTx if both MSI-X/MSI initialization failed */
13310 if (phba->intr_type == NONE) {
13311 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
13312 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
13314 struct lpfc_hba_eq_hdl *eqhdl;
13317 /* Indicate initialization to INTx mode */
13318 phba->intr_type = INTx;
13321 eqhdl = lpfc_get_eq_hdl(0);
13322 retval = pci_irq_vector(phba->pcidev, 0);
13324 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13325 "0502 INTR pci_irq_vec failed (%d)\n",
13327 return LPFC_INTR_ERROR;
13329 eqhdl->irq = retval;
13331 cpu = cpumask_first(cpu_present_mask);
13332 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
13334 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
13335 eqhdl = lpfc_get_eq_hdl(idx);
13344 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
13345 * @phba: pointer to lpfc hba data structure.
13347 * This routine is invoked to disable device interrupt and disassociate
13348 * the driver's interrupt handler(s) from interrupt vector(s) to device
13349 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
13350 * will release the interrupt vector(s) for the message signaled interrupt.
13353 lpfc_sli4_disable_intr(struct lpfc_hba *phba)
13355 /* Disable the currently initialized interrupt mode */
13356 if (phba->intr_type == MSIX) {
13358 struct lpfc_hba_eq_hdl *eqhdl;
13360 /* Free up MSI-X multi-message vectors */
13361 for (index = 0; index < phba->cfg_irq_chann; index++) {
13362 eqhdl = lpfc_get_eq_hdl(index);
13363 lpfc_irq_clear_aff(eqhdl);
13364 free_irq(eqhdl->irq, eqhdl);
13367 free_irq(phba->pcidev->irq, phba);
13370 pci_free_irq_vectors(phba->pcidev);
13372 /* Reset interrupt management states */
13373 phba->intr_type = NONE;
13374 phba->sli.slistat.sli_intr = 0;
13378 * lpfc_unset_hba - Unset SLI3 hba device initialization
13379 * @phba: pointer to lpfc hba data structure.
13381 * This routine is invoked to unset the HBA device initialization steps to
13382 * a device with SLI-3 interface spec.
13385 lpfc_unset_hba(struct lpfc_hba *phba)
13387 struct lpfc_vport *vport = phba->pport;
13388 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
13390 spin_lock_irq(shost->host_lock);
13391 vport->load_flag |= FC_UNLOADING;
13392 spin_unlock_irq(shost->host_lock);
13394 kfree(phba->vpi_bmask);
13395 kfree(phba->vpi_ids);
13397 lpfc_stop_hba_timers(phba);
13399 phba->pport->work_port_events = 0;
13401 lpfc_sli_hba_down(phba);
13403 lpfc_sli_brdrestart(phba);
13405 lpfc_sli_disable_intr(phba);
13411 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
13412 * @phba: Pointer to HBA context object.
13414 * This function is called in the SLI4 code path to wait for completion
13415 * of device's XRIs exchange busy. It will check the XRI exchange busy
13416 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
13417 * that, it will check the XRI exchange busy on outstanding FCP and ELS
13418 * I/Os every 30 seconds, log error message, and wait forever. Only when
13419 * all XRI exchange busy complete, the driver unload shall proceed with
13420 * invoking the function reset ioctl mailbox command to the CNA and
13421 * the rest of the driver unload resource release.
13424 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
13426 struct lpfc_sli4_hdw_queue *qp;
13429 int io_xri_cmpl = 1;
13430 int nvmet_xri_cmpl = 1;
13431 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
13433 /* Driver just aborted IOs during the hba_unset process. Pause
13434 * here to give the HBA time to complete the IO and get entries
13435 * into the abts lists.
13437 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
13439 /* Wait for NVME pending IO to flush back to transport. */
13440 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
13441 lpfc_nvme_wait_for_io_drain(phba);
13444 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
13445 qp = &phba->sli4_hba.hdwq[idx];
13446 io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
13447 if (!io_xri_cmpl) /* if list is NOT empty */
13453 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13455 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
13458 while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
13459 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
13460 if (!nvmet_xri_cmpl)
13461 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13462 "6424 NVMET XRI exchange busy "
13463 "wait time: %d seconds.\n",
13466 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13467 "6100 IO XRI exchange busy "
13468 "wait time: %d seconds.\n",
13471 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13472 "2878 ELS XRI exchange busy "
13473 "wait time: %d seconds.\n",
13475 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
13476 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
13478 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
13479 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
13483 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
13484 qp = &phba->sli4_hba.hdwq[idx];
13485 io_xri_cmpl = list_empty(
13486 &qp->lpfc_abts_io_buf_list);
13487 if (!io_xri_cmpl) /* if list is NOT empty */
13493 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13494 nvmet_xri_cmpl = list_empty(
13495 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
13498 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
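/*
 * The wait loop above follows a common escalating-poll pattern; a
 * minimal generic sketch, with T1/T2/TMO and done() as illustrative
 * stand-ins for the LPFC_XRI_EXCH_BUSY_WAIT_* constants and the
 * list_empty() checks:
 *
 *	while (!done()) {
 *		if (waited > TMO) {
 *			log_busy_warning();
 *			msleep(T2);	(slow poll after timeout)
 *			waited += T2;
 *		} else {
 *			msleep(T1);	(fast poll at first)
 *			waited += T1;
 *		}
 *	}
 */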
13504 * lpfc_sli4_hba_unset - Unset the fcoe hba
13505 * @phba: Pointer to HBA context object.
13507 * This function is called in the SLI4 code path to reset the HBA's FCoE
13508 * function. The caller is not required to hold any lock. This routine
13509 * issues PCI function reset mailbox command to reset the FCoE function.
13510 * At the end of the function, it calls lpfc_hba_down_post function to
13511 * free any pending commands.
13514 lpfc_sli4_hba_unset(struct lpfc_hba *phba)
13517 LPFC_MBOXQ_t *mboxq;
13518 struct pci_dev *pdev = phba->pcidev;
13520 lpfc_stop_hba_timers(phba);
13521 hrtimer_cancel(&phba->cmf_timer);
13524 phba->sli4_hba.intr_enable = 0;
13527 * Gracefully wait out the potential current outstanding asynchronous
13528 * mailbox command.
13531 /* First, block any pending async mailbox command from being posted */
13532 spin_lock_irq(&phba->hbalock);
13533 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
13534 spin_unlock_irq(&phba->hbalock);
13535 /* Now, trying to wait it out if we can */
13536 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
13538 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
13541 /* Forcefully release the outstanding mailbox command if timed out */
13542 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
13543 spin_lock_irq(&phba->hbalock);
13544 mboxq = phba->sli.mbox_active;
13545 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
13546 __lpfc_mbox_cmpl_put(phba, mboxq);
13547 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13548 phba->sli.mbox_active = NULL;
13549 spin_unlock_irq(&phba->hbalock);
13552 /* Abort all iocbs associated with the hba */
13553 lpfc_sli_hba_iocb_abort(phba);
13555 if (!pci_channel_offline(phba->pcidev))
13556 /* Wait for completion of device XRI exchange busy */
13557 lpfc_sli4_xri_exchange_busy_wait(phba);
13559 /* per-phba callback de-registration for hotplug event */
13561 lpfc_cpuhp_remove(phba);
13563 /* Disable PCI subsystem interrupt */
13564 lpfc_sli4_disable_intr(phba);
13566 /* Disable SR-IOV if enabled */
13567 if (phba->cfg_sriov_nr_virtfn)
13568 pci_disable_sriov(pdev);
13570 /* Stopping the kthread shall trigger work_done one more time */
13571 kthread_stop(phba->worker_thread);
13573 /* Disable FW logging to host memory */
13574 lpfc_ras_stop_fwlog(phba);
13576 /* Reset SLI4 HBA FCoE function */
13577 lpfc_pci_function_reset(phba);
13579 /* release all queue allocated resources. */
13580 lpfc_sli4_queue_destroy(phba);
13582 /* Free RAS DMA memory */
13583 if (phba->ras_fwlog.ras_enabled)
13584 lpfc_sli4_ras_dma_free(phba);
13586 /* Stop the SLI4 device port */
13588 phba->pport->work_port_events = 0;
13592 lpfc_cgn_crc32(uint32_t crc, u8 byte)
13597 for (bit = 0; bit < 8; bit++) {
13598 msb = (crc >> 31) & 1;
13601 if (msb ^ (byte & 1)) {
13602 crc ^= LPFC_CGN_CRC32_MAGIC_NUMBER;
13611 lpfc_cgn_reverse_bits(uint32_t wd)
13613 uint32_t result = 0;
13616 for (i = 0; i < 32; i++) {
13618 result |= (1 & (wd >> i));
13624 * The routine corresponds to the algorithm the HBA firmware
13625 * uses to validate the data integrity.
13628 lpfc_cgn_calc_crc32(void *ptr, uint32_t byteLen, uint32_t crc)
13632 uint8_t *data = (uint8_t *)ptr;
13634 for (i = 0; i < byteLen; ++i)
13635 crc = lpfc_cgn_crc32(crc, data[i]);
13637 result = ~lpfc_cgn_reverse_bits(crc);
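/*
 * Usage sketch for the CRC helpers above, mirroring the driver's own
 * call in lpfc_init_congestion_buf():
 *
 *	uint32_t crc;
 *
 *	crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
 *				  LPFC_CGN_CRC32_SEED);
 *	cp->cgn_info_crc = cpu_to_le32(crc);
 *
 * Every byte is run through the bitwise CRC, then the remainder is
 * bit-reflected and complemented to match the firmware's check.
 */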
13642 lpfc_init_congestion_buf(struct lpfc_hba *phba)
13644 struct lpfc_cgn_info *cp;
13645 struct timespec64 cmpl_time;
13650 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
13651 "6235 INIT Congestion Buffer %p\n", phba->cgn_i);
13655 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
13657 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
13658 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
13659 atomic_set(&phba->cgn_sync_alarm_cnt, 0);
13660 atomic_set(&phba->cgn_sync_warn_cnt, 0);
13662 atomic_set(&phba->cgn_driver_evt_cnt, 0);
13663 atomic_set(&phba->cgn_latency_evt_cnt, 0);
13664 atomic64_set(&phba->cgn_latency_evt, 0);
13665 phba->cgn_evt_minute = 0;
13666 phba->hba_flag &= ~HBA_CGN_DAY_WRAP;
13668 memset(cp, 0xff, offsetof(struct lpfc_cgn_info, cgn_stat));
13669 cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ);
13670 cp->cgn_info_version = LPFC_CGN_INFO_V3;
13672 /* cgn parameters */
13673 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
13674 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
13675 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
13676 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
13678 ktime_get_real_ts64(&cmpl_time);
13679 time64_to_tm(cmpl_time.tv_sec, 0, &broken);
13681 cp->cgn_info_month = broken.tm_mon + 1;
13682 cp->cgn_info_day = broken.tm_mday;
13683 cp->cgn_info_year = broken.tm_year - 100; /* relative to 2000 */
13684 cp->cgn_info_hour = broken.tm_hour;
13685 cp->cgn_info_minute = broken.tm_min;
13686 cp->cgn_info_second = broken.tm_sec;
13688 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
13689 "2643 CGNInfo Init: Start Time "
13690 "%d/%d/%d %d:%d:%d\n",
13691 cp->cgn_info_day, cp->cgn_info_month,
13692 cp->cgn_info_year, cp->cgn_info_hour,
13693 cp->cgn_info_minute, cp->cgn_info_second);
13695 /* Fill in default LUN qdepth */
13697 size = (uint16_t)(phba->pport->cfg_lun_queue_depth);
13698 cp->cgn_lunq = cpu_to_le16(size);
13701 /* last used Index initialized to 0xff already */
13703 cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
13704 cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
13705 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
13706 cp->cgn_info_crc = cpu_to_le32(crc);
13708 phba->cgn_evt_timestamp = jiffies +
13709 msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
13713 lpfc_init_congestion_stat(struct lpfc_hba *phba)
13715 struct lpfc_cgn_info *cp;
13716 struct timespec64 cmpl_time;
13720 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
13721 "6236 INIT Congestion Stat %p\n", phba->cgn_i);
13726 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
13727 memset(&cp->cgn_stat, 0, sizeof(cp->cgn_stat));
13729 ktime_get_real_ts64(&cmpl_time);
13730 time64_to_tm(cmpl_time.tv_sec, 0, &broken);
13732 cp->cgn_stat_month = broken.tm_mon + 1;
13733 cp->cgn_stat_day = broken.tm_mday;
13734 cp->cgn_stat_year = broken.tm_year - 100; /* relative to 2000 */
13735 cp->cgn_stat_hour = broken.tm_hour;
13736 cp->cgn_stat_minute = broken.tm_min;
13738 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
13739 "2647 CGNstat Init: Start Time "
13740 "%d/%d/%d %d:%d\n",
13741 cp->cgn_stat_day, cp->cgn_stat_month,
13742 cp->cgn_stat_year, cp->cgn_stat_hour,
13743 cp->cgn_stat_minute);
13745 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
13746 cp->cgn_info_crc = cpu_to_le32(crc);
13750 * __lpfc_reg_congestion_buf - register congestion info buffer with HBA
13751 * @phba: Pointer to hba context object.
13752 * @reg: flag to determine register or unregister.
13755 __lpfc_reg_congestion_buf(struct lpfc_hba *phba, int reg)
13757 struct lpfc_mbx_reg_congestion_buf *reg_congestion_buf;
13758 union lpfc_sli4_cfg_shdr *shdr;
13759 uint32_t shdr_status, shdr_add_status;
13760 LPFC_MBOXQ_t *mboxq;
13766 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13768 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
13769 "2641 REG_CONGESTION_BUF mbox allocation fail: "
13770 "HBA state x%x reg %d\n",
13771 phba->pport->port_state, reg);
13775 length = (sizeof(struct lpfc_mbx_reg_congestion_buf) -
13776 sizeof(struct lpfc_sli4_cfg_mhdr));
13777 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
13778 LPFC_MBOX_OPCODE_REG_CONGESTION_BUF, length,
13779 LPFC_SLI4_MBX_EMBED);
13780 reg_congestion_buf = &mboxq->u.mqe.un.reg_congestion_buf;
13781 bf_set(lpfc_mbx_reg_cgn_buf_type, reg_congestion_buf, 1);
13783 bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 1);
13785 bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 0);
13786 reg_congestion_buf->length = sizeof(struct lpfc_cgn_info);
13787 reg_congestion_buf->addr_lo =
13788 putPaddrLow(phba->cgn_i->phys);
13789 reg_congestion_buf->addr_hi =
13790 putPaddrHigh(phba->cgn_i->phys);
13792 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
13793 shdr = (union lpfc_sli4_cfg_shdr *)
13794 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
13795 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13796 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
13798 mempool_free(mboxq, phba->mbox_mem_pool);
13799 if (shdr_status || shdr_add_status || rc) {
13800 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13801 "2642 REG_CONGESTION_BUF mailbox "
13802 "failed with status x%x add_status x%x,"
13803 " mbx status x%x reg %d\n",
13804 shdr_status, shdr_add_status, rc, reg);
13811 lpfc_unreg_congestion_buf(struct lpfc_hba *phba)
13813 lpfc_cmf_stop(phba);
13814 return __lpfc_reg_congestion_buf(phba, 0);
13818 lpfc_reg_congestion_buf(struct lpfc_hba *phba)
13820 return __lpfc_reg_congestion_buf(phba, 1);
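/*
 * __lpfc_reg_congestion_buf() above shows the standard SLI4 config
 * mailbox completion check; condensed sketch (status/add_status are
 * local names, the rest mirrors the code above):
 *
 *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 *	shdr = (union lpfc_sli4_cfg_shdr *)
 *		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
 *	status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
 *	add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
 *	mempool_free(mboxq, phba->mbox_mem_pool);
 *	if (status || add_status || rc)
 *		return -ENXIO;
 *	return 0;
 */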
13824 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
13825 * @phba: Pointer to HBA context object.
13826 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
13828 * This function is called in the SLI4 code path to read the port's
13829 * sli4 capabilities.
13831 * This function may be called from any context that can block-wait
13832 * for the completion. The expectation is that this routine is called
13833 * typically from probe_one or from the online routine.
13836 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
13839 struct lpfc_mqe *mqe = &mboxq->u.mqe;
13840 struct lpfc_pc_sli4_params *sli4_params;
13843 bool exp_wqcq_pages = true;
13844 struct lpfc_sli4_parameters *mbx_sli4_parameters;
13847 * By default, the driver assumes the SLI4 port requires RPI
13848 * header postings. The SLI4_PARAM response will correct this
13849 * if needed.
13851 phba->sli4_hba.rpi_hdrs_in_use = 1;
13853 /* Read the port's SLI4 Config Parameters */
13854 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
13855 sizeof(struct lpfc_sli4_cfg_mhdr));
13856 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
13857 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
13858 length, LPFC_SLI4_MBX_EMBED);
13859 if (!phba->sli4_hba.intr_enable)
13860 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
13862 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
13863 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
13867 sli4_params = &phba->sli4_hba.pc_sli4_params;
13868 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
13869 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
13870 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
13871 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
13872 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
13873 mbx_sli4_parameters);
13874 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
13875 mbx_sli4_parameters);
13876 if (bf_get(cfg_phwq, mbx_sli4_parameters))
13877 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
13879 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
13880 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
13881 sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope,
13882 mbx_sli4_parameters);
13883 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
13884 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
13885 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
13886 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
13887 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
13888 sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
13889 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
13890 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
13891 sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
13892 sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
13893 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
13894 mbx_sli4_parameters);
13895 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
13896 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
13897 mbx_sli4_parameters);
13898 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
13899 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
13900 sli4_params->mi_cap = bf_get(cfg_mi_ver, mbx_sli4_parameters);
13902 /* Check for Extended Pre-Registered SGL support */
13903 phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);
13905 /* Check for firmware nvme support */
13906 rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
13907 bf_get(cfg_xib, mbx_sli4_parameters));
13910 /* Save this to indicate the Firmware supports NVME */
13911 sli4_params->nvme = 1;
13913 /* Firmware NVME support, check driver FC4 NVME support */
13914 if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
13915 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
13916 "6133 Disabling NVME support: "
13917 "FC4 type not supported: x%x\n",
13918 phba->cfg_enable_fc4_type);
13922 /* No firmware NVME support, check driver FC4 NVME support */
13923 sli4_params->nvme = 0;
13924 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13925 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
13926 "6101 Disabling NVME support: Not "
13927 "supported by firmware (%d %d) x%x\n",
13928 bf_get(cfg_nvme, mbx_sli4_parameters),
13929 bf_get(cfg_xib, mbx_sli4_parameters),
13930 phba->cfg_enable_fc4_type);
13932 phba->nvmet_support = 0;
13933 phba->cfg_nvmet_mrq = 0;
13934 phba->cfg_nvme_seg_cnt = 0;
13936 /* If no FC4 type support, move to just SCSI support */
13937 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
13939 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
13943 /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
13944 * accommodate 512K and 1M IOs in a single nvme buf.
13946 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
13947 phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
13949 /* Enable embedded Payload BDE if support is indicated */
13950 if (bf_get(cfg_pbde, mbx_sli4_parameters))
13951 phba->cfg_enable_pbde = 1;
13953 phba->cfg_enable_pbde = 0;
13956 * To support Suppress Response feature we must satisfy 3 conditions.
13957 * lpfc_suppress_rsp module parameter must be set (default).
13958 * In SLI4-Parameters Descriptor:
13959 * Extended Inline Buffers (XIB) must be supported.
13960 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported
13961 * (double negative).
13963 if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
13964 !(bf_get(cfg_nosr, mbx_sli4_parameters)))
13965 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
13967 phba->cfg_suppress_rsp = 0;
13969 if (bf_get(cfg_eqdr, mbx_sli4_parameters))
13970 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;
13972 /* Make sure that sge_supp_len can be handled by the driver */
13973 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
13974 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
13976 rc = dma_set_max_seg_size(&phba->pcidev->dev, sli4_params->sge_supp_len);
13977 if (unlikely(rc)) {
13978 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13979 "6400 Can't set dma maximum segment size\n");
13984 * Check whether the adapter supports an embedded copy of the
13985 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order
13986 * to use this option, 128-byte WQEs must be used.
13988 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
13989 phba->fcp_embed_io = 1;
13991 phba->fcp_embed_io = 0;
13993 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
13994 "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
13995 bf_get(cfg_xib, mbx_sli4_parameters),
13996 phba->cfg_enable_pbde,
13997 phba->fcp_embed_io, sli4_params->nvme,
13998 phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
14000 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
14001 LPFC_SLI_INTF_IF_TYPE_2) &&
14002 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
14003 LPFC_SLI_INTF_FAMILY_LNCR_A0))
14004 exp_wqcq_pages = false;
14006 if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
14007 (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
14009 (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
14010 phba->enab_exp_wqcq_pages = 1;
14012 phba->enab_exp_wqcq_pages = 0;
14014 * Check if the SLI port supports MDS Diagnostics
14016 if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
14017 phba->mds_diags_support = 1;
14019 phba->mds_diags_support = 0;
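/*
 * bf_get()/bf_set() used throughout this routine are lpfc's bitfield
 * accessors (defined in lpfc_hw4.h); conceptually, with name_WORD,
 * name_SHIFT and name_MASK expanded per field (a paraphrase, not the
 * exact macro text):
 *
 *	val  = (word >> name_SHIFT) & name_MASK;		(bf_get)
 *	word = (word & ~(name_MASK << name_SHIFT)) |
 *	       ((val & name_MASK) << name_SHIFT);		(bf_set)
 *
 * Each cfg_* argument above selects one such field of the SLI4
 * parameters descriptor returned by the mailbox command.
 */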
14022 * Check if the SLI port supports NSLER
14024 if (bf_get(cfg_nsler, mbx_sli4_parameters))
14033 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
14034 * @pdev: pointer to PCI device
14035 * @pid: pointer to PCI device identifier
14037 * This routine is to be called to attach a device with SLI-3 interface spec
14038 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
14039 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
14040 * information of the device and driver to see if the driver states that it can
14041 * support this kind of device. If the match is successful, the driver core
14042 * invokes this routine. If this routine determines it can claim the HBA, it
14043 * does all the initialization that it needs to do to handle the HBA properly.
14046 * 0 - driver can claim the device
14047 * negative value - driver can not claim the device
14050 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
14052 struct lpfc_hba *phba;
14053 struct lpfc_vport *vport = NULL;
14054 struct Scsi_Host *shost = NULL;
14056 uint32_t cfg_mode, intr_mode;
	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-3 specific device PCI memory space */
	error = lpfc_sli_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1402 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up SLI-3 specific device driver resources */
	error = lpfc_sli_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1404 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Initialize and populate the iocb list per host */
	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1405 Failed to initialize iocb list.\n");
		goto out_unset_driver_resource_s3;
	}

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1406 Failed to set up driver resource.\n");
		goto out_free_iocb_list;
	}

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1407 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1476 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;
	while (true) {
		/* Put device to a known state before enabling interrupt */
		lpfc_stop_port(phba);
		/* Configure and enable interrupt */
		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0431 Failed to enable interrupt.\n");
			error = -ENODEV;
			goto out_free_sysfs_attr;
		}
		/* SLI-3 HBA setup */
		if (lpfc_sli_hba_setup(phba)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"1477 Failed to set up hba\n");
			error = -ENODEV;
			goto out_remove_device;
		}

		/* Wait 50ms for the interrupts of previous mailbox commands */
		msleep(50);
		/* Check active interrupts on message signaled interrupts */
		if (intr_mode == 0 ||
		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
			/* Log the current active interrupt mode */
			phba->intr_mode = intr_mode;
			lpfc_log_intr_mode(phba, intr_mode);
			break;
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0447 Configure interrupt mode (%d) "
					"failed active interrupt test.\n",
					intr_mode);
			/* Disable the current interrupt mode */
			lpfc_sli_disable_intr(phba);
			/* Try next level of interrupt mode */
			cfg_mode = --intr_mode;
		}
	}

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	return 0;

out_remove_device:
	lpfc_unset_hba(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
	lpfc_free_iocb_list(phba);
out_unset_driver_resource_s3:
	lpfc_sli_driver_resource_unset(phba);
out_unset_pci_mem_s3:
	lpfc_sli_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}
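
/*
 * Note on the retry loop above: cfg_use_msi selects the starting interrupt
 * mode (2 = MSI-X, 1 = MSI, 0 = INTx), and each failed active-interrupt
 * test disables the current mode and steps down one level via
 * "cfg_mode = --intr_mode" until legacy INTx (0) is reached.
 */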
/**
 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
 * @pdev: pointer to PCI device
 *
 * This routine is to be called to detach a device with SLI-3 interface
 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * removed from PCI bus, it performs all the necessary cleanup for the HBA
 * device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;

	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/* Clean up all nodes, mailboxes and IOs. */
	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA.
	 */

	/* HBA interrupt will be disabled after this call */
	lpfc_sli_hba_down(phba);
	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);
	/* Final cleanup of txcmplq and reset the HBA */
	lpfc_sli_brdrestart(phba);

	kfree(phba->vpi_bmask);
	kfree(phba->vpi_ids);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->port_list_lock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->port_list_lock);

	lpfc_debugfs_terminate(vport);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Disable interrupt */
	lpfc_sli_disable_intr(phba);

	scsi_host_put(shost);

	/*
	 * Call scsi_free before mem_free since scsi bufs are released to their
	 * corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_free_iocb_list(phba);

	lpfc_mem_free_all(phba);

	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);

	/* Free resources associated with SLI2 interface */
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* unmap adapter SLIM and Control Registers */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	lpfc_hba_free(phba);

	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
/**
 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
 * @dev_d: pointer to device
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) to device with SLI-3 interface spec. When
 * PM invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off device's interrupt and DMA,
 * and bringing the device offline. Note that as the driver implements the
 * minimum PM requirements to a power-aware driver's PM support for the
 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
 * to the suspend() method call will be treated as SUSPEND and the driver will
 * fully reinitialize its device during resume() method call, the driver will
 * set device to PCI_D3hot state in PCI config space instead of setting it
 * according to the @msg provided by the PM.
 *
 * Return code
 *	0 - driver suspended the device
 *	Error otherwise
 **/
static int __maybe_unused
lpfc_pci_suspend_one_s3(struct device *dev_d)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0473 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli_disable_intr(phba);

	return 0;
}
/**
 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
 * @dev_d: pointer to device
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) to device with SLI-3 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state and
 * fully reinitializes the device and brings it online. Note that as the
 * driver implements the minimum PM requirements to a power-aware driver's
 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
 * driver will fully reinitialize its device during resume() method call,
 * the device will be set to PCI_D0 directly in PCI config space before
 * restoring the state.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error otherwise
 **/
static int __maybe_unused
lpfc_pci_resume_one_s3(struct device *dev_d)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0452 PCI device Power Management resume.\n");

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0434 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Init cpu_map array */
	lpfc_cpu_map_array_init(phba);
	/* Init hba_eq_hdl array */
	lpfc_hba_eq_hdl_array_init(phba);
	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0430 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}
/**
 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2723 PCI channel I/O abort preparing for recovery\n");

	/*
	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
	 * and let the SCSI mid-layer retry them to recover.
	 */
	lpfc_sli_abort_fcp_rings(phba);
}
/**
 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2710 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
	lpfc_sli_flush_io_rings(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli_disable_intr(phba);
	pci_disable_device(phba->pcidev);
}
/**
 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot permanently
 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
 * pending I/Os.
 **/
static void
lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2711 PCI channel permanent disable for failure\n");
	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);
	lpfc_sli4_prep_dev_for_reset(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os */
	lpfc_sli_flush_io_rings(phba);
}
/**
 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for I/O error handling to
 * device with SLI-3 interface spec. This function is called by the PCI
 * subsystem after a PCI bus error affecting this device has been detected.
 * When this function is invoked, it will need to stop all the I/Os and
 * interrupt(s) to the device. Once that is done, it will return
 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
 * as desired.
 *
 * Return codes
 *	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0472 Unknown PCI error state: x%x\n", state);
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}
/**
 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to
 * device with SLI-3 interface spec. This is called after PCI bus has been
 * reset to restart the PCI card from scratch, as if from a cold-boot.
 * During the PCI subsystem error recovery, after driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method
 * to recover the device. This function will initialize the HBA device,
 * enable the interrupt, but it will just put the HBA to offline state
 * without passing any I/O traffic.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
		       "PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0427 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Take device offline, it will perform cleanup */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-3 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this device
 * again.
 **/
static void
lpfc_io_resume_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/* Bring device online, it will be no-op for non-fatal error resume */
	lpfc_online(phba);
}
/**
 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * Returns the number of ELS/CT IOCBs to reserve.
 **/
int
lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
{
	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (max_xri <= 100)
			return 10;
		else if (max_xri <= 256)
			return 25;
		else if (max_xri <= 512)
			return 50;
		else if (max_xri <= 1024)
			return 100;
		else if (max_xri <= 1536)
			return 150;
		else if (max_xri <= 2048)
			return 200;
		else
			return 250;
	} else
		return 0;
}
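
/*
 * Example of the tiering above: an SLI4 port reporting max_xri = 1024
 * reserves 100 IOCBs for ELS/CT traffic, while non-SLI4 (SLI-3) revisions
 * reserve none here.
 */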
/**
 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * Returns the number of ELS/CT + NVMET IOCBs to reserve.
 **/
int
lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
{
	int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);

	if (phba->nvmet_support)
		max_xri += LPFC_NVMET_BUF_POST;
	return max_xri;
}
static int
lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
	uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
	const struct firmware *fw)
{
	int rc;
	u8 sli_family;

	sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
	/* Three cases: (1) FW was not supported on the detected adapter.
	 * (2) FW update has been locked out administratively.
	 * (3) Some other error during FW update.
	 * In each case, an unmaskable message is written to the console
	 * for admin diagnosis.
	 */
	if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
	    (sli_family == LPFC_SLI_INTF_FAMILY_G6 &&
	     magic_number != MAGIC_NUMBER_G6) ||
	    (sli_family == LPFC_SLI_INTF_FAMILY_G7 &&
	     magic_number != MAGIC_NUMBER_G7) ||
	    (sli_family == LPFC_SLI_INTF_FAMILY_G7P &&
	     magic_number != MAGIC_NUMBER_G7P)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3030 This firmware version is not supported on"
				" this HBA model. Device:%x Magic:%x Type:%x "
				"ID:%x Size %d %zd\n",
				phba->pcidev->device, magic_number, ftype, fid,
				fsize, fw->size);
		rc = -EINVAL;
	} else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3021 Firmware downloads have been prohibited "
				"by a system configuration setting on "
				"Device:%x Magic:%x Type:%x ID:%x Size %d "
				"%zd\n",
				phba->pcidev->device, magic_number, ftype, fid,
				fsize, fw->size);
		rc = -EACCES;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3022 FW Download failed. Add Status x%x "
				"Device:%x Magic:%x Type:%x ID:%x Size %d "
				"%zd\n",
				offset, phba->pcidev->device, magic_number,
				ftype, fid, fsize, fw->size);
		rc = -EIO;
	}
	return rc;
}
/**
 * lpfc_write_firmware - attempt to write a firmware image to the port
 * @fw: pointer to firmware image returned from request_firmware.
 * @context: pointer to the lpfc hba data structure (passed as void *).
 **/
static void
lpfc_write_firmware(const struct firmware *fw, void *context)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)context;
	char fwrev[FW_REV_STR_SIZE];
	struct lpfc_grp_hdr *image;
	struct list_head dma_buffer_list;
	int i, rc = 0;
	struct lpfc_dmabuf *dmabuf, *next;
	uint32_t offset = 0, temp_offset = 0;
	uint32_t magic_number, ftype, fid, fsize;

	/* It can be null in no-wait mode, sanity check */
	if (!fw) {
		rc = -ENXIO;
		goto out;
	}
	image = (struct lpfc_grp_hdr *)fw->data;

	magic_number = be32_to_cpu(image->magic_number);
	ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
	fid = bf_get_be32(lpfc_grp_hdr_id, image);
	fsize = be32_to_cpu(image->size);

	INIT_LIST_HEAD(&dma_buffer_list);
	lpfc_decode_firmware_rev(phba, fwrev, 1);
	if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3023 Updating Firmware, Current Version:%s "
				"New Version:%s\n",
				fwrev, image->revision);
		for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
			dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
					 GFP_KERNEL);
			if (!dmabuf) {
				rc = -ENOMEM;
				goto release_out;
			}
			dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
							  SLI4_PAGE_SIZE,
							  &dmabuf->phys,
							  GFP_KERNEL);
			if (!dmabuf->virt) {
				kfree(dmabuf);
				rc = -ENOMEM;
				goto release_out;
			}
			list_add_tail(&dmabuf->list, &dma_buffer_list);
		}
		while (offset < fw->size) {
			temp_offset = offset;
			list_for_each_entry(dmabuf, &dma_buffer_list, list) {
				if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
					memcpy(dmabuf->virt,
					       fw->data + temp_offset,
					       fw->size - temp_offset);
					temp_offset = fw->size;
					break;
				}
				memcpy(dmabuf->virt, fw->data + temp_offset,
				       SLI4_PAGE_SIZE);
				temp_offset += SLI4_PAGE_SIZE;
			}
			rc = lpfc_wr_object(phba, &dma_buffer_list,
					    (fw->size - offset), &offset);
			if (rc) {
				rc = lpfc_log_write_firmware_error(phba, offset,
								   magic_number,
								   ftype,
								   fid,
								   fsize,
								   fw);
				goto release_out;
			}
		}
		rc = offset;
	} else
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3029 Skipped Firmware update, Current "
				"Version:%s New Version:%s\n",
				fwrev, image->revision);

release_out:
	list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
		list_del(&dmabuf->list);
		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	release_firmware(fw);
out:
	if (rc < 0)
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3062 Firmware update error, status %d.\n", rc);
	else
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3024 Firmware update success: size %d.\n", rc);
}
/**
 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
 * @phba: pointer to lpfc hba data structure.
 * @fw_upgrade: which firmware to update.
 *
 * This routine is called to perform Linux generic firmware upgrade on device
 * that supports such feature.
 **/
int
lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
{
	uint8_t file_name[ELX_MODEL_NAME_SIZE];
	int ret;
	const struct firmware *fw;

	/* Only supported on SLI4 interface type 2 for now */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
	    LPFC_SLI_INTF_IF_TYPE_2)
		return -EPERM;

	snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);

	if (fw_upgrade == INT_FW_UPGRADE) {
		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
					      file_name, &phba->pcidev->dev,
					      GFP_KERNEL, (void *)phba,
					      lpfc_write_firmware);
	} else if (fw_upgrade == RUN_FW_UPGRADE) {
		ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
		if (!ret)
			lpfc_write_firmware(fw, (void *)phba);
	} else {
		ret = -EINVAL;
	}

	return ret;
}
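
/*
 * The firmware file name is derived from the model name; e.g. an adapter
 * whose ModelName is "LPe32000" requests "LPe32000.grp" from the firmware
 * loader (model name given for illustration only). INT_FW_UPGRADE runs
 * asynchronously from driver load via request_firmware_nowait();
 * RUN_FW_UPGRADE runs synchronously on an explicit user request.
 */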
/**
 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is called from the kernel's PCI subsystem to device with
 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 * information of the device and driver to see if the driver can support this
 * kind of device. If the match is successful, the driver core invokes this
 * routine. If this routine determines it can claim the HBA, it does all the
 * initialization that it needs to do to handle the HBA properly.
 *
 * Return codes
 *	0 - driver can claim the device
 *	negative value - driver can not claim the device
 **/
static int
lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	INIT_LIST_HEAD(&phba->poll_list);

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-4 specific device PCI memory space */
	error = lpfc_sli4_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1410 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up SLI-4 Specific device driver resources */
	error = lpfc_sli4_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1412 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s4;
	}

	INIT_LIST_HEAD(&phba->active_rrq_list);
	INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1414 Failed to set up driver resource.\n");
		goto out_unset_driver_resource_s4;
	}

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;

	/* Put device to a known state before enabling interrupt */
	phba->pport = NULL;
	lpfc_stop_port(phba);

	/* Init cpu_map array */
	lpfc_cpu_map_array_init(phba);

	/* Init hba_eq_hdl array */
	lpfc_hba_eq_hdl_array_init(phba);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0426 Failed to enable interrupt.\n");
		error = -ENODEV;
		goto out_unset_driver_resource;
	}
	/* Default to single EQ for non-MSI-X */
	if (phba->intr_type != MSIX) {
		phba->cfg_irq_chann = 1;
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			if (phba->nvmet_support)
				phba->cfg_nvmet_mrq = 1;
		}
	}
	lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1415 Failed to create scsi host.\n");
		goto out_disable_intr;
	}
	vport = phba->pport;
	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */

	/* Configure sysfs attributes */
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1416 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	/* Set up SLI-4 HBA */
	if (lpfc_sli4_hba_setup(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1421 Failed to set up hba\n");
		error = -ENODEV;
		goto out_free_sysfs_attr;
	}

	/* Log the current active interrupt mode */
	phba->intr_mode = intr_mode;
	lpfc_log_intr_mode(phba, intr_mode);

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* NVME support in FW earlier in the driver load corrects the
	 * FC4 type making a check for nvme_support unnecessary.
	 */
	if (phba->nvmet_support == 0) {
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			/* Create NVME binding with nvme_fc_transport. This
			 * ensures the vport is initialized. If the localport
			 * create fails, it should not unload the driver to
			 * support field issues.
			 */
			error = lpfc_nvme_create_localport(vport);
			if (error) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6004 NVME registration "
						"failed, error x%x\n",
						error);
			}
		}
	}

	/* check for firmware upgrade or downgrade */
	if (phba->cfg_request_firmware_upgrade)
		lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
	cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);

	return 0;

out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_disable_intr:
	lpfc_sli4_disable_intr(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_unset_driver_resource_s4:
	lpfc_sli4_driver_resource_unset(phba);
out_unset_pci_mem_s4:
	lpfc_sli4_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}
/**
 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to device with
 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * removed from PCI bus, it performs all the necessary cleanup for the HBA
 * device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;

	/* Mark the device unloading flag */
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	lpfc_unreg_congestion_buf(phba);

	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/* Perform ndlp cleanup on the physical port. The nvme and nvmet
	 * localports are destroyed afterwards to clean up all transport
	 * memory.
	 */
	lpfc_cleanup(vport);
	lpfc_nvmet_destroy_targetport(phba);
	lpfc_nvme_destroy_localport(vport);

	/* De-allocate multi-XRI pools */
	if (phba->cfg_xri_rebalancing)
		lpfc_destroy_multixri_pools(phba);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA FCoE function.
	 */
	lpfc_debugfs_terminate(vport);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->port_list_lock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->port_list_lock);

	/* Perform scsi free before driver resource_unset since scsi
	 * buffers are released to their corresponding pools here.
	 */
	lpfc_io_free(phba);
	lpfc_free_iocb_list(phba);
	lpfc_sli4_hba_unset(phba);

	lpfc_unset_driver_resource_phase2(phba);
	lpfc_sli4_driver_resource_unset(phba);

	/* Unmap adapter Control and Doorbell registers */
	lpfc_sli4_pci_mem_unset(phba);

	/* Release PCI resources and disable device's PCI function */
	scsi_host_put(shost);
	lpfc_disable_pci_dev(phba);

	/* Finally, free the driver's device data structure */
	lpfc_hba_free(phba);
}
/**
 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
 * @dev_d: pointer to device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
 * this method, it quiesces the device by stopping the driver's worker
 * thread for the device, turning off device's interrupt and DMA, and
 * bringing the device offline. Note that as the driver implements the
 * minimum PM requirements to a power-aware driver's PM support for
 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
 * to the suspend() method call will be treated as SUSPEND and the driver
 * will fully reinitialize its device during resume() method call, the driver
 * will set device to PCI_D3hot state in PCI config space instead of setting
 * it according to the @msg provided by the PM.
 *
 * Return code
 *	0 - driver suspended the device
 *	Error otherwise
 **/
static int __maybe_unused
lpfc_pci_suspend_one_s4(struct device *dev_d)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2843 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);

	return 0;
}
/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @dev_d: pointer to device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
 * this method, it restores the device's PCI config space state and fully
 * reinitializes the device and brings it online. Note that as the driver
 * implements the minimum PM requirements to a power-aware driver's PM for
 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
 * to the suspend() method call will be treated as SUSPEND and the driver
 * will fully reinitialize its device during resume() method call, the device
 * will be set to PCI_D0 directly in PCI config space before restoring the
 * state.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error otherwise
 **/
static int __maybe_unused
lpfc_pci_resume_one_s4(struct device *dev_d)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0293 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0294 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}
/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2828 PCI channel I/O abort preparing for recovery\n");
	/*
	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
	 * and let the SCSI mid-layer retry them to recover.
	 */
	lpfc_sli_abort_fcp_rings(phba);
}
/**
 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
	int offline = pci_channel_offline(phba->pcidev);

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2826 PCI channel disable preparing for reset offline"
			" %d\n", offline);

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);

	/* HBA_PCI_ERR was set in io_error_detect */
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	/* Flush all driver's outstanding I/Os as we are to reset */
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	lpfc_sli4_queue_destroy(phba);
	/* Disable interrupt and pci device */
	lpfc_sli4_disable_intr(phba);
	pci_disable_device(phba->pcidev);
}
/**
 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot permanently
 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
 * pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2827 PCI channel permanent disable for failure\n");

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding I/Os */
	lpfc_sli_flush_io_rings(phba);
}
/**
 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. This function is called by the PCI subsystem
 * after a PCI bus error affecting this device has been detected. When this
 * function is invoked, it will need to stop all the I/Os and interrupt(s)
 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
 * for the PCI subsystem to perform proper recovery as desired.
 *
 * Return codes
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	bool hba_pci_err;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli4_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
		/* Fatal error, prepare for slot reset */
		if (!hba_pci_err)
			lpfc_sli4_prep_dev_for_reset(phba);
		else
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2832 Already handling PCI error "
					"state: x%x\n", state);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		set_bit(HBA_PCI_ERR, &phba->bit_flags);
		/* Permanent failure, prepare for device down */
		lpfc_sli4_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
		if (!hba_pci_err)
			lpfc_sli4_prep_dev_for_reset(phba);
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2825 Unknown PCI error state: x%x\n", state);
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}
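
/*
 * HBA_PCI_ERR is claimed with test_and_set_bit() so that back-to-back EEH
 * events (e.g. a second freeze arriving before the first slot reset has
 * completed) prepare the device for reset only once;
 * lpfc_io_slot_reset_s4() clears the bit again when recovery proceeds.
 */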
/**
 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. It is called after PCI bus has been reset to
 * restart the PCI card from scratch, as if from a cold-boot. During the
 * PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method to
 * recover the device. This function will initialize the HBA device, enable
 * the interrupt, but it will just put the HBA to offline state without
 * passing any I/O traffic.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;
	bool hba_pci_err;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
		       "PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	hba_pci_err = test_and_clear_bit(HBA_PCI_ERR, &phba->bit_flags);
	if (!hba_pci_err)
		dev_info(&pdev->dev,
			 "hba_pci_err was not set, recovering slot reset.\n");
	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Init cpu_map array */
	lpfc_cpu_map_array_init(phba);
	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2824 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	phba->intr_mode = intr_mode;
	lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this device
 * again.
 **/
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/*
	 * In case of slot reset, as function reset is performed through
	 * mailbox command which needs DMA to be enabled, this operation
	 * has to be moved to the io resume phase. Taking device offline
	 * will perform the necessary cleanup.
	 */
	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
		/* Perform device reset */
		lpfc_sli_brdrestart(phba);
		/* Bring the device back online */
		lpfc_online(phba);
	}
}
/**
 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
 * at PCI device-specific information of the device and driver to see if the
 * driver can support this kind of device. If the match is successful, the
 * driver core invokes this routine. This routine dispatches the action to
 * the proper SLI-3 or SLI-4 device probing routine, which will do all the
 * initialization that it needs to do to handle the HBA device properly.
 *
 * Return codes
 *	0 - driver can claim the device
 *	negative value - driver can not claim the device
 **/
static int
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	int rc;
	struct lpfc_sli_intf intf;

	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
		return -ENODEV;

	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
		rc = lpfc_pci_probe_one_s4(pdev, pid);
	else
		rc = lpfc_pci_probe_one_s3(pdev, pid);

	return rc;
}
/**
 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
 * remove routine, which will perform all the necessary cleanup for the
 * device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_pci_remove_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_pci_remove_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1424 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
}
/**
 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
 * @dev: pointer to device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
 * suspend the device.
 *
 * Return code
 *	0 - driver suspended the device
 *	Error otherwise
 **/
static int __maybe_unused
lpfc_pci_suspend_one(struct device *dev)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_suspend_one_s3(dev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_suspend_one_s4(dev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1425 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
 * @dev: pointer to device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
 * resume the device.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error otherwise
 **/
static int __maybe_unused
lpfc_pci_resume_one(struct device *dev)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_resume_one_s3(dev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_resume_one_s4(dev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1426 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	if (phba->link_state == LPFC_HBA_ERROR &&
	    phba->hba_flag & HBA_IOQ_FLUSH)
		return PCI_ERS_RESULT_NEED_RESET;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_error_detected_s3(pdev, state);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_error_detected_s4(pdev, state);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1427 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after PCI bus has been reset to restart the PCI card
 * from scratch, as if from a cold-boot. When this routine is invoked, it
 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
 * routine, which will perform the proper device reset.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_slot_reset_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_slot_reset_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1428 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling. It
 * is called when kernel error recovery tells the lpfc driver that it is
 * OK to resume normal PCI operation after PCI bus error recovery. When
 * this routine is invoked, it dispatches the action to the proper SLI-3
 * or SLI-4 device io_resume routine, which will resume the device operation.
 **/
static void
lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_io_resume_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_io_resume_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1429 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
}
/**
 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks to see if OAS is supported for this adapter. If
 * supported, the configure Flash Optimized Fabric flag is set. Otherwise,
 * the enable oas flag is cleared and the pool created for OAS device data
 * is destroyed.
 **/
static void
lpfc_sli4_oas_verify(struct lpfc_hba *phba)
{
	if (!phba->cfg_EnableXLane)
		return;

	if (phba->sli4_hba.pc_sli4_params.oas_supported) {
		phba->cfg_fof = 1;
	} else {
		phba->cfg_fof = 0;
		mempool_destroy(phba->device_data_mem_pool);
		phba->device_data_mem_pool = NULL;
	}
}
/**
 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks to see if RAS is supported by the adapter. Check the
 * function through which RAS support enablement is to be done.
 **/
static void
lpfc_sli4_ras_init(struct lpfc_hba *phba)
{
	/* if ASIC_GEN_NUM >= 0xC */
	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	     LPFC_SLI_INTF_IF_TYPE_6) ||
	    (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
	     LPFC_SLI_INTF_FAMILY_G6)) {
		phba->ras_fwlog.ras_hwsupport = true;
		if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
		    phba->cfg_ras_fwlog_buffsize)
			phba->ras_fwlog.ras_enabled = true;
		else
			phba->ras_fwlog.ras_enabled = false;
	} else {
		phba->ras_fwlog.ras_hwsupport = false;
	}
}
MODULE_DEVICE_TABLE(pci, lpfc_id_table);
static const struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one,
			 lpfc_pci_suspend_one,
			 lpfc_pci_resume_one);
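
/*
 * SIMPLE_DEV_PM_OPS wires the suspend/resume pair to system sleep
 * transitions only (suspend, hibernate, freeze); no runtime PM callbacks
 * are installed.
 */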
static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= lpfc_pci_remove_one,
	.shutdown	= lpfc_pci_remove_one,
	.driver.pm	= &lpfc_pci_pm_ops_one,
	.err_handler	= &lpfc_err_handler,
};
static const struct file_operations lpfc_mgmt_fop = {
	.owner = THIS_MODULE,
};

static struct miscdevice lpfc_mgmt_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "lpfcmgmt",
	.fops = &lpfc_mgmt_fop,
};
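
/*
 * Note: lpfc_mgmt_fop above defines no operations beyond .owner, so the
 * lpfcmgmt node does not implement any I/O itself; presumably it exists
 * only as a well-known misc device through which management applications
 * can discover the driver.
 */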
/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as lpfc module entry point.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - FC attach transport failed
 *	all others - failed
 **/
static int __init
lpfc_init(void)
{
	int error = 0;

	pr_info(LPFC_MODULE_DESC "\n");
	pr_info(LPFC_COPYRIGHT "\n");

	error = misc_register(&lpfc_mgmt_dev);
	if (error)
		printk(KERN_ERR "Could not register lpfcmgmt device, "
			"misc_register returned with status %d", error);

	error = -ENOMEM;
	lpfc_transport_functions.vport_create = lpfc_vport_create;
	lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		goto unregister;
	lpfc_vport_transport_template =
		fc_attach_transport(&lpfc_vport_transport_functions);
	if (lpfc_vport_transport_template == NULL) {
		fc_release_transport(lpfc_transport_template);
		goto unregister;
	}
	lpfc_wqe_cmd_template();
	lpfc_nvmet_cmd_template();

	/* Initialize in case vector mapping is needed */
	lpfc_present_cpu = num_present_cpus();

	lpfc_pldv_detect = false;

	error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
					"lpfc/sli4:online",
					lpfc_cpu_online, lpfc_cpu_offline);
	if (error < 0)
		goto cpuhp_failure;
	lpfc_cpuhp_state = error;

	error = pci_register_driver(&lpfc_driver);
	if (error)
		goto unwind;

	return error;

unwind:
	cpuhp_remove_multi_state(lpfc_cpuhp_state);
cpuhp_failure:
	fc_release_transport(lpfc_transport_template);
	fc_release_transport(lpfc_vport_transport_template);
unregister:
	misc_deregister(&lpfc_mgmt_dev);

	return error;
}
void lpfc_dmp_dbg(struct lpfc_hba *phba)
{
	unsigned int start_idx;
	unsigned int dbg_cnt;
	unsigned int temp_idx;
	int i;
	int j = 0;
	unsigned long rem_nsec;

	if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
		return;

	start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ;
	dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt);
	if (!dbg_cnt)
		goto out;
	temp_idx = start_idx;
	if (dbg_cnt >= DBG_LOG_SZ) {
		dbg_cnt = DBG_LOG_SZ;
		temp_idx -= 1;
	} else {
		if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) {
			temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ;
		} else {
			if (start_idx < dbg_cnt)
				start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx);
			else
				start_idx -= dbg_cnt;
		}
	}
	dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n",
		 start_idx, temp_idx, dbg_cnt);

	for (i = 0; i < dbg_cnt; i++) {
		if ((start_idx + i) < DBG_LOG_SZ)
			temp_idx = (start_idx + i) % DBG_LOG_SZ;
		else
			temp_idx = j++;
		rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC);
		dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s",
			 temp_idx,
			 (unsigned long)phba->dbg_log[temp_idx].t_ns,
			 rem_nsec / 1000,
			 phba->dbg_log[temp_idx].log);
	}

out:
	atomic_set(&phba->dbg_log_cnt, 0);
	atomic_set(&phba->dbg_log_dmping, 0);
}
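
/*
 * Worked example of the index math above, assuming DBG_LOG_SZ were 256:
 * if the next write slot (dbg_log_idx % 256) is 44 and 50 messages are
 * buffered, start_idx (44) is smaller than dbg_cnt (50), so the dump
 * starts at 256 - (50 - 44) = 250 and wraps around through entry 43.
 */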
void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...)
{
	unsigned int idx;
	va_list args;
	int dbg_dmping = atomic_read(&phba->dbg_log_dmping);
	struct va_format vaf;

	va_start(args, fmt);
	if (unlikely(dbg_dmping)) {
		vaf.fmt = fmt;
		vaf.va = &args;
		dev_info(&phba->pcidev->dev, "%pV", &vaf);
		va_end(args);
		return;
	}
	idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) %
		DBG_LOG_SZ;

	atomic_inc(&phba->dbg_log_cnt);

	vscnprintf(phba->dbg_log[idx].log,
		   sizeof(phba->dbg_log[idx].log), fmt, args);
	va_end(args);

	phba->dbg_log[idx].t_ns = local_clock();
}
/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as lpfc module exit point.
 **/
static void __exit
lpfc_exit(void)
{
	misc_deregister(&lpfc_mgmt_dev);
	pci_unregister_driver(&lpfc_driver);
	cpuhp_remove_multi_state(lpfc_cpuhp_state);
	fc_release_transport(lpfc_transport_template);
	fc_release_transport(lpfc_vport_transport_template);
	idr_destroy(&lpfc_hba_index);
}
module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Broadcom");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);