Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
[sfrench/cifs-2.6.git] / drivers / scsi / lpfc / lpfc_sli.c
1
2 /*******************************************************************
3  * This file is part of the Emulex Linux Device Driver for         *
4  * Fibre Channel Host Bus Adapters.                                *
5  * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
6  * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
7  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
8  * EMULEX and SLI are trademarks of Emulex.                        *
9  * www.broadcom.com                                                *
10  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
11  *                                                                 *
12  * This program is free software; you can redistribute it and/or   *
13  * modify it under the terms of version 2 of the GNU General       *
14  * Public License as published by the Free Software Foundation.    *
15  * This program is distributed in the hope that it will be useful. *
16  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
17  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
18  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
19  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
20  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
21  * more details, a copy of which can be found in the file COPYING  *
22  * included with this package.                                     *
23  *******************************************************************/
24
25 #include <linux/blkdev.h>
26 #include <linux/pci.h>
27 #include <linux/interrupt.h>
28 #include <linux/delay.h>
29 #include <linux/slab.h>
30 #include <linux/lockdep.h>
31
32 #include <scsi/scsi.h>
33 #include <scsi/scsi_cmnd.h>
34 #include <scsi/scsi_device.h>
35 #include <scsi/scsi_host.h>
36 #include <scsi/scsi_transport_fc.h>
37 #include <scsi/fc/fc_fs.h>
38 #include <linux/aer.h>
39
40 #include <linux/nvme-fc-driver.h>
41
42 #include "lpfc_hw4.h"
43 #include "lpfc_hw.h"
44 #include "lpfc_sli.h"
45 #include "lpfc_sli4.h"
46 #include "lpfc_nl.h"
47 #include "lpfc_disc.h"
48 #include "lpfc.h"
49 #include "lpfc_scsi.h"
50 #include "lpfc_nvme.h"
51 #include "lpfc_nvmet.h"
52 #include "lpfc_crtn.h"
53 #include "lpfc_logmsg.h"
54 #include "lpfc_compat.h"
55 #include "lpfc_debugfs.h"
56 #include "lpfc_vport.h"
57 #include "lpfc_version.h"
58
/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
        LPFC_UNKNOWN_IOCB,      /* command not recognized */
        LPFC_UNSOL_IOCB,        /* unsolicited iocb */
        LPFC_SOL_IOCB,          /* solicited iocb */
        LPFC_ABORT_IOCB         /* abort iocb */
} lpfc_iocb_type;
66
67
/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
                                  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
                              uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
                                                         struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
                                      struct hbq_dmabuf *);
static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *,
                                    struct lpfc_cqe *);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
                                       int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *,
                        uint32_t);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba,
                                   struct lpfc_sli_ring *pring,
                                   struct lpfc_iocbq *cmdiocb);
88
/**
 * lpfc_get_iocb_from_iocbq - Get the IOCB from an iocbq entry
 * @iocbq: Pointer to driver iocb object.
 *
 * Trivial accessor: returns a pointer to the IOCB_t embedded in the
 * driver iocb queue entry.
 **/
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
        return &iocbq->iocb;
}
94
95 /**
96  * lpfc_sli4_wq_put - Put a Work Queue Entry on an Work Queue
97  * @q: The Work Queue to operate on.
98  * @wqe: The work Queue Entry to put on the Work queue.
99  *
100  * This routine will copy the contents of @wqe to the next available entry on
101  * the @q. This function will then ring the Work Queue Doorbell to signal the
102  * HBA to start processing the Work Queue Entry. This function returns 0 if
103  * successful. If no entries are available on @q then this function will return
104  * -ENOMEM.
105  * The caller is expected to hold the hbalock when calling this routine.
106  **/
107 static uint32_t
108 lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
109 {
110         union lpfc_wqe *temp_wqe;
111         struct lpfc_register doorbell;
112         uint32_t host_index;
113         uint32_t idx;
114
115         /* sanity check on queue memory */
116         if (unlikely(!q))
117                 return -ENOMEM;
118         temp_wqe = q->qe[q->host_index].wqe;
119
120         /* If the host has not yet processed the next entry then we are done */
121         idx = ((q->host_index + 1) % q->entry_count);
122         if (idx == q->hba_index) {
123                 q->WQ_overflow++;
124                 return -ENOMEM;
125         }
126         q->WQ_posted++;
127         /* set consumption flag every once in a while */
128         if (!((q->host_index + 1) % q->entry_repost))
129                 bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
130         if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
131                 bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
132         lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
133         /* ensure WQE bcopy flushed before doorbell write */
134         wmb();
135
136         /* Update the host index before invoking device */
137         host_index = q->host_index;
138
139         q->host_index = idx;
140
141         /* Ring Doorbell */
142         doorbell.word0 = 0;
143         if (q->db_format == LPFC_DB_LIST_FORMAT) {
144                 bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
145                 bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index);
146                 bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
147         } else if (q->db_format == LPFC_DB_RING_FORMAT) {
148                 bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
149                 bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
150         } else {
151                 return -EINVAL;
152         }
153         writel(doorbell.word0, q->db_regaddr);
154
155         return 0;
156 }
157
158 /**
159  * lpfc_sli4_wq_release - Updates internal hba index for WQ
160  * @q: The Work Queue to operate on.
161  * @index: The index to advance the hba index to.
162  *
163  * This routine will update the HBA index of a queue to reflect consumption of
164  * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
165  * an entry the host calls this function to update the queue's internal
166  * pointers. This routine returns the number of entries that were consumed by
167  * the HBA.
168  **/
169 static uint32_t
170 lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
171 {
172         uint32_t released = 0;
173
174         /* sanity check on queue memory */
175         if (unlikely(!q))
176                 return 0;
177
178         if (q->hba_index == index)
179                 return 0;
180         do {
181                 q->hba_index = ((q->hba_index + 1) % q->entry_count);
182                 released++;
183         } while (q->hba_index != index);
184         return released;
185 }
186
187 /**
188  * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on an Mailbox Queue
189  * @q: The Mailbox Queue to operate on.
190  * @wqe: The Mailbox Queue Entry to put on the Work queue.
191  *
192  * This routine will copy the contents of @mqe to the next available entry on
193  * the @q. This function will then ring the Work Queue Doorbell to signal the
194  * HBA to start processing the Work Queue Entry. This function returns 0 if
195  * successful. If no entries are available on @q then this function will return
196  * -ENOMEM.
197  * The caller is expected to hold the hbalock when calling this routine.
198  **/
199 static uint32_t
200 lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
201 {
202         struct lpfc_mqe *temp_mqe;
203         struct lpfc_register doorbell;
204
205         /* sanity check on queue memory */
206         if (unlikely(!q))
207                 return -ENOMEM;
208         temp_mqe = q->qe[q->host_index].mqe;
209
210         /* If the host has not yet processed the next entry then we are done */
211         if (((q->host_index + 1) % q->entry_count) == q->hba_index)
212                 return -ENOMEM;
213         lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
214         /* Save off the mailbox pointer for completion */
215         q->phba->mbox = (MAILBOX_t *)temp_mqe;
216
217         /* Update the host index before invoking device */
218         q->host_index = ((q->host_index + 1) % q->entry_count);
219
220         /* Ring Doorbell */
221         doorbell.word0 = 0;
222         bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
223         bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
224         writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
225         return 0;
226 }
227
228 /**
229  * lpfc_sli4_mq_release - Updates internal hba index for MQ
230  * @q: The Mailbox Queue to operate on.
231  *
232  * This routine will update the HBA index of a queue to reflect consumption of
233  * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
234  * an entry the host calls this function to update the queue's internal
235  * pointers. This routine returns the number of entries that were consumed by
236  * the HBA.
237  **/
238 static uint32_t
239 lpfc_sli4_mq_release(struct lpfc_queue *q)
240 {
241         /* sanity check on queue memory */
242         if (unlikely(!q))
243                 return 0;
244
245         /* Clear the mailbox pointer for completion */
246         q->phba->mbox = NULL;
247         q->hba_index = ((q->hba_index + 1) % q->entry_count);
248         return 1;
249 }
250
/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from a EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 *
 * NOTE: the valid-bit check MUST happen before the entry is acted upon;
 * the trailing mb() enforces that ordering (see comment below).
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
        struct lpfc_eqe *eqe;
        uint32_t idx;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return NULL;
        eqe = q->qe[q->hba_index].eqe;

        /* If the next EQE is not valid then we are done */
        if (!bf_get_le32(lpfc_eqe_valid, eqe))
                return NULL;
        /* If the host has not yet processed the next entry then we are done */
        idx = ((q->hba_index + 1) % q->entry_count);
        if (idx == q->host_index)
                return NULL;

        q->hba_index = idx;

        /*
         * insert barrier for instruction interlock : data from the hardware
         * must have the valid bit checked before it can be copied and acted
         * upon. Speculative instructions were allowing a bcopy at the start
         * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
         * after our return, to copy data before the valid bit check above
         * was done. As such, some of the copied data was stale. The barrier
         * ensures the check is before any data is copied.
         */
        mb();
        return eqe;
}
293
294 /**
295  * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
296  * @q: The Event Queue to disable interrupts
297  *
298  **/
299 static inline void
300 lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
301 {
302         struct lpfc_register doorbell;
303
304         doorbell.word0 = 0;
305         bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
306         bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
307         bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
308                 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
309         bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
310         writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
311 }
312
/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arms this CQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of EQEs that were popped.
 **/
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
        uint32_t released = 0;
        struct lpfc_eqe *temp_eqe;
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return 0;

        /* while there are valid entries */
        while (q->hba_index != q->host_index) {
                /* Clear the valid bit so the HBA may reuse this entry */
                temp_eqe = q->qe[q->host_index].eqe;
                bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
                released++;
                q->host_index = ((q->host_index + 1) % q->entry_count);
        }
        /* Nothing popped and no rearm requested: skip the doorbell write */
        if (unlikely(released == 0 && !arm))
                return 0;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm) {
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
                bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
        }
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
        bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
                        (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
        /* PCI read to flush PCI pipeline on re-arming for INTx mode */
        if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
                readl(q->phba->sli4_hba.EQCQDBregaddr);
        return released;
}
366
/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 *
 * NOTE: the valid-bit check MUST happen before the entry is acted upon;
 * the trailing mb() enforces that ordering (see comment below).
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
        struct lpfc_cqe *cqe;
        uint32_t idx;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return NULL;

        /* If the next CQE is not valid then we are done */
        if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
                return NULL;
        /* If the host has not yet processed the next entry then we are done */
        idx = ((q->hba_index + 1) % q->entry_count);
        if (idx == q->host_index)
                return NULL;

        cqe = q->qe[q->hba_index].cqe;
        q->hba_index = idx;

        /*
         * insert barrier for instruction interlock : data from the hardware
         * must have the valid bit checked before it can be copied and acted
         * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
         * instructions allowing action on content before valid bit checked,
         * add barrier here as well. May not be needed as "content" is a
         * single 32-bit entity here (vs multi word structure for cq's).
         */
        mb();
        return cqe;
}
408
409 /**
410  * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
411  * @q: The Completion Queue that the host has completed processing for.
412  * @arm: Indicates whether the host wants to arms this CQ.
413  *
414  * This routine will mark all Completion queue entries on @q, from the last
415  * known completed entry to the last entry that was processed, as completed
416  * by clearing the valid bit for each completion queue entry. Then it will
417  * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
418  * The internal host index in the @q will be updated by this routine to indicate
419  * that the host has finished processing the entries. The @arm parameter
420  * indicates that the queue should be rearmed when ringing the doorbell.
421  *
422  * This function will return the number of CQEs that were released.
423  **/
424 uint32_t
425 lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
426 {
427         uint32_t released = 0;
428         struct lpfc_cqe *temp_qe;
429         struct lpfc_register doorbell;
430
431         /* sanity check on queue memory */
432         if (unlikely(!q))
433                 return 0;
434         /* while there are valid entries */
435         while (q->hba_index != q->host_index) {
436                 temp_qe = q->qe[q->host_index].cqe;
437                 bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
438                 released++;
439                 q->host_index = ((q->host_index + 1) % q->entry_count);
440         }
441         if (unlikely(released == 0 && !arm))
442                 return 0;
443
444         /* ring doorbell for number popped */
445         doorbell.word0 = 0;
446         if (arm)
447                 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
448         bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
449         bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
450         bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
451                         (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
452         bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
453         writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
454         return released;
455 }
456
/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the header queue.
 * @drqe: The Data Receive Queue Entry to put on the data queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq respectively. The two queues advance in
 * lockstep (their host indexes must match). The doorbell is only rung once
 * every hq->entry_repost postings (batched repost). This function returns the
 * index that the rqe was copied to if successful. If no entries are available
 * on @hq then this function will return -EBUSY; -EINVAL on type/index
 * mismatch; -ENOMEM on missing queue memory.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
                 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
        struct lpfc_rqe *temp_hrqe;
        struct lpfc_rqe *temp_drqe;
        struct lpfc_register doorbell;
        int put_index;

        /* sanity check on queue memory */
        if (unlikely(!hq) || unlikely(!dq))
                return -ENOMEM;
        put_index = hq->host_index;
        temp_hrqe = hq->qe[hq->host_index].rqe;
        temp_drqe = dq->qe[dq->host_index].rqe;

        /* The header/data queues must be the expected pair, in lockstep */
        if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
                return -EINVAL;
        if (hq->host_index != dq->host_index)
                return -EINVAL;
        /* If the host has not yet processed the next entry then we are done */
        if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
                return -EBUSY;
        lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
        lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

        /* Update the host index to point to the next slot */
        hq->host_index = ((hq->host_index + 1) % hq->entry_count);
        dq->host_index = ((dq->host_index + 1) % dq->entry_count);

        /* Ring The Header Receive Queue Doorbell: batched, once per
         * entry_repost postings
         */
        if (!(hq->host_index % hq->entry_repost)) {
                doorbell.word0 = 0;
                if (hq->db_format == LPFC_DB_RING_FORMAT) {
                        bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
                               hq->entry_repost);
                        bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
                } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
                        bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
                               hq->entry_repost);
                        bf_set(lpfc_rq_db_list_fm_index, &doorbell,
                               hq->host_index);
                        bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
                } else {
                        return -EINVAL;
                }
                writel(doorbell.word0, hq->db_regaddr);
        }
        return put_index;
}
519
520 /**
521  * lpfc_sli4_rq_release - Updates internal hba index for RQ
522  * @q: The Header Receive Queue to operate on.
523  *
524  * This routine will update the HBA index of a queue to reflect consumption of
525  * one Receive Queue Entry by the HBA. When the HBA indicates that it has
526  * consumed an entry the host calls this function to update the queue's
527  * internal pointers. This routine returns the number of entries that were
528  * consumed by the HBA.
529  **/
530 static uint32_t
531 lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
532 {
533         /* sanity check on queue memory */
534         if (unlikely(!hq) || unlikely(!dq))
535                 return 0;
536
537         if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
538                 return 0;
539         hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
540         dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
541         return 1;
542 }
543
544 /**
545  * lpfc_cmd_iocb - Get next command iocb entry in the ring
546  * @phba: Pointer to HBA context object.
547  * @pring: Pointer to driver SLI ring object.
548  *
549  * This function returns pointer to next command iocb entry
550  * in the command ring. The caller must hold hbalock to prevent
551  * other threads consume the next command iocb.
552  * SLI-2/SLI-3 provide different sized iocbs.
553  **/
554 static inline IOCB_t *
555 lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
556 {
557         return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
558                            pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
559 }
560
561 /**
562  * lpfc_resp_iocb - Get next response iocb entry in the ring
563  * @phba: Pointer to HBA context object.
564  * @pring: Pointer to driver SLI ring object.
565  *
566  * This function returns pointer to next response iocb entry
567  * in the response ring. The caller must hold hbalock to make sure
568  * that no other thread consume the next response iocb.
569  * SLI-2/SLI-3 provide different sized iocbs.
570  **/
571 static inline IOCB_t *
572 lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
573 {
574         return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
575                            pring->sli.sli3.rspidx * phba->iocb_rsp_size);
576 }
577
578 /**
579  * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
580  * @phba: Pointer to HBA context object.
581  *
582  * This function is called with hbalock held. This function
583  * allocates a new driver iocb object from the iocb pool. If the
584  * allocation is successful, it returns pointer to the newly
585  * allocated iocb object else it returns NULL.
586  **/
587 struct lpfc_iocbq *
588 __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
589 {
590         struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
591         struct lpfc_iocbq * iocbq = NULL;
592
593         lockdep_assert_held(&phba->hbalock);
594
595         list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
596         if (iocbq)
597                 phba->iocb_cnt++;
598         if (phba->iocb_cnt > phba->iocb_max)
599                 phba->iocb_max = phba->iocb_cnt;
600         return iocbq;
601 }
602
603 /**
604  * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
605  * @phba: Pointer to HBA context object.
606  * @xritag: XRI value.
607  *
608  * This function clears the sglq pointer from the array of acive
609  * sglq's. The xritag that is passed in is used to index into the
610  * array. Before the xritag can be used it needs to be adjusted
611  * by subtracting the xribase.
612  *
613  * Returns sglq ponter = success, NULL = Failure.
614  **/
615 struct lpfc_sglq *
616 __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
617 {
618         struct lpfc_sglq *sglq;
619
620         sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
621         phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
622         return sglq;
623 }
624
625 /**
626  * __lpfc_get_active_sglq - Get the active sglq for this XRI.
627  * @phba: Pointer to HBA context object.
628  * @xritag: XRI value.
629  *
630  * This function returns the sglq pointer from the array of acive
631  * sglq's. The xritag that is passed in is used to index into the
632  * array. Before the xritag can be used it needs to be adjusted
633  * by subtracting the xribase.
634  *
635  * Returns sglq ponter = success, NULL = Failure.
636  **/
637 struct lpfc_sglq *
638 __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
639 {
640         struct lpfc_sglq *sglq;
641
642         sglq =  phba->sli4_hba.lpfc_sglq_active_list[xritag];
643         return sglq;
644 }
645
646 /**
647  * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
648  * @phba: Pointer to HBA context object.
649  * @xritag: xri used in this exchange.
650  * @rrq: The RRQ to be cleared.
651  *
652  **/
653 void
654 lpfc_clr_rrq_active(struct lpfc_hba *phba,
655                     uint16_t xritag,
656                     struct lpfc_node_rrq *rrq)
657 {
658         struct lpfc_nodelist *ndlp = NULL;
659
660         if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
661                 ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
662
663         /* The target DID could have been swapped (cable swap)
664          * we should use the ndlp from the findnode if it is
665          * available.
666          */
667         if ((!ndlp) && rrq->ndlp)
668                 ndlp = rrq->ndlp;
669
670         if (!ndlp)
671                 goto out;
672
673         if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
674                 rrq->send_rrq = 0;
675                 rrq->xritag = 0;
676                 rrq->rrq_stop_time = 0;
677         }
678 out:
679         mempool_free(rrq, phba->rrq_pool);
680 }
681
682 /**
683  * lpfc_handle_rrq_active - Checks if RRQ has waithed RATOV.
684  * @phba: Pointer to HBA context object.
685  *
686  * This function is called with hbalock held. This function
687  * Checks if stop_time (ratov from setting rrq active) has
688  * been reached, if it has and the send_rrq flag is set then
689  * it will call lpfc_send_rrq. If the send_rrq flag is not set
690  * then it will just call the routine to clear the rrq and
691  * free the rrq resource.
692  * The timer is set to the next rrq that is going to expire before
693  * leaving the routine.
694  *
695  **/
696 void
697 lpfc_handle_rrq_active(struct lpfc_hba *phba)
698 {
699         struct lpfc_node_rrq *rrq;
700         struct lpfc_node_rrq *nextrrq;
701         unsigned long next_time;
702         unsigned long iflags;
703         LIST_HEAD(send_rrq);
704
705         spin_lock_irqsave(&phba->hbalock, iflags);
706         phba->hba_flag &= ~HBA_RRQ_ACTIVE;
707         next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
708         list_for_each_entry_safe(rrq, nextrrq,
709                                  &phba->active_rrq_list, list) {
710                 if (time_after(jiffies, rrq->rrq_stop_time))
711                         list_move(&rrq->list, &send_rrq);
712                 else if (time_before(rrq->rrq_stop_time, next_time))
713                         next_time = rrq->rrq_stop_time;
714         }
715         spin_unlock_irqrestore(&phba->hbalock, iflags);
716         if ((!list_empty(&phba->active_rrq_list)) &&
717             (!(phba->pport->load_flag & FC_UNLOADING)))
718                 mod_timer(&phba->rrq_tmr, next_time);
719         list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
720                 list_del(&rrq->list);
721                 if (!rrq->send_rrq)
722                         /* this call will free the rrq */
723                 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
724                 else if (lpfc_send_rrq(phba, rrq)) {
725                         /* if we send the rrq then the completion handler
726                         *  will clear the bit in the xribitmap.
727                         */
728                         lpfc_clr_rrq_active(phba, rrq->xritag,
729                                             rrq);
730                 }
731         }
732 }
733
734 /**
735  * lpfc_get_active_rrq - Get the active RRQ for this exchange.
736  * @vport: Pointer to vport context object.
737  * @xri: The xri used in the exchange.
738  * @did: The targets DID for this exchange.
739  *
740  * returns NULL = rrq not found in the phba->active_rrq_list.
741  *         rrq = rrq for this xri and target.
742  **/
743 struct lpfc_node_rrq *
744 lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
745 {
746         struct lpfc_hba *phba = vport->phba;
747         struct lpfc_node_rrq *rrq;
748         struct lpfc_node_rrq *nextrrq;
749         unsigned long iflags;
750
751         if (phba->sli_rev != LPFC_SLI_REV4)
752                 return NULL;
753         spin_lock_irqsave(&phba->hbalock, iflags);
754         list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
755                 if (rrq->vport == vport && rrq->xritag == xri &&
756                                 rrq->nlp_DID == did){
757                         list_del(&rrq->list);
758                         spin_unlock_irqrestore(&phba->hbalock, iflags);
759                         return rrq;
760                 }
761         }
762         spin_unlock_irqrestore(&phba->hbalock, iflags);
763         return NULL;
764 }
765
766 /**
767  * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
768  * @vport: Pointer to vport context object.
769  * @ndlp: Pointer to the lpfc_node_list structure.
770  * If ndlp is NULL Remove all active RRQs for this vport from the
771  * phba->active_rrq_list and clear the rrq.
772  * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
773  **/
774 void
775 lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
776
777 {
778         struct lpfc_hba *phba = vport->phba;
779         struct lpfc_node_rrq *rrq;
780         struct lpfc_node_rrq *nextrrq;
781         unsigned long iflags;
782         LIST_HEAD(rrq_list);
783
784         if (phba->sli_rev != LPFC_SLI_REV4)
785                 return;
786         if (!ndlp) {
787                 lpfc_sli4_vport_delete_els_xri_aborted(vport);
788                 lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
789         }
790         spin_lock_irqsave(&phba->hbalock, iflags);
791         list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
792                 if ((rrq->vport == vport) && (!ndlp  || rrq->ndlp == ndlp))
793                         list_move(&rrq->list, &rrq_list);
794         spin_unlock_irqrestore(&phba->hbalock, iflags);
795
796         list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
797                 list_del(&rrq->list);
798                 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
799         }
800 }
801
802 /**
803  * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
804  * @phba: Pointer to HBA context object.
805  * @ndlp: Targets nodelist pointer for this exchange.
 * @xritag: The xri in the bitmap to test.
807  *
808  * This function is called with hbalock held. This function
809  * returns 0 = rrq not active for this xri
810  *         1 = rrq is valid for this xri.
811  **/
812 int
813 lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
814                         uint16_t  xritag)
815 {
816         lockdep_assert_held(&phba->hbalock);
817         if (!ndlp)
818                 return 0;
819         if (!ndlp->active_rrqs_xri_bitmap)
820                 return 0;
821         if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
822                         return 1;
823         else
824                 return 0;
825 }
826
827 /**
828  * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
829  * @phba: Pointer to HBA context object.
830  * @ndlp: nodelist pointer for this target.
831  * @xritag: xri used in this exchange.
832  * @rxid: Remote Exchange ID.
833  * @send_rrq: Flag used to determine if we should send rrq els cmd.
834  *
835  * This function takes the hbalock.
836  * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
838  *
 * returns 0 rrq activated for this xri
840  *         < 0 No memory or invalid ndlp.
841  **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	/* RRQ tracking disabled by configuration. */
	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		/* Driver unloading: stop tracking RRQs altogether. */
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	/*
	 * set the active bit even if there is no mem available.
	 */
	if (NLP_CHK_FREE_REQ(ndlp))
		goto out;

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	/* Bit already set: an rrq is already active for this xri/node. */
	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	/* Drop the lock: GFP_KERNEL allocation may sleep. */
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
	if (!rrq) {
		/* NOTE(review): the bitmap bit set above is deliberately left
		 * set on allocation failure, per the comment above.
		 */
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	/* The caller's send_rrq request is honored only when
	 * cfg_enable_rrq == 1; otherwise the xri is tracked without
	 * sending an RRQ ELS command.
	 */
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	/* Keep the rrq active for RA_TOV plus one second. */
	rrq->rrq_stop_time = jiffies +
				msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->ndlp = ndlp;
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	/* First entry on the list: wake the worker to start servicing it. */
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}
913
914 /**
915  * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
916  * @phba: Pointer to HBA context object.
917  * @piocb: Pointer to the iocbq.
918  *
919  * This function is called with the ring lock held. This function
920  * gets a new driver sglq object from the sglq list. If the
921  * list is not empty then it is successful, it returns pointer to the newly
922  * allocated sglq object else it returns NULL.
923  **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	int found = 0;

	lockdep_assert_held(&phba->hbalock);

	/* Determine which nodelist (if any) this iocb targets; the node's
	 * active_rrqs_xri_bitmap is consulted below so we never hand out an
	 * xri that still has an RRQ outstanding for that node.
	 */
	if (piocbq->iocb_flag &  LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else  if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
			!(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->context_un.ndlp;
	} else  if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
		/* Loopback diagnostics have no remote node. */
		if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->context_un.ndlp;
	} else {
		ndlp = piocbq->context1;
	}

	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
	/* Remember the first candidate so we can detect a full rotation. */
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			break;
		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
		    test_bit(sglq->sli4_lxritag,
		    ndlp->active_rrqs_xri_bitmap)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
						struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				/* Cycled through every free sglq without
				 * finding a usable one; give up.
				 */
				sglq = NULL;
				break;
			} else
				continue;
		}
		/* Usable sglq: record it in the active array and mark it. */
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}
981
982 /**
983  * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
984  * @phba: Pointer to HBA context object.
985  * @piocb: Pointer to the iocbq.
986  *
987  * This function is called with the sgl_list lock held. This function
988  * gets a new driver sglq object from the sglq list. If the
989  * list is not empty then it is successful, it returns pointer to the newly
990  * allocated sglq object else it returns NULL.
991  **/
992 struct lpfc_sglq *
993 __lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
994 {
995         struct list_head *lpfc_nvmet_sgl_list;
996         struct lpfc_sglq *sglq = NULL;
997
998         lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;
999
1000         lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);
1001
1002         list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
1003         if (!sglq)
1004                 return NULL;
1005         phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
1006         sglq->state = SGL_ALLOCATED;
1007         return sglq;
1008 }
1009
1010 /**
1011  * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
1012  * @phba: Pointer to HBA context object.
1013  *
1014  * This function is called with no lock held. This function
1015  * allocates a new driver iocb object from the iocb pool. If the
1016  * allocation is successful, it returns pointer to the newly
1017  * allocated iocb object else it returns NULL.
1018  **/
1019 struct lpfc_iocbq *
1020 lpfc_sli_get_iocbq(struct lpfc_hba *phba)
1021 {
1022         struct lpfc_iocbq * iocbq = NULL;
1023         unsigned long iflags;
1024
1025         spin_lock_irqsave(&phba->hbalock, iflags);
1026         iocbq = __lpfc_sli_get_iocbq(phba);
1027         spin_unlock_irqrestore(&phba->hbalock, iflags);
1028         return iocbq;
1029 }
1030
1031 /**
1032  * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
1033  * @phba: Pointer to HBA context object.
1034  * @iocbq: Pointer to driver iocb object.
1035  *
1036  * This function is called with hbalock held to release driver
1037  * iocb object to the iocb pool. The iotag in the iocb object
1038  * does not change for each use of the iocb object. This function
1039  * clears all other fields of the iocb object when it is freed.
1040  * The sqlq structure that holds the xritag and phys and virtual
1041  * mappings for the scatter gather list is retrieved from the
1042  * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
1045  * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
1046  * IO has good status or fails for any other reason then the sglq
1047  * entry is added to the free list (lpfc_els_sgl_list).
1048  **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring;

	lockdep_assert_held(&phba->hbalock);

	/* Fetch (and clear) the active-array sglq entry for this xri. */
	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);


	if (sglq)  {
		if (iocbq->iocb_flag & LPFC_IO_NVMET) {
			/* NVMET sglq: return it to the nvmet free list. */
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_nvmet_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			goto out;
		}

		pring = phba->sli4_hba.els_wq->pring;
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
			(sglq->state != SGL_XRI_ABORTED)) {
			/* Exchange still busy: park the sglq on the aborted
			 * list until the XRI_ABORTED CQE is received.
			 */
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
		} else {
			/* Normal completion: back to the ELS free list. */
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);

			/* Check if TXQ queue needs to be serviced */
			if (!list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
		}
	}

out:
	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
			      LPFC_IO_NVME_LS);
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
1114
1115
1116 /**
1117  * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
1118  * @phba: Pointer to HBA context object.
1119  * @iocbq: Pointer to driver iocb object.
1120  *
1121  * This function is called with hbalock held to release driver
1122  * iocb object to the iocb pool. The iotag in the iocb object
1123  * does not change for each use of the iocb object. This function
1124  * clears all other fields of the iocb object when it is freed.
1125  **/
1126 static void
1127 __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1128 {
1129         size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
1130
1131         lockdep_assert_held(&phba->hbalock);
1132
1133         /*
1134          * Clean all volatile data fields, preserve iotag and node struct.
1135          */
1136         memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
1137         iocbq->sli4_xritag = NO_XRI;
1138         list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1139 }
1140
1141 /**
1142  * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
1143  * @phba: Pointer to HBA context object.
1144  * @iocbq: Pointer to driver iocb object.
1145  *
1146  * This function is called with hbalock held to release driver
1147  * iocb object to the iocb pool. The iotag in the iocb object
1148  * does not change for each use of the iocb object. This function
1149  * clears all other fields of the iocb object when it is freed.
1150  **/
1151 static void
1152 __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1153 {
1154         lockdep_assert_held(&phba->hbalock);
1155
1156         phba->__lpfc_sli_release_iocbq(phba, iocbq);
1157         phba->iocb_cnt--;
1158 }
1159
1160 /**
1161  * lpfc_sli_release_iocbq - Release iocb to the iocb pool
1162  * @phba: Pointer to HBA context object.
1163  * @iocbq: Pointer to driver iocb object.
1164  *
1165  * This function is called with no lock held to release the iocb to
1166  * iocb pool.
1167  **/
1168 void
1169 lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1170 {
1171         unsigned long iflags;
1172
1173         /*
1174          * Clean all volatile data fields, preserve iotag and node struct.
1175          */
1176         spin_lock_irqsave(&phba->hbalock, iflags);
1177         __lpfc_sli_release_iocbq(phba, iocbq);
1178         spin_unlock_irqrestore(&phba->hbalock, iflags);
1179 }
1180
1181 /**
1182  * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
1183  * @phba: Pointer to HBA context object.
1184  * @iocblist: List of IOCBs.
1185  * @ulpstatus: ULP status in IOCB command field.
1186  * @ulpWord4: ULP word-4 in IOCB command field.
1187  *
1188  * This function is called with a list of IOCBs to cancel. It cancels the IOCB
1189  * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpword4 set to the IOCB command
1191  * fields.
1192  **/
1193 void
1194 lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
1195                       uint32_t ulpstatus, uint32_t ulpWord4)
1196 {
1197         struct lpfc_iocbq *piocb;
1198
1199         while (!list_empty(iocblist)) {
1200                 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
1201                 if (!piocb->iocb_cmpl)
1202                         lpfc_sli_release_iocbq(phba, piocb);
1203                 else {
1204                         piocb->iocb.ulpStatus = ulpstatus;
1205                         piocb->iocb.un.ulpWord[4] = ulpWord4;
1206                         (piocb->iocb_cmpl) (phba, piocb, piocb);
1207                 }
1208         }
1209         return;
1210 }
1211
1212 /**
1213  * lpfc_sli_iocb_cmd_type - Get the iocb type
1214  * @iocb_cmnd: iocb command code.
1215  *
1216  * This function is called by ring event handler function to get the iocb type.
1217  * This function translates the iocb command to an iocb command type used to
1218  * decide the final disposition of each completed IOCB.
1219  * The function returns
1220  * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
1221  * LPFC_SOL_IOCB     if it is a solicited iocb completion
1222  * LPFC_ABORT_IOCB   if it is an abort iocb
1223  * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
1224  *
1225  * The caller is not required to hold any lock.
1226  **/
1227 static lpfc_iocb_type
1228 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
1229 {
1230         lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
1231
1232         if (iocb_cmnd > CMD_MAX_IOCB_CMD)
1233                 return 0;
1234
1235         switch (iocb_cmnd) {
1236         case CMD_XMIT_SEQUENCE_CR:
1237         case CMD_XMIT_SEQUENCE_CX:
1238         case CMD_XMIT_BCAST_CN:
1239         case CMD_XMIT_BCAST_CX:
1240         case CMD_ELS_REQUEST_CR:
1241         case CMD_ELS_REQUEST_CX:
1242         case CMD_CREATE_XRI_CR:
1243         case CMD_CREATE_XRI_CX:
1244         case CMD_GET_RPI_CN:
1245         case CMD_XMIT_ELS_RSP_CX:
1246         case CMD_GET_RPI_CR:
1247         case CMD_FCP_IWRITE_CR:
1248         case CMD_FCP_IWRITE_CX:
1249         case CMD_FCP_IREAD_CR:
1250         case CMD_FCP_IREAD_CX:
1251         case CMD_FCP_ICMND_CR:
1252         case CMD_FCP_ICMND_CX:
1253         case CMD_FCP_TSEND_CX:
1254         case CMD_FCP_TRSP_CX:
1255         case CMD_FCP_TRECEIVE_CX:
1256         case CMD_FCP_AUTO_TRSP_CX:
1257         case CMD_ADAPTER_MSG:
1258         case CMD_ADAPTER_DUMP:
1259         case CMD_XMIT_SEQUENCE64_CR:
1260         case CMD_XMIT_SEQUENCE64_CX:
1261         case CMD_XMIT_BCAST64_CN:
1262         case CMD_XMIT_BCAST64_CX:
1263         case CMD_ELS_REQUEST64_CR:
1264         case CMD_ELS_REQUEST64_CX:
1265         case CMD_FCP_IWRITE64_CR:
1266         case CMD_FCP_IWRITE64_CX:
1267         case CMD_FCP_IREAD64_CR:
1268         case CMD_FCP_IREAD64_CX:
1269         case CMD_FCP_ICMND64_CR:
1270         case CMD_FCP_ICMND64_CX:
1271         case CMD_FCP_TSEND64_CX:
1272         case CMD_FCP_TRSP64_CX:
1273         case CMD_FCP_TRECEIVE64_CX:
1274         case CMD_GEN_REQUEST64_CR:
1275         case CMD_GEN_REQUEST64_CX:
1276         case CMD_XMIT_ELS_RSP64_CX:
1277         case DSSCMD_IWRITE64_CR:
1278         case DSSCMD_IWRITE64_CX:
1279         case DSSCMD_IREAD64_CR:
1280         case DSSCMD_IREAD64_CX:
1281                 type = LPFC_SOL_IOCB;
1282                 break;
1283         case CMD_ABORT_XRI_CN:
1284         case CMD_ABORT_XRI_CX:
1285         case CMD_CLOSE_XRI_CN:
1286         case CMD_CLOSE_XRI_CX:
1287         case CMD_XRI_ABORTED_CX:
1288         case CMD_ABORT_MXRI64_CN:
1289         case CMD_XMIT_BLS_RSP64_CX:
1290                 type = LPFC_ABORT_IOCB;
1291                 break;
1292         case CMD_RCV_SEQUENCE_CX:
1293         case CMD_RCV_ELS_REQ_CX:
1294         case CMD_RCV_SEQUENCE64_CX:
1295         case CMD_RCV_ELS_REQ64_CX:
1296         case CMD_ASYNC_STATUS:
1297         case CMD_IOCB_RCV_SEQ64_CX:
1298         case CMD_IOCB_RCV_ELS64_CX:
1299         case CMD_IOCB_RCV_CONT64_CX:
1300         case CMD_IOCB_RET_XRI64_CX:
1301                 type = LPFC_UNSOL_IOCB;
1302                 break;
1303         case CMD_IOCB_XMIT_MSEQ64_CR:
1304         case CMD_IOCB_XMIT_MSEQ64_CX:
1305         case CMD_IOCB_RCV_SEQ_LIST64_CX:
1306         case CMD_IOCB_RCV_ELS_LIST64_CX:
1307         case CMD_IOCB_CLOSE_EXTENDED_CN:
1308         case CMD_IOCB_ABORT_EXTENDED_CN:
1309         case CMD_IOCB_RET_HBQE64_CN:
1310         case CMD_IOCB_FCP_IBIDIR64_CR:
1311         case CMD_IOCB_FCP_IBIDIR64_CX:
1312         case CMD_IOCB_FCP_ITASKMGT64_CX:
1313         case CMD_IOCB_LOGENTRY_CN:
1314         case CMD_IOCB_LOGENTRY_ASYNC_CN:
1315                 printk("%s - Unhandled SLI-3 Command x%x\n",
1316                                 __func__, iocb_cmnd);
1317                 type = LPFC_UNKNOWN_IOCB;
1318                 break;
1319         default:
1320                 type = LPFC_UNKNOWN_IOCB;
1321                 break;
1322         }
1323
1324         return type;
1325 }
1326
1327 /**
1328  * lpfc_sli_ring_map - Issue config_ring mbox for all rings
1329  * @phba: Pointer to HBA context object.
1330  *
1331  * This function is called from SLI initialization code
1332  * to configure every ring of the HBA's SLI interface. The
1333  * caller is not required to hold any lock. This function issues
1334  * a config_ring mailbox command for each ring.
1335  * This function returns zero if successful else returns a negative
1336  * error code.
1337  **/
1338 static int
1339 lpfc_sli_ring_map(struct lpfc_hba *phba)
1340 {
1341         struct lpfc_sli *psli = &phba->sli;
1342         LPFC_MBOXQ_t *pmb;
1343         MAILBOX_t *pmbox;
1344         int i, rc, ret = 0;
1345
1346         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1347         if (!pmb)
1348                 return -ENOMEM;
1349         pmbox = &pmb->u.mb;
1350         phba->link_state = LPFC_INIT_MBX_CMDS;
1351         for (i = 0; i < psli->num_rings; i++) {
1352                 lpfc_config_ring(phba, i, pmb);
1353                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1354                 if (rc != MBX_SUCCESS) {
1355                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1356                                         "0446 Adapter failed to init (%d), "
1357                                         "mbxCmd x%x CFG_RING, mbxStatus x%x, "
1358                                         "ring %d\n",
1359                                         rc, pmbox->mbxCommand,
1360                                         pmbox->mbxStatus, i);
1361                         phba->link_state = LPFC_HBA_ERROR;
1362                         ret = -ENXIO;
1363                         break;
1364                 }
1365         }
1366         mempool_free(pmb, phba->mbox_mem_pool);
1367         return ret;
1368 }
1369
1370 /**
1371  * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
1372  * @phba: Pointer to HBA context object.
1373  * @pring: Pointer to driver SLI ring object.
1374  * @piocb: Pointer to the driver iocb object.
1375  *
1376  * This function is called with hbalock held. The function adds the
1377  * new iocb to txcmplq of the given ring. This function always returns
1378  * 0. If this function is called for ELS ring, this function checks if
1379  * there is a vport associated with the ELS command. This function also
1380  * starts els_tmofunc timer if this is an ELS command.
1381  **/
1382 static int
1383 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1384                         struct lpfc_iocbq *piocb)
1385 {
1386         lockdep_assert_held(&phba->hbalock);
1387
1388         BUG_ON(!piocb);
1389
1390         list_add_tail(&piocb->list, &pring->txcmplq);
1391         piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
1392
1393         if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1394            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
1395            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
1396                 BUG_ON(!piocb->vport);
1397                 if (!(piocb->vport->load_flag & FC_UNLOADING))
1398                         mod_timer(&piocb->vport->els_tmofunc,
1399                                   jiffies +
1400                                   msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
1401         }
1402
1403         return 0;
1404 }
1405
1406 /**
1407  * lpfc_sli_ringtx_get - Get first element of the txq
1408  * @phba: Pointer to HBA context object.
1409  * @pring: Pointer to driver SLI ring object.
1410  *
1411  * This function is called with hbalock held to get next
1412  * iocb in txq of the given ring. If there is any iocb in
1413  * the txq, the function returns first iocb in the list after
1414  * removing the iocb from the list, else it returns NULL.
1415  **/
1416 struct lpfc_iocbq *
1417 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1418 {
1419         struct lpfc_iocbq *cmd_iocb;
1420
1421         lockdep_assert_held(&phba->hbalock);
1422
1423         list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
1424         return cmd_iocb;
1425 }
1426
1427 /**
1428  * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
1429  * @phba: Pointer to HBA context object.
1430  * @pring: Pointer to driver SLI ring object.
1431  *
1432  * This function is called with hbalock held and the caller must post the
1433  * iocb without releasing the lock. If the caller releases the lock,
1434  * iocb slot returned by the function is not guaranteed to be available.
1435  * The function returns pointer to the next available iocb slot if there
1436  * is available slot in the ring, else it returns NULL.
1437  * If the get index of the ring is ahead of the put index, the function
1438  * will post an error attention event to the worker thread to take the
1439  * HBA to offline state.
1440  **/
static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t  max_cmd_idx = pring->sli.sli3.numCiocb;

	lockdep_assert_held(&phba->hbalock);

	/* Advance next_cmdidx (wrapping at max_cmd_idx) -- note the &&
	 * short-circuit: the increment only happens when next_cmdidx has
	 * caught up with cmdidx.
	 */
	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
	   (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
		pring->sli.sli3.next_cmdidx = 0;

	if (unlikely(pring->sli.sli3.local_getidx ==
		pring->sli.sli3.next_cmdidx)) {

		/* Ring looks full: refresh the cached get index from the
		 * port's shared memory before deciding.
		 */
		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
			/* Out-of-range get index from the port: treat as a
			 * fatal adapter error and hand off to the worker.
			 */
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->sli.sli3.local_getidx,
					max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		/* Still equal after the refresh: the ring really is full. */
		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}
1485
1486 /**
1487  * lpfc_sli_next_iotag - Get an iotag for the iocb
1488  * @phba: Pointer to HBA context object.
1489  * @iocbq: Pointer to driver iocb object.
1490  *
1491  * This function gets an iotag for the iocb. If there is no unused iotag and
1492  * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
1493  * array and assigns a new iotag.
1494  * The function returns the allocated iotag if successful, else returns zero.
1495  * Zero is not a valid iotag.
1496  * The caller is not required to hold any lock.
1497  **/
1498 uint16_t
1499 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1500 {
1501         struct lpfc_iocbq **new_arr;
1502         struct lpfc_iocbq **old_arr;
1503         size_t new_len;
1504         struct lpfc_sli *psli = &phba->sli;
1505         uint16_t iotag;
1506
1507         spin_lock_irq(&phba->hbalock);
1508         iotag = psli->last_iotag;
1509         if(++iotag < psli->iocbq_lookup_len) {
1510                 psli->last_iotag = iotag;
1511                 psli->iocbq_lookup[iotag] = iocbq;
1512                 spin_unlock_irq(&phba->hbalock);
1513                 iocbq->iotag = iotag;
1514                 return iotag;
1515         } else if (psli->iocbq_lookup_len < (0xffff
1516                                            - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
1517                 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
1518                 spin_unlock_irq(&phba->hbalock);
1519                 new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
1520                                   GFP_KERNEL);
1521                 if (new_arr) {
1522                         spin_lock_irq(&phba->hbalock);
1523                         old_arr = psli->iocbq_lookup;
1524                         if (new_len <= psli->iocbq_lookup_len) {
1525                                 /* highly unprobable case */
1526                                 kfree(new_arr);
1527                                 iotag = psli->last_iotag;
1528                                 if(++iotag < psli->iocbq_lookup_len) {
1529                                         psli->last_iotag = iotag;
1530                                         psli->iocbq_lookup[iotag] = iocbq;
1531                                         spin_unlock_irq(&phba->hbalock);
1532                                         iocbq->iotag = iotag;
1533                                         return iotag;
1534                                 }
1535                                 spin_unlock_irq(&phba->hbalock);
1536                                 return 0;
1537                         }
1538                         if (psli->iocbq_lookup)
1539                                 memcpy(new_arr, old_arr,
1540                                        ((psli->last_iotag  + 1) *
1541                                         sizeof (struct lpfc_iocbq *)));
1542                         psli->iocbq_lookup = new_arr;
1543                         psli->iocbq_lookup_len = new_len;
1544                         psli->last_iotag = iotag;
1545                         psli->iocbq_lookup[iotag] = iocbq;
1546                         spin_unlock_irq(&phba->hbalock);
1547                         iocbq->iotag = iotag;
1548                         kfree(old_arr);
1549                         return iotag;
1550                 }
1551         } else
1552                 spin_unlock_irq(&phba->hbalock);
1553
1554         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1555                         "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
1556                         psli->last_iotag);
1557
1558         return 0;
1559 }
1560
1561 /**
1562  * lpfc_sli_submit_iocb - Submit an iocb to the firmware
1563  * @phba: Pointer to HBA context object.
1564  * @pring: Pointer to driver SLI ring object.
1565  * @iocb: Pointer to iocb slot in the ring.
1566  * @nextiocb: Pointer to driver iocb object which need to be
1567  *            posted to firmware.
1568  *
1569  * This function is called with hbalock held to post a new iocb to
1570  * the firmware. This function copies the new iocb to ring iocb slot and
1571  * updates the ring pointers. It adds the new iocb to txcmplq if there is
1572  * a completion call back for this iocb else the function will free the
1573  * iocb object.
1574  **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	lockdep_assert_held(&phba->hbalock);
	/*
	 * Set up an iotag; only commands that expect a response
	 * completion carry a non-zero tag.
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;


	/* Record ELS-ring commands in the slow-ring debugfs trace. */
	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Issue iocb command to adapter: copy into the ring slot, then
	 * barrier so the copy is visible before the put-index update below.
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}
1618
1619 /**
1620  * lpfc_sli_update_full_ring - Update the chip attention register
1621  * @phba: Pointer to HBA context object.
1622  * @pring: Pointer to driver SLI ring object.
1623  *
1624  * The caller is not required to hold any lock for calling this function.
1625  * This function updates the chip attention bits for the ring to inform firmware
1626  * that there are pending work to be done for this ring and requests an
1627  * interrupt when there is space available in the ring. This function is
1628  * called when the driver is unable to post more iocbs to the ring due
1629  * to unavailability of space in the ring.
1630  **/
1631 static void
1632 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1633 {
1634         int ringno = pring->ringno;
1635
1636         pring->flag |= LPFC_CALL_RING_AVAILABLE;
1637
1638         wmb();
1639
1640         /*
1641          * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
1642          * The HBA will tell us when an IOCB entry is available.
1643          */
1644         writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
1645         readl(phba->CAregaddr); /* flush */
1646
1647         pring->stats.iocb_cmd_full++;
1648 }
1649
1650 /**
1651  * lpfc_sli_update_ring - Update chip attention register
1652  * @phba: Pointer to HBA context object.
1653  * @pring: Pointer to driver SLI ring object.
1654  *
1655  * This function updates the chip attention register bit for the
1656  * given ring to inform HBA that there is more work to be done
1657  * in this ring. The caller is not required to hold any lock.
1658  **/
1659 static void
1660 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1661 {
1662         int ringno = pring->ringno;
1663
1664         /*
1665          * Tell the HBA that there is work to do in this ring.
1666          */
1667         if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
1668                 wmb();
1669                 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
1670                 readl(phba->CAregaddr); /* flush */
1671         }
1672 }
1673
1674 /**
1675  * lpfc_sli_resume_iocb - Process iocbs in the txq
1676  * @phba: Pointer to HBA context object.
1677  * @pring: Pointer to driver SLI ring object.
1678  *
1679  * This function is called with hbalock held to post pending iocbs
1680  * in the txq to the firmware. This function is called when driver
1681  * detects space available in the ring.
1682  **/
1683 static void
1684 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1685 {
1686         IOCB_t *iocb;
1687         struct lpfc_iocbq *nextiocb;
1688
1689         lockdep_assert_held(&phba->hbalock);
1690
1691         /*
1692          * Check to see if:
1693          *  (a) there is anything on the txq to send
1694          *  (b) link is up
1695          *  (c) link attention events can be processed (fcp ring only)
1696          *  (d) IOCB processing is not blocked by the outstanding mbox command.
1697          */
1698
1699         if (lpfc_is_link_up(phba) &&
1700             (!list_empty(&pring->txq)) &&
1701             (pring->ringno != LPFC_FCP_RING ||
1702              phba->sli.sli_flag & LPFC_PROCESS_LA)) {
1703
1704                 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
1705                        (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
1706                         lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
1707
1708                 if (iocb)
1709                         lpfc_sli_update_ring(phba, pring);
1710                 else
1711                         lpfc_sli_update_full_ring(phba, pring);
1712         }
1713
1714         return;
1715 }
1716
1717 /**
1718  * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
1719  * @phba: Pointer to HBA context object.
1720  * @hbqno: HBQ number.
1721  *
1722  * This function is called with hbalock held to get the next
1723  * available slot for the given HBQ. If there is free slot
1724  * available for the HBQ it will return pointer to the next available
1725  * HBQ entry else it will return NULL.
1726  **/
static struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
	struct hbq_s *hbqp = &phba->hbqs[hbqno];

	lockdep_assert_held(&phba->hbalock);

	/* Advance next_hbqPutIdx (wrapping at entry_count) when it has
	 * caught up with the index last written to the chip.
	 */
	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
		hbqp->next_hbqPutIdx = 0;

	/* Ring looks full per the cached get index; refresh it from the
	 * chip-updated slot before giving up.
	 */
	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
		uint32_t raw_index = phba->hbq_get[hbqno];
		uint32_t getidx = le32_to_cpu(raw_index);

		hbqp->local_hbqGetIdx = getidx;

		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
			/* Firmware reported a bogus index; mark the HBA
			 * in error rather than index past the ring.
			 */
			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"1802 HBQ %d: local_hbqGetIdx "
					"%u is > than hbqp->entry_count %u\n",
					hbqno, hbqp->local_hbqGetIdx,
					hbqp->entry_count);

			phba->link_state = LPFC_HBA_ERROR;
			return NULL;
		}

		/* Still full after the refresh - no slot available. */
		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
			return NULL;
	}

	/* Return the entry at the current put index. */
	return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
			hbqp->hbqPutIdx;
}
1763
1764 /**
1765  * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
1766  * @phba: Pointer to HBA context object.
1767  *
1768  * This function is called with no lock held to free all the
1769  * hbq buffers while uninitializing the SLI interface. It also
1770  * frees the HBQ buffers returned by the firmware but not yet
1771  * processed by the upper layers.
1772  **/
1773 void
1774 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
1775 {
1776         struct lpfc_dmabuf *dmabuf, *next_dmabuf;
1777         struct hbq_dmabuf *hbq_buf;
1778         unsigned long flags;
1779         int i, hbq_count;
1780
1781         hbq_count = lpfc_sli_hbq_count();
1782         /* Return all memory used by all HBQs */
1783         spin_lock_irqsave(&phba->hbalock, flags);
1784         for (i = 0; i < hbq_count; ++i) {
1785                 list_for_each_entry_safe(dmabuf, next_dmabuf,
1786                                 &phba->hbqs[i].hbq_buffer_list, list) {
1787                         hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1788                         list_del(&hbq_buf->dbuf.list);
1789                         (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
1790                 }
1791                 phba->hbqs[i].buffer_count = 0;
1792         }
1793
1794         /* Mark the HBQs not in use */
1795         phba->hbq_in_use = 0;
1796         spin_unlock_irqrestore(&phba->hbalock, flags);
1797 }
1798
/**
 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a
 * hbq buffer to the firmware through the SLI-revision-specific
 * handler (lpfc_sli_hbq_to_firmware_s3/_s4). The function returns
 * zero if it successfully posts the buffer, else it returns an
 * error code.
 **/
static int
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
			 struct hbq_dmabuf *hbq_buf)
{
	lockdep_assert_held(&phba->hbalock);
	/* Indirect call - presumably bound to the s3/s4 variant at
	 * driver setup; confirm against lpfc_init.
	 */
	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
}
1818
1819 /**
1820  * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
1821  * @phba: Pointer to HBA context object.
1822  * @hbqno: HBQ number.
1823  * @hbq_buf: Pointer to HBQ buffer.
1824  *
1825  * This function is called with the hbalock held to post a hbq buffer to the
1826  * firmware. If the function finds an empty slot in the HBQ, it will post the
1827  * buffer and place it on the hbq_buffer_list. The function will return zero if
1828  * it successfully post the buffer else it will return an error.
1829  **/
1830 static int
1831 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
1832                             struct hbq_dmabuf *hbq_buf)
1833 {
1834         struct lpfc_hbq_entry *hbqe;
1835         dma_addr_t physaddr = hbq_buf->dbuf.phys;
1836
1837         lockdep_assert_held(&phba->hbalock);
1838         /* Get next HBQ entry slot to use */
1839         hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
1840         if (hbqe) {
1841                 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1842
1843                 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
1844                 hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
1845                 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
1846                 hbqe->bde.tus.f.bdeFlags = 0;
1847                 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
1848                 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
1849                                 /* Sync SLIM */
1850                 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
1851                 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
1852                                 /* flush */
1853                 readl(phba->hbq_put + hbqno);
1854                 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
1855                 return 0;
1856         } else
1857                 return -ENOMEM;
1858 }
1859
1860 /**
1861  * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
1862  * @phba: Pointer to HBA context object.
1863  * @hbqno: HBQ number.
1864  * @hbq_buf: Pointer to HBQ buffer.
1865  *
1866  * This function is called with the hbalock held to post an RQE to the SLI4
1867  * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
1868  * the hbq_buffer_list and return zero, otherwise it will return an error.
1869  **/
static int
lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	int rc;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;
	struct lpfc_queue *hrq;
	struct lpfc_queue *drq;

	/* On SLI4 only the ELS HBQ is backed by an RQ pair. */
	if (hbqno != LPFC_ELS_HBQ)
		return 1;
	hrq = phba->sli4_hba.hdr_rq;
	drq = phba->sli4_hba.dat_rq;

	lockdep_assert_held(&phba->hbalock);
	/* Header buffer goes on the header RQ, data buffer on the data RQ. */
	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
	if (rc < 0)
		return rc;
	/* Non-negative rc - presumably the posted RQE index (confirm
	 * against lpfc_sli4_rq_put) - is folded into the tag together
	 * with the HBQ number in the high 16 bits.
	 */
	hbq_buf->tag = (rc | (hbqno << 16));
	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
	return 0;
}
1897
/* HBQ for ELS and CT traffic. */
static struct lpfc_hbq_init lpfc_els_hbq = {
	.rn = 1,
	.entry_count = 256,	/* capacity cap used by fill_hbqs */
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_ELS_RING),
	.buffer_count = 0,	/* no buffers posted initially */
	.init_count = 40,	/* buffers posted at SLI3 init */
	.add_count = 40,	/* buffers added per replenish */
};
1909
/* Array of HBQ definitions, indexed by HBQ number. */
struct lpfc_hbq_init *lpfc_hbq_defs[] = {
	&lpfc_els_hbq,
};
1914
1915 /**
1916  * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
1917  * @phba: Pointer to HBA context object.
1918  * @hbqno: HBQ number.
1919  * @count: Number of HBQ buffers to be posted.
1920  *
1921  * This function is called with no lock held to post more hbq buffers to the
1922  * given HBQ. The function returns the number of HBQ buffers successfully
1923  * posted.
1924  **/
static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
	uint32_t i, posted = 0;
	unsigned long flags;
	struct hbq_dmabuf *hbq_buffer;
	LIST_HEAD(hbq_buf_list);
	/* No allocator hooked up for this HBQ - nothing can be posted. */
	if (!phba->hbqs[hbqno].hbq_alloc_buffer)
		return 0;

	/* Clamp count so the HBQ never exceeds its configured capacity. */
	if ((phba->hbqs[hbqno].buffer_count + count) >
	    lpfc_hbq_defs[hbqno]->entry_count)
		count = lpfc_hbq_defs[hbqno]->entry_count -
					phba->hbqs[hbqno].buffer_count;
	if (!count)
		return 0;
	/* Allocate HBQ entries onto a private list, outside the lock. */
	for (i = 0; i < count; i++) {
		hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
		if (!hbq_buffer)
			break;
		list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
	}
	/* Check whether HBQ is still in use */
	spin_lock_irqsave(&phba->hbalock, flags);
	if (!phba->hbq_in_use)
		goto err;
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		/* Tag = running buffer count, HBQ number in high 16 bits. */
		hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
				      (hbqno << 16));
		if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
			phba->hbqs[hbqno].buffer_count++;
			posted++;
		} else
			/* Firmware refused the buffer - release it. */
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return posted;
err:
	/* HBQs were torn down while we were allocating; free everything. */
	spin_unlock_irqrestore(&phba->hbalock, flags);
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	return 0;
}
1974
1975 /**
1976  * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
1977  * @phba: Pointer to HBA context object.
1978  * @qno: HBQ number.
1979  *
1980  * This function posts more buffers to the HBQ. This function
1981  * is called with no lock held. The function returns the number of HBQ entries
1982  * successfully allocated.
1983  **/
1984 int
1985 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
1986 {
1987         if (phba->sli_rev == LPFC_SLI_REV4)
1988                 return 0;
1989         else
1990                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1991                                          lpfc_hbq_defs[qno]->add_count);
1992 }
1993
1994 /**
1995  * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
1996  * @phba: Pointer to HBA context object.
1997  * @qno:  HBQ queue number.
1998  *
1999  * This function is called from SLI initialization code path with
2000  * no lock held to post initial HBQ buffers to firmware. The
2001  * function returns the number of HBQ entries successfully allocated.
2002  **/
2003 static int
2004 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2005 {
2006         if (phba->sli_rev == LPFC_SLI_REV4)
2007                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2008                                         lpfc_hbq_defs[qno]->entry_count);
2009         else
2010                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2011                                          lpfc_hbq_defs[qno]->init_count);
2012 }
2013
/**
 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
 * @rb_list: Pointer to the hbq buffer list.
 *
 * This function removes the first hbq buffer on an hbq list and returns a
 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_get(struct list_head *rb_list)
{
	struct lpfc_dmabuf *d_buf;

	list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
	if (!d_buf)
		return NULL;
	/* Recover the enclosing hbq_dmabuf from its dbuf member. */
	return container_of(d_buf, struct hbq_dmabuf, dbuf);
}
2032
/**
 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
 * @phba: Pointer to HBA context object.
 * @hrq: Pointer to the header receive queue.
 *
 * This function removes the first RQ buffer on an RQ buffer list and returns a
 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
 **/
static struct rqb_dmabuf *
lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
{
	struct lpfc_dmabuf *h_buf;
	struct lpfc_rqb *rqbp;

	rqbp = hrq->rqbp;
	list_remove_head(&rqbp->rqb_buffer_list, h_buf,
			 struct lpfc_dmabuf, list);
	if (!h_buf)
		return NULL;
	/* One fewer buffer is now posted on this RQ. */
	rqbp->buffer_count--;
	return container_of(h_buf, struct rqb_dmabuf, hbuf);
}
2055
2056 /**
2057  * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
2058  * @phba: Pointer to HBA context object.
2059  * @tag: Tag of the hbq buffer.
2060  *
2061  * This function searches for the hbq buffer associated with the given tag in
2062  * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
2063  * otherwise it returns NULL.
2064  **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *hbq_buf;
	uint32_t hbqno;

	/* High 16 bits of the tag select the HBQ. */
	hbqno = tag >> 16;
	if (hbqno >= LPFC_MAX_HBQS)
		return NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		if (hbq_buf->tag == tag) {
			/* Found; note the buffer stays on the list. */
			spin_unlock_irq(&phba->hbalock);
			return hbq_buf;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	/* No buffer carries this tag - log the anomaly. */
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
			"1803 Bad hbq tag. Data: x%x x%x\n",
			tag, phba->hbqs[tag >> 16].buffer_count);
	return NULL;
}
2090
2091 /**
2092  * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2093  * @phba: Pointer to HBA context object.
2094  * @hbq_buffer: Pointer to HBQ buffer.
2095  *
 * This function is called with the hbalock held. This function gives back
2097  * the hbq buffer to firmware. If the HBQ does not have space to
2098  * post the buffer, it will free the buffer.
2099  **/
2100 void
2101 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2102 {
2103         uint32_t hbqno;
2104
2105         if (hbq_buffer) {
2106                 hbqno = hbq_buffer->tag >> 16;
2107                 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2108                         (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2109         }
2110 }
2111
2112 /**
2113  * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
2114  * @mbxCommand: mailbox command code.
2115  *
2116  * This function is called by the mailbox event handler function to verify
2117  * that the completed mailbox command is a legitimate mailbox command. If the
2118  * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2119  * and the mailbox event handler will take the HBA offline.
2120  **/
2121 static int
2122 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2123 {
2124         uint8_t ret;
2125
2126         switch (mbxCommand) {
2127         case MBX_LOAD_SM:
2128         case MBX_READ_NV:
2129         case MBX_WRITE_NV:
2130         case MBX_WRITE_VPARMS:
2131         case MBX_RUN_BIU_DIAG:
2132         case MBX_INIT_LINK:
2133         case MBX_DOWN_LINK:
2134         case MBX_CONFIG_LINK:
2135         case MBX_CONFIG_RING:
2136         case MBX_RESET_RING:
2137         case MBX_READ_CONFIG:
2138         case MBX_READ_RCONFIG:
2139         case MBX_READ_SPARM:
2140         case MBX_READ_STATUS:
2141         case MBX_READ_RPI:
2142         case MBX_READ_XRI:
2143         case MBX_READ_REV:
2144         case MBX_READ_LNK_STAT:
2145         case MBX_REG_LOGIN:
2146         case MBX_UNREG_LOGIN:
2147         case MBX_CLEAR_LA:
2148         case MBX_DUMP_MEMORY:
2149         case MBX_DUMP_CONTEXT:
2150         case MBX_RUN_DIAGS:
2151         case MBX_RESTART:
2152         case MBX_UPDATE_CFG:
2153         case MBX_DOWN_LOAD:
2154         case MBX_DEL_LD_ENTRY:
2155         case MBX_RUN_PROGRAM:
2156         case MBX_SET_MASK:
2157         case MBX_SET_VARIABLE:
2158         case MBX_UNREG_D_ID:
2159         case MBX_KILL_BOARD:
2160         case MBX_CONFIG_FARP:
2161         case MBX_BEACON:
2162         case MBX_LOAD_AREA:
2163         case MBX_RUN_BIU_DIAG64:
2164         case MBX_CONFIG_PORT:
2165         case MBX_READ_SPARM64:
2166         case MBX_READ_RPI64:
2167         case MBX_REG_LOGIN64:
2168         case MBX_READ_TOPOLOGY:
2169         case MBX_WRITE_WWN:
2170         case MBX_SET_DEBUG:
2171         case MBX_LOAD_EXP_ROM:
2172         case MBX_ASYNCEVT_ENABLE:
2173         case MBX_REG_VPI:
2174         case MBX_UNREG_VPI:
2175         case MBX_HEARTBEAT:
2176         case MBX_PORT_CAPABILITIES:
2177         case MBX_PORT_IOV_CONTROL:
2178         case MBX_SLI4_CONFIG:
2179         case MBX_SLI4_REQ_FTRS:
2180         case MBX_REG_FCFI:
2181         case MBX_UNREG_FCFI:
2182         case MBX_REG_VFI:
2183         case MBX_UNREG_VFI:
2184         case MBX_INIT_VPI:
2185         case MBX_INIT_VFI:
2186         case MBX_RESUME_RPI:
2187         case MBX_READ_EVENT_LOG_STATUS:
2188         case MBX_READ_EVENT_LOG:
2189         case MBX_SECURITY_MGMT:
2190         case MBX_AUTH_PORT:
2191         case MBX_ACCESS_VDATA:
2192                 ret = mbxCommand;
2193                 break;
2194         default:
2195                 ret = MBX_SHUTDOWN;
2196                 break;
2197         }
2198         return ret;
2199 }
2200
2201 /**
2202  * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2203  * @phba: Pointer to HBA context object.
2204  * @pmboxq: Pointer to mailbox command.
2205  *
2206  * This is completion handler function for mailbox commands issued from
2207  * lpfc_sli_issue_mbox_wait function. This function is called by the
2208  * mailbox event handler function with no lock held. This function
2209  * will wake up thread waiting on the wait queue pointed by context1
2210  * of the mailbox.
2211  **/
void
lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	wait_queue_head_t *pdone_q;
	unsigned long drvr_flag;

	/*
	 * If pdone_q is empty, the driver thread gave up waiting and
	 * continued running.
	 */
	pmboxq->mbox_flag |= LPFC_MBX_WAKE;
	/* NOTE(review): the wake flag is set before taking hbalock while
	 * pdone_q is read under it - confirm the waiter re-checks the
	 * flag under the same lock so the wakeup cannot be missed.
	 */
	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	pdone_q = (wait_queue_head_t *) pmboxq->context1;
	if (pdone_q)
		wake_up_interruptible(pdone_q);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	return;
}
2230
2231
2232 /**
2233  * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2234  * @phba: Pointer to HBA context object.
2235  * @pmb: Pointer to mailbox object.
2236  *
2237  * This function is the default mailbox completion handler. It
2238  * frees the memory resources associated with the completed mailbox
2239  * command. If the completed command is a REG_LOGIN mailbox command,
2240  * this function will issue a UREG_LOGIN to re-claim the RPI.
2241  **/
void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport  *vport = pmb->vport;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	uint16_t rpi, vpi;
	int rc;

	mp = (struct lpfc_dmabuf *) (pmb->context1);

	/* Free the DMA buffer attached to the mailbox, if any. */
	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}

	/*
	 * If a REG_LOGIN succeeded  after node is destroyed or node
	 * is in re-discovery driver need to cleanup the RPI.
	 */
	if (!(phba->pport->load_flag & FC_UNLOADING) &&
	    pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
	    !pmb->u.mb.mbxStatus) {
		rpi = pmb->u.mb.un.varWords[0];
		vpi = pmb->u.mb.un.varRegLogin.vpi;
		/* Reuse this mailbox to issue the UNREG_LOGIN; it will
		 * come back through this same completion handler.
		 */
		lpfc_unreg_login(phba, vpi, rpi, pmb);
		pmb->vport = vport;
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		/* Mailbox reissued - do not free it below. */
		if (rc != MBX_NOT_FINISHED)
			return;
	}

	/* A successful REG_VPI marks the vport's VPI as registered. */
	if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
		!(phba->pport->load_flag & FC_UNLOADING) &&
		!pmb->u.mb.mbxStatus) {
		shost = lpfc_shost_from_vport(vport);
		spin_lock_irq(shost->host_lock);
		vport->vpi_state |= LPFC_VPI_REGISTERED;
		vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}

	/* Drop the ndlp reference held in context2 - presumably taken
	 * when the REG_LOGIN64 was set up; confirm against the issuer.
	 */
	if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
		ndlp = (struct lpfc_nodelist *)pmb->context2;
		lpfc_nlp_put(ndlp);
		pmb->context2 = NULL;
	}

	/* Check security permission status on INIT_LINK mailbox command */
	if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
	    (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"2860 SLI authentication is required "
				"for INIT_LINK but has not done yet\n");

	/* SLI4_CONFIG mailboxes carry extra resources and need the
	 * dedicated free routine; everything else goes back to the pool.
	 */
	if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
		lpfc_sli4_mbox_cmd_free(phba, pmb);
	else
		mempool_free(pmb, phba->mbox_mem_pool);
}
2304  /**
2305  * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2306  * @phba: Pointer to HBA context object.
2307  * @pmb: Pointer to mailbox object.
2308  *
2309  * This function is the unreg rpi mailbox completion handler. It
2310  * frees the memory resources associated with the completed mailbox
 * command. An additional reference is put on the ndlp to prevent
2312  * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2313  * the unreg mailbox command completes, this routine puts the
2314  * reference back.
2315  *
2316  **/
2317 void
2318 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2319 {
2320         struct lpfc_vport  *vport = pmb->vport;
2321         struct lpfc_nodelist *ndlp;
2322
2323         ndlp = pmb->context1;
2324         if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2325                 if (phba->sli_rev == LPFC_SLI_REV4 &&
2326                     (bf_get(lpfc_sli_intf_if_type,
2327                      &phba->sli4_hba.sli_intf) ==
2328                      LPFC_SLI_INTF_IF_TYPE_2)) {
2329                         if (ndlp) {
2330                                 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
2331                                                  "0010 UNREG_LOGIN vpi:%x "
2332                                                  "rpi:%x DID:%x map:%x %p\n",
2333                                                  vport->vpi, ndlp->nlp_rpi,
2334                                                  ndlp->nlp_DID,
2335                                                  ndlp->nlp_usg_map, ndlp);
2336                                 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2337                                 lpfc_nlp_put(ndlp);
2338                         }
2339                 }
2340         }
2341
2342         mempool_free(pmb, phba->mbox_mem_pool);
2343 }
2344
2345 /**
2346  * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
2347  * @phba: Pointer to HBA context object.
2348  *
2349  * This function is called with no lock held. This function processes all
2350  * the completed mailbox commands and gives it to upper layers. The interrupt
2351  * service routine processes mailbox completion interrupt and adds completed
2352  * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
2353  * Worker thread call lpfc_sli_handle_mb_event, which will return the
2354  * completed mailbox commands in mboxq_cmpl queue to the upper layers. This
2355  * function returns the mailbox commands to the upper layer by calling the
2356  * completion handler function of each mailbox.
2357  **/
2358 int
2359 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2360 {
2361         MAILBOX_t *pmbox;
2362         LPFC_MBOXQ_t *pmb;
2363         int rc;
2364         LIST_HEAD(cmplq);
2365
2366         phba->sli.slistat.mbox_event++;
2367
2368         /* Get all completed mailboxe buffers into the cmplq */
2369         spin_lock_irq(&phba->hbalock);
2370         list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2371         spin_unlock_irq(&phba->hbalock);
2372
2373         /* Get a Mailbox buffer to setup mailbox commands for callback */
2374         do {
2375                 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2376                 if (pmb == NULL)
2377                         break;
2378
2379                 pmbox = &pmb->u.mb;
2380
2381                 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2382                         if (pmb->vport) {
2383                                 lpfc_debugfs_disc_trc(pmb->vport,
2384                                         LPFC_DISC_TRC_MBOX_VPORT,
2385                                         "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2386                                         (uint32_t)pmbox->mbxCommand,
2387                                         pmbox->un.varWords[0],
2388                                         pmbox->un.varWords[1]);
2389                         }
2390                         else {
2391                                 lpfc_debugfs_disc_trc(phba->pport,
2392                                         LPFC_DISC_TRC_MBOX,
2393                                         "MBOX cmpl:       cmd:x%x mb:x%x x%x",
2394                                         (uint32_t)pmbox->mbxCommand,
2395                                         pmbox->un.varWords[0],
2396                                         pmbox->un.varWords[1]);
2397                         }
2398                 }
2399
2400                 /*
2401                  * It is a fatal error if unknown mbox command completion.
2402                  */
2403                 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2404                     MBX_SHUTDOWN) {
2405                         /* Unknown mailbox command compl */
2406                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2407                                         "(%d):0323 Unknown Mailbox command "
2408                                         "x%x (x%x/x%x) Cmpl\n",
2409                                         pmb->vport ? pmb->vport->vpi : 0,
2410                                         pmbox->mbxCommand,
2411                                         lpfc_sli_config_mbox_subsys_get(phba,
2412                                                                         pmb),
2413                                         lpfc_sli_config_mbox_opcode_get(phba,
2414                                                                         pmb));
2415                         phba->link_state = LPFC_HBA_ERROR;
2416                         phba->work_hs = HS_FFER3;
2417                         lpfc_handle_eratt(phba);
2418                         continue;
2419                 }
2420
2421                 if (pmbox->mbxStatus) {
2422                         phba->sli.slistat.mbox_stat_err++;
2423                         if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2424                                 /* Mbox cmd cmpl error - RETRYing */
2425                                 lpfc_printf_log(phba, KERN_INFO,
2426                                         LOG_MBOX | LOG_SLI,
2427                                         "(%d):0305 Mbox cmd cmpl "
2428                                         "error - RETRYing Data: x%x "
2429                                         "(x%x/x%x) x%x x%x x%x\n",
2430                                         pmb->vport ? pmb->vport->vpi : 0,
2431                                         pmbox->mbxCommand,
2432                                         lpfc_sli_config_mbox_subsys_get(phba,
2433                                                                         pmb),
2434                                         lpfc_sli_config_mbox_opcode_get(phba,
2435                                                                         pmb),
2436                                         pmbox->mbxStatus,
2437                                         pmbox->un.varWords[0],
2438                                         pmb->vport->port_state);
2439                                 pmbox->mbxStatus = 0;
2440                                 pmbox->mbxOwner = OWN_HOST;
2441                                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2442                                 if (rc != MBX_NOT_FINISHED)
2443                                         continue;
2444                         }
2445                 }
2446
2447                 /* Mailbox cmd <cmd> Cmpl <cmpl> */
2448                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2449                                 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
2450                                 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2451                                 "x%x x%x x%x\n",
2452                                 pmb->vport ? pmb->vport->vpi : 0,
2453                                 pmbox->mbxCommand,
2454                                 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2455                                 lpfc_sli_config_mbox_opcode_get(phba, pmb),
2456                                 pmb->mbox_cmpl,
2457                                 *((uint32_t *) pmbox),
2458                                 pmbox->un.varWords[0],
2459                                 pmbox->un.varWords[1],
2460                                 pmbox->un.varWords[2],
2461                                 pmbox->un.varWords[3],
2462                                 pmbox->un.varWords[4],
2463                                 pmbox->un.varWords[5],
2464                                 pmbox->un.varWords[6],
2465                                 pmbox->un.varWords[7],
2466                                 pmbox->un.varWords[8],
2467                                 pmbox->un.varWords[9],
2468                                 pmbox->un.varWords[10]);
2469
2470                 if (pmb->mbox_cmpl)
2471                         pmb->mbox_cmpl(phba,pmb);
2472         } while (1);
2473         return 0;
2474 }
2475
2476 /**
2477  * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
2478  * @phba: Pointer to HBA context object.
2479  * @pring: Pointer to driver SLI ring object.
2480  * @tag: buffer tag.
2481  *
2482  * This function is called with no lock held. When QUE_BUFTAG_BIT bit
2483  * is set in the tag the buffer is posted for a particular exchange,
2484  * the function will return the buffer without replacing the buffer.
2485  * If the buffer is for unsolicited ELS or CT traffic, this function
2486  * returns the buffer and also posts another buffer to the firmware.
2487  **/
2488 static struct lpfc_dmabuf *
2489 lpfc_sli_get_buff(struct lpfc_hba *phba,
2490                   struct lpfc_sli_ring *pring,
2491                   uint32_t tag)
2492 {
2493         struct hbq_dmabuf *hbq_entry;
2494
2495         if (tag & QUE_BUFTAG_BIT)
2496                 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
2497         hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2498         if (!hbq_entry)
2499                 return NULL;
2500         return &hbq_entry->dbuf;
2501 }
2502
2503 /**
2504  * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2505  * @phba: Pointer to HBA context object.
2506  * @pring: Pointer to driver SLI ring object.
2507  * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2508  * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2509  * @fch_type: the type for the first frame of the sequence.
2510  *
2511  * This function is called with no lock held. This function uses the r_ctl and
2512  * type of the received sequence to find the correct callback function to call
2513  * to process the sequence.
2514  **/
2515 static int
2516 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2517                          struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2518                          uint32_t fch_type)
2519 {
2520         int i;
2521
2522         switch (fch_type) {
2523         case FC_TYPE_NVME:
2524                 lpfc_nvmet_unsol_ls_event(phba, pring, saveq);
2525                 return 1;
2526         default:
2527                 break;
2528         }
2529
2530         /* unSolicited Responses */
2531         if (pring->prt[0].profile) {
2532                 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2533                         (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2534                                                                         saveq);
2535                 return 1;
2536         }
2537         /* We must search, based on rctl / type
2538            for the right routine */
2539         for (i = 0; i < pring->num_mask; i++) {
2540                 if ((pring->prt[i].rctl == fch_r_ctl) &&
2541                     (pring->prt[i].type == fch_type)) {
2542                         if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2543                                 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2544                                                 (phba, pring, saveq);
2545                         return 1;
2546                 }
2547         }
2548         return 0;
2549 }
2550
/**
 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the unsolicited iocb.
 *
 * This function is called with no lock held by the ring event handler
 * when there is an unsolicited iocb posted to the response ring by the
 * firmware. This function gets the buffer associated with the iocbs
 * and calls the event handler for the ring. This function handles both
 * qring buffers and hbq buffers.
 * When the function returns 1 the caller can free the iocb object otherwise
 * upper layer functions will free the iocb objects.
 **/
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    struct lpfc_iocbq *saveq)
{
	IOCB_t           * irsp;
	WORD5            * w5p;
	uint32_t           Rctl, Type;
	struct lpfc_iocbq *iocbq;
	struct lpfc_dmabuf *dmzbuf;

	irsp = &(saveq->iocb);

	/* Async status iocbs carry no buffers; hand them to the ring's
	 * async handler, or just log if none is registered.
	 */
	if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
		if (pring->lpfc_sli_rcv_async_status)
			pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
		else
			lpfc_printf_log(phba,
					KERN_WARNING,
					LOG_SLI,
					"0316 Ring %d handler: unexpected "
					"ASYNC_STATUS iocb received evt_code "
					"0x%x\n",
					pring->ringno,
					irsp->un.asyncstat.evt_code);
		return 1;
	}

	/* RET_XRI completions on HBQ-enabled ports only return buffers
	 * to the pool; up to three buffer tags may be present per iocb.
	 */
	if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
		(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
		if (irsp->ulpBdeCount > 0) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
					irsp->un.ulpWord[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 1) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
					irsp->unsli3.sli3Words[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 2) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
				irsp->unsli3.sli3Words[7]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		return 1;
	}

	/* HBQ mode: translate the buffer tags in this iocb, and in every
	 * chained iocb of the sequence, into dmabuf pointers stashed in
	 * context2/context3 for the upper-layer handler.
	 */
	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		if (irsp->ulpBdeCount != 0) {
			saveq->context2 = lpfc_sli_get_buff(phba, pring,
						irsp->un.ulpWord[3]);
			if (!saveq->context2)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0341 Ring %d Cannot find buffer for "
					"an unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->un.ulpWord[3]);
		}
		if (irsp->ulpBdeCount == 2) {
			saveq->context3 = lpfc_sli_get_buff(phba, pring,
						irsp->unsli3.sli3Words[7]);
			if (!saveq->context3)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0342 Ring %d Cannot find buffer for an"
					" unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->unsli3.sli3Words[7]);
		}
		list_for_each_entry(iocbq, &saveq->list, list) {
			irsp = &(iocbq->iocb);
			if (irsp->ulpBdeCount != 0) {
				iocbq->context2 = lpfc_sli_get_buff(phba, pring,
							irsp->un.ulpWord[3]);
				if (!iocbq->context2)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0343 Ring %d Cannot find "
						"buffer for an unsolicited iocb"
						". tag 0x%x\n", pring->ringno,
						irsp->un.ulpWord[3]);
			}
			if (irsp->ulpBdeCount == 2) {
				iocbq->context3 = lpfc_sli_get_buff(phba, pring,
						irsp->unsli3.sli3Words[7]);
				if (!iocbq->context3)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0344 Ring %d Cannot find "
						"buffer for an unsolicited "
						"iocb. tag 0x%x\n",
						pring->ringno,
						irsp->unsli3.sli3Words[7]);
			}
		}
	}
	/* Multi-frame sequence: attach this frame to an existing entry on
	 * iocb_continue_saveq (matched by ox_id), or start a new one. On
	 * the final frame (status != INTERMED_RSP), dequeue the whole
	 * sequence and continue processing with its first frame.
	 *
	 * NOTE(review): when !found and the status is not INTERMED_RSP,
	 * iocbq is the exhausted list_for_each_entry cursor (not a real
	 * entry) — presumably that combination cannot occur for
	 * RCV_CONT64_CX; confirm against firmware behavior.
	 */
	if (irsp->ulpBdeCount != 0 &&
	    (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
	     irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
		int found = 0;

		/* search continue save q for same XRI */
		list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
			if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
				saveq->iocb.unsli3.rcvsli3.ox_id) {
				list_add_tail(&saveq->list, &iocbq->list);
				found = 1;
				break;
			}
		}
		if (!found)
			list_add_tail(&saveq->clist,
				      &pring->iocb_continue_saveq);
		if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
			list_del_init(&iocbq->clist);
			saveq = iocbq;
			irsp = &(saveq->iocb);
		} else
			return 0;
	}
	/* Derive Rctl/Type: fixed for ELS receive commands, otherwise
	 * taken from word 5 of the first frame.
	 */
	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
	    (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
	    (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
		Rctl = FC_RCTL_ELS_REQ;
		Type = FC_TYPE_ELS;
	} else {
		w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
		Rctl = w5p->hcsw.Rctl;
		Type = w5p->hcsw.Type;

		/* Firmware Workaround: some firmware leaves Rctl zero on
		 * ELS-ring receive sequences; patch it up to ELS.
		 */
		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
			(irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
			 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
			Rctl = FC_RCTL_ELS_REQ;
			Type = FC_TYPE_ELS;
			w5p->hcsw.Rctl = Rctl;
			w5p->hcsw.Type = Type;
		}
	}

	if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0313 Ring %d handler: unexpected Rctl x%x "
				"Type x%x received\n",
				pring->ringno, Rctl, Type);

	return 1;
}
2722
2723 /**
2724  * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
2725  * @phba: Pointer to HBA context object.
2726  * @pring: Pointer to driver SLI ring object.
2727  * @prspiocb: Pointer to response iocb object.
2728  *
2729  * This function looks up the iocb_lookup table to get the command iocb
2730  * corresponding to the given response iocb using the iotag of the
2731  * response iocb. This function is called with the hbalock held.
2732  * This function returns the command iocb object if it finds the command
2733  * iocb else returns NULL.
2734  **/
2735 static struct lpfc_iocbq *
2736 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2737                       struct lpfc_sli_ring *pring,
2738                       struct lpfc_iocbq *prspiocb)
2739 {
2740         struct lpfc_iocbq *cmd_iocb = NULL;
2741         uint16_t iotag;
2742         lockdep_assert_held(&phba->hbalock);
2743
2744         iotag = prspiocb->iocb.ulpIoTag;
2745
2746         if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2747                 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2748                 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2749                         /* remove from txcmpl queue list */
2750                         list_del_init(&cmd_iocb->list);
2751                         cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2752                         return cmd_iocb;
2753                 }
2754         }
2755
2756         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2757                         "0317 iotag x%x is out of "
2758                         "range: max iotag x%x wd0 x%x\n",
2759                         iotag, phba->sli.last_iotag,
2760                         *(((uint32_t *) &prspiocb->iocb) + 7));
2761         return NULL;
2762 }
2763
2764 /**
2765  * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
2766  * @phba: Pointer to HBA context object.
2767  * @pring: Pointer to driver SLI ring object.
2768  * @iotag: IOCB tag.
2769  *
2770  * This function looks up the iocb_lookup table to get the command iocb
2771  * corresponding to the given iotag. This function is called with the
2772  * hbalock held.
2773  * This function returns the command iocb object if it finds the command
2774  * iocb else returns NULL.
2775  **/
2776 static struct lpfc_iocbq *
2777 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2778                              struct lpfc_sli_ring *pring, uint16_t iotag)
2779 {
2780         struct lpfc_iocbq *cmd_iocb = NULL;
2781
2782         lockdep_assert_held(&phba->hbalock);
2783         if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2784                 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2785                 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2786                         /* remove from txcmpl queue list */
2787                         list_del_init(&cmd_iocb->list);
2788                         cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2789                         return cmd_iocb;
2790                 }
2791         }
2792
2793         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2794                         "0372 iotag x%x lookup error: max iotag (x%x) "
2795                         "iocb_flag x%x\n",
2796                         iotag, phba->sli.last_iotag,
2797                         cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
2798         return NULL;
2799 }
2800
/**
 * lpfc_sli_process_sol_iocb - process solicited iocb completion
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the response iocb to be processed.
 *
 * This function is called by the ring event handler for non-fcp
 * rings when there is a new response iocb in the response ring.
 * The caller is not required to hold any locks. This function
 * gets the command iocb associated with the response iocb and
 * calls the completion handler for the command iocb. If there
 * is no completion handler, the function will free the resources
 * associated with command iocb. If the response iocb is for
 * an already aborted command iocb, the status of the completion
 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
 * This function always returns 1.
 **/
static int
lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			  struct lpfc_iocbq *saveq)
{
	struct lpfc_iocbq *cmdiocbp;
	int rc = 1;		/* always returned unchanged */
	unsigned long iflag;

	/* Based on the iotag field, get the cmd IOCB from the txcmplq */
	spin_lock_irqsave(&phba->hbalock, iflag);
	cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	if (cmdiocbp) {
		if (cmdiocbp->iocb_cmpl) {
			/*
			 * If an ELS command failed send an event to mgmt
			 * application.
			 */
			if (saveq->iocb.ulpStatus &&
			     (pring->ringno == LPFC_ELS_RING) &&
			     (cmdiocbp->iocb.ulpCommand ==
				CMD_ELS_REQUEST64_CR))
				lpfc_send_els_failure_event(phba,
					cmdiocbp, saveq);

			/*
			 * Post all ELS completions to the worker thread.
			 * All other are passed to the completion callback.
			 */
			if (pring->ringno == LPFC_ELS_RING) {
				/* SLI3: a driver-initiated abort is rewritten
				 * as LOCAL_REJECT/SLI_ABORTED before the
				 * completion callback sees it.
				 */
				if ((phba->sli_rev < LPFC_SLI_REV4) &&
				    (cmdiocbp->iocb_flag &
							LPFC_DRIVER_ABORTED)) {
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
					cmdiocbp->iocb_flag &=
						~LPFC_DRIVER_ABORTED;
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
					saveq->iocb.ulpStatus =
						IOSTAT_LOCAL_REJECT;
					saveq->iocb.un.ulpWord[4] =
						IOERR_SLI_ABORTED;

					/* Firmware could still be in progress
					 * of DMAing payload, so don't free data
					 * buffer till after a hbeat.
					 */
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
					saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
				}
				if (phba->sli_rev == LPFC_SLI_REV4) {
					if (saveq->iocb_flag &
					    LPFC_EXCHANGE_BUSY) {
						/* Set cmdiocb flag for the
						 * exchange busy so sgl (xri)
						 * will not be released until
						 * the abort xri is received
						 * from hba.
						 */
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						cmdiocbp->iocb_flag |=
							LPFC_EXCHANGE_BUSY;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
					}
					if (cmdiocbp->iocb_flag &
					    LPFC_DRIVER_ABORTED) {
						/*
						 * Clear LPFC_DRIVER_ABORTED
						 * bit in case it was driver
						 * initiated abort.
						 */
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						cmdiocbp->iocb_flag &=
							~LPFC_DRIVER_ABORTED;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
						cmdiocbp->iocb.ulpStatus =
							IOSTAT_LOCAL_REJECT;
						cmdiocbp->iocb.un.ulpWord[4] =
							IOERR_ABORT_REQUESTED;
						/*
						 * For SLI4, irsiocb contains
						 * NO_XRI in sli_xritag, it
						 * shall not affect releasing
						 * sgl (xri) process.
						 */
						saveq->iocb.ulpStatus =
							IOSTAT_LOCAL_REJECT;
						saveq->iocb.un.ulpWord[4] =
							IOERR_SLI_ABORTED;
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						saveq->iocb_flag |=
							LPFC_DELAY_MEM_FREE;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
					}
				}
			}
			(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
		} else
			/* No completion handler: just release the iocb */
			lpfc_sli_release_iocbq(phba, cmdiocbp);
	} else {
		/*
		 * Unknown initiating command based on the response iotag.
		 * This could be the case on the ELS ring because of
		 * lpfc_els_abort().
		 */
		if (pring->ringno != LPFC_ELS_RING) {
			/*
			 * Ring <ringno> handler: unexpected completion IoTag
			 * <IoTag>
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					 "0322 Ring %d handler: "
					 "unexpected completion IoTag x%x "
					 "Data: x%x x%x x%x x%x\n",
					 pring->ringno,
					 saveq->iocb.ulpIoTag,
					 saveq->iocb.ulpStatus,
					 saveq->iocb.un.ulpWord[4],
					 saveq->iocb.ulpCommand,
					 saveq->iocb.ulpContext);
		}
	}

	return rc;
}
2954
2955 /**
2956  * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
2957  * @phba: Pointer to HBA context object.
2958  * @pring: Pointer to driver SLI ring object.
2959  *
 * This function is called from the iocb ring event handlers when the
 * put pointer is ahead of the get pointer for a ring. It signals an
 * error attention condition to the worker thread, and the worker
 * thread will then transition the HBA to the offline state.
2964  **/
2965 static void
2966 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2967 {
2968         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2969         /*
2970          * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
2971          * rsp ring <portRspMax>
2972          */
2973         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2974                         "0312 Ring %d handler: portRspPut %d "
2975                         "is bigger than rsp ring %d\n",
2976                         pring->ringno, le32_to_cpu(pgp->rspPutInx),
2977                         pring->sli.sli3.numRiocb);
2978
2979         phba->link_state = LPFC_HBA_ERROR;
2980
2981         /*
2982          * All error attention handlers are posted to
2983          * worker thread
2984          */
2985         phba->work_ha |= HA_ERATT;
2986         phba->work_hs = HS_FFER3;
2987
2988         lpfc_worker_wake_up(phba);
2989
2990         return;
2991 }
2992
2993 /**
2994  * lpfc_poll_eratt - Error attention polling timer timeout handler
 * @ptr: Pointer to the HBA context object, cast to an unsigned long.
2996  *
2997  * This function is invoked by the Error Attention polling timer when the
2998  * timer times out. It will check the SLI Error Attention register for
2999  * possible attention events. If so, it will post an Error Attention event
3000  * and wake up worker thread to process it. Otherwise, it will set up the
3001  * Error Attention polling timer for the next poll.
3002  **/
3003 void lpfc_poll_eratt(unsigned long ptr)
3004 {
3005         struct lpfc_hba *phba;
3006         uint32_t eratt = 0;
3007         uint64_t sli_intr, cnt;
3008
3009         phba = (struct lpfc_hba *)ptr;
3010
3011         /* Here we will also keep track of interrupts per sec of the hba */
3012         sli_intr = phba->sli.slistat.sli_intr;
3013
3014         if (phba->sli.slistat.sli_prev_intr > sli_intr)
3015                 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3016                         sli_intr);
3017         else
3018                 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3019
3020         /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3021         do_div(cnt, phba->eratt_poll_interval);
3022         phba->sli.slistat.sli_ips = cnt;
3023
3024         phba->sli.slistat.sli_prev_intr = sli_intr;
3025
3026         /* Check chip HA register for error event */
3027         eratt = lpfc_sli_check_eratt(phba);
3028
3029         if (eratt)
3030                 /* Tell the worker thread there is work to do */
3031                 lpfc_worker_wake_up(phba);
3032         else
3033                 /* Restart the timer for next eratt poll */
3034                 mod_timer(&phba->eratt_poll,
3035                           jiffies +
3036                           msecs_to_jiffies(1000 * phba->eratt_poll_interval));
3037         return;
3038 }
3039
3040
3041 /**
3042  * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
3043  * @phba: Pointer to HBA context object.
3044  * @pring: Pointer to driver SLI ring object.
3045  * @mask: Host attention register mask for this ring.
3046  *
3047  * This function is called from the interrupt context when there is a ring
3048  * event for the fcp ring. The caller does not hold any lock.
3049  * The function processes each response iocb in the response ring until it
3050  * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
3051  * LE bit set. The function will call the completion handler of the command iocb
3052  * if the response iocb indicates a completion for a command iocb or it is
3053  * an abort completion. The function will call lpfc_sli_process_unsol_iocb
3054  * function if this is an unsolicited iocb.
3055  * This routine presumes LPFC_FCP_RING handling and doesn't bother
3056  * to check it explicitly.
3057  */
3058 int
3059 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3060                                 struct lpfc_sli_ring *pring, uint32_t mask)
3061 {
3062         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3063         IOCB_t *irsp = NULL;
3064         IOCB_t *entry = NULL;
3065         struct lpfc_iocbq *cmdiocbq = NULL;
3066         struct lpfc_iocbq rspiocbq;
3067         uint32_t status;
3068         uint32_t portRspPut, portRspMax;
3069         int rc = 1;
3070         lpfc_iocb_type type;
3071         unsigned long iflag;
3072         uint32_t rsp_cmpl = 0;
3073
3074         spin_lock_irqsave(&phba->hbalock, iflag);
3075         pring->stats.iocb_event++;
3076
3077         /*
3078          * The next available response entry should never exceed the maximum
3079<