1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8
9 #include <linux/delay.h>
10 #include <scsi/scsi_tcq.h>
11
12 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
13 static void qla2x00_process_completed_request(struct scsi_qla_host *,
14         struct req_que *, uint32_t);
15 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
16 static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *);
17 static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
18         sts_entry_t *);
19 static struct scsi_qla_host *qla2x00_get_rsp_host(struct rsp_que *);
20
21 /**
22  * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
23  * @irq: Interrupt number
24  * @dev_id: SCSI driver HA context
25  *
26  * Called by system whenever the host adapter generates an interrupt.
27  *
28  * Returns handled flag.
29  */
30 irqreturn_t
31 qla2100_intr_handler(int irq, void *dev_id)
32 {
33         scsi_qla_host_t *vha;
34         struct qla_hw_data *ha;
35         struct device_reg_2xxx __iomem *reg;
36         int             status;
37         unsigned long   iter;
38         uint16_t        hccr;
39         uint16_t        mb[4];
40         struct rsp_que *rsp;
41
42         rsp = (struct rsp_que *) dev_id;
43         if (!rsp) {
44                 printk(KERN_INFO
45                     "%s(): NULL response queue pointer\n", __func__);
46                 return (IRQ_NONE);
47         }
48
49         ha = rsp->hw;
50         reg = &ha->iobase->isp;
51         status = 0;
52
53         spin_lock(&ha->hardware_lock);
54         vha = qla2x00_get_rsp_host(rsp);
55         for (iter = 50; iter--; ) {
56                 hccr = RD_REG_WORD(&reg->hccr);
57                 if (hccr & HCCR_RISC_PAUSE) {
58                         if (pci_channel_offline(ha->pdev))
59                                 break;
60
61                         /*
62                          * Issue a "HARD" reset in order for the RISC interrupt
63                          * bit to be cleared.  Schedule a big hammer to get
64                          * out of the RISC PAUSED state.
65                          */
66                         WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
67                         RD_REG_WORD(&reg->hccr);
68
69                         ha->isp_ops->fw_dump(vha, 1);
70                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
71                         break;
72                 } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
73                         break;
74
75                 if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
76                         WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
77                         RD_REG_WORD(&reg->hccr);
78
79                         /* Get mailbox data. */
80                         mb[0] = RD_MAILBOX_REG(ha, reg, 0);
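                        /*
                         * Mailbox register 0 values 0x4000-0x7fff report a
                         * mailbox command completion status; values
                         * 0x8000-0xbfff report an asynchronous event.
                         */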
81                         if (mb[0] > 0x3fff && mb[0] < 0x8000) {
82                                 qla2x00_mbx_completion(vha, mb[0]);
83                                 status |= MBX_INTERRUPT;
84                         } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
85                                 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
86                                 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
87                                 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
88                                 qla2x00_async_event(vha, rsp, mb);
89                         } else {
90                                 /*EMPTY*/
91                                 DEBUG2(printk("scsi(%ld): Unrecognized "
92                                     "interrupt type (%d).\n",
93                                     vha->host_no, mb[0]));
94                         }
95                         /* Release mailbox registers. */
96                         WRT_REG_WORD(&reg->semaphore, 0);
97                         RD_REG_WORD(&reg->semaphore);
98                 } else {
99                         qla2x00_process_response_queue(rsp);
100
101                         WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
102                         RD_REG_WORD(&reg->hccr);
103                 }
104         }
105         spin_unlock(&ha->hardware_lock);
106
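        /* Signal completion to any mailbox command waiting on this interrupt. */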
107         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
108             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
109                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
110                 complete(&ha->mbx_intr_comp);
111         }
112
113         return (IRQ_HANDLED);
114 }
115
116 /**
117  * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
118  * @irq: Interrupt number
119  * @dev_id: SCSI driver HA context
120  *
121  * Called by system whenever the host adapter generates an interrupt.
122  *
123  * Returns handled flag.
124  */
125 irqreturn_t
126 qla2300_intr_handler(int irq, void *dev_id)
127 {
128         scsi_qla_host_t *vha;
129         struct device_reg_2xxx __iomem *reg;
130         int             status;
131         unsigned long   iter;
132         uint32_t        stat;
133         uint16_t        hccr;
134         uint16_t        mb[4];
135         struct rsp_que *rsp;
136         struct qla_hw_data *ha;
137
138         rsp = (struct rsp_que *) dev_id;
139         if (!rsp) {
140                 printk(KERN_INFO
141                     "%s(): NULL response queue pointer\n", __func__);
142                 return (IRQ_NONE);
143         }
144
145         ha = rsp->hw;
146         reg = &ha->iobase->isp;
147         status = 0;
148
149         spin_lock(&ha->hardware_lock);
150         vha = qla2x00_get_rsp_host(rsp);
151         for (iter = 50; iter--; ) {
152                 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
153                 if (stat & HSR_RISC_PAUSED) {
154                         if (pci_channel_offline(ha->pdev))
155                                 break;
156
157                         hccr = RD_REG_WORD(&reg->hccr);
158                         if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
159                                 qla_printk(KERN_INFO, ha, "Parity error -- "
160                                     "HCCR=%x, Dumping firmware!\n", hccr);
161                         else
162                                 qla_printk(KERN_INFO, ha, "RISC paused -- "
163                                     "HCCR=%x, Dumping firmware!\n", hccr);
164
165                         /*
166                          * Issue a "HARD" reset in order for the RISC
167                          * interrupt bit to be cleared.  Schedule a big
168                          * hammer to get out of the RISC PAUSED state.
169                          */
170                         WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
171                         RD_REG_WORD(&reg->hccr);
172
173                         ha->isp_ops->fw_dump(vha, 1);
174                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
175                         break;
176                 } else if ((stat & HSR_RISC_INT) == 0)
177                         break;
178
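                /*
                 * The low byte of the host status selects the interrupt
                 * source: 0x01/0x02/0x10/0x11 mailbox command completion,
                 * 0x12 asynchronous event, 0x13 response queue update,
                 * 0x15/0x16 fast-post SCSI command completion.
                 */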
179                 switch (stat & 0xff) {
180                 case 0x1:
181                 case 0x2:
182                 case 0x10:
183                 case 0x11:
184                         qla2x00_mbx_completion(vha, MSW(stat));
185                         status |= MBX_INTERRUPT;
186
187                         /* Release mailbox registers. */
188                         WRT_REG_WORD(&reg->semaphore, 0);
189                         break;
190                 case 0x12:
191                         mb[0] = MSW(stat);
192                         mb[1] = RD_MAILBOX_REG(ha, reg, 1);
193                         mb[2] = RD_MAILBOX_REG(ha, reg, 2);
194                         mb[3] = RD_MAILBOX_REG(ha, reg, 3);
195                         qla2x00_async_event(vha, rsp, mb);
196                         break;
197                 case 0x13:
198                         qla2x00_process_response_queue(rsp);
199                         break;
200                 case 0x15:
201                         mb[0] = MBA_CMPLT_1_16BIT;
202                         mb[1] = MSW(stat);
203                         qla2x00_async_event(vha, rsp, mb);
204                         break;
205                 case 0x16:
206                         mb[0] = MBA_SCSI_COMPLETION;
207                         mb[1] = MSW(stat);
208                         mb[2] = RD_MAILBOX_REG(ha, reg, 2);
209                         qla2x00_async_event(vha, rsp, mb);
210                         break;
211                 default:
212                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
213                             "(%d).\n",
214                             vha->host_no, stat & 0xff));
215                         break;
216                 }
217                 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
218                 RD_REG_WORD_RELAXED(&reg->hccr);
219         }
220         spin_unlock(&ha->hardware_lock);
221
222         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
223             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
224                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
225                 complete(&ha->mbx_intr_comp);
226         }
227
228         return (IRQ_HANDLED);
229 }
230
231 /**
232  * qla2x00_mbx_completion() - Process mailbox command completions.
233  * @vha: SCSI driver HA context
234  * @mb0: Mailbox0 register
235  */
236 static void
237 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
238 {
239         uint16_t        cnt;
240         uint16_t __iomem *wptr;
241         struct qla_hw_data *ha = vha->hw;
242         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
243
244         /* Load return mailbox registers. */
245         ha->flags.mbox_int = 1;
246         ha->mailbox_out[0] = mb0;
247         wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
248
249         for (cnt = 1; cnt < ha->mbx_count; cnt++) {
250                 if (IS_QLA2200(ha) && cnt == 8)
251                         wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
252                 if (cnt == 4 || cnt == 5)
253                         ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
254                 else
255                         ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
256
257                 wptr++;
258         }
259
260         if (ha->mcp) {
261                 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
262                     __func__, vha->host_no, ha->mcp->mb[0]));
263         } else {
264                 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
265                     __func__, vha->host_no));
266         }
267 }
268
269 static void
270 qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
271 {
272         static char *event[] =
273                 { "Complete", "Request Notification", "Time Extension" };
274         int rval;
275         struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
276         uint16_t __iomem *wptr;
277         uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
278
279         /* Seed data -- mailbox1 -> mailbox7. */
280         wptr = (uint16_t __iomem *)&reg24->mailbox1;
281         for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
282                 mb[cnt] = RD_REG_WORD(wptr);
283
284         DEBUG2(printk("scsi(%ld): Inter-Driver Communication %s -- "
285             "%04x %04x %04x %04x %04x %04x %04x.\n", vha->host_no,
286             event[aen & 0xff],
287             mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6]));
288
289         /* Acknowledgement needed? [Notify && non-zero timeout]. */
290         timeout = (descr >> 8) & 0xf;
291         if (aen != MBA_IDC_NOTIFY || !timeout)
292                 return;
293
294         DEBUG2(printk("scsi(%ld): Inter-Driver Communication %s -- "
295             "ACK timeout=%d.\n", vha->host_no, event[aen & 0xff], timeout));
296
297         rval = qla2x00_post_idc_ack_work(vha, mb);
298         if (rval != QLA_SUCCESS)
299                 qla_printk(KERN_WARNING, vha->hw,
300                     "IDC failed to post ACK.\n");
301 }
302
303 /**
304  * qla2x00_async_event() - Process asynchronous events.
305  * @vha: SCSI driver HA context
306  * @mb: Mailbox registers (0 - 3)
307  */
308 void
309 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
310 {
311 #define LS_UNKNOWN      2
312         static char     *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
313         char            *link_speed;
314         uint16_t        handle_cnt;
315         uint16_t        cnt;
316         uint32_t        handles[5];
317         struct qla_hw_data *ha = vha->hw;
318         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
319         uint32_t        rscn_entry, host_pid;
320         uint8_t         rscn_queue_index;
321         unsigned long   flags;
322
323         /* Setup to process RIO completion. */
324         handle_cnt = 0;
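        /* ISP81xx does not use RIO completion handles; skip straight to the
         * asynchronous event handling. */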
325         if (IS_QLA81XX(ha))
326                 goto skip_rio;
327         switch (mb[0]) {
328         case MBA_SCSI_COMPLETION:
329                 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
330                 handle_cnt = 1;
331                 break;
332         case MBA_CMPLT_1_16BIT:
333                 handles[0] = mb[1];
334                 handle_cnt = 1;
335                 mb[0] = MBA_SCSI_COMPLETION;
336                 break;
337         case MBA_CMPLT_2_16BIT:
338                 handles[0] = mb[1];
339                 handles[1] = mb[2];
340                 handle_cnt = 2;
341                 mb[0] = MBA_SCSI_COMPLETION;
342                 break;
343         case MBA_CMPLT_3_16BIT:
344                 handles[0] = mb[1];
345                 handles[1] = mb[2];
346                 handles[2] = mb[3];
347                 handle_cnt = 3;
348                 mb[0] = MBA_SCSI_COMPLETION;
349                 break;
350         case MBA_CMPLT_4_16BIT:
351                 handles[0] = mb[1];
352                 handles[1] = mb[2];
353                 handles[2] = mb[3];
354                 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
355                 handle_cnt = 4;
356                 mb[0] = MBA_SCSI_COMPLETION;
357                 break;
358         case MBA_CMPLT_5_16BIT:
359                 handles[0] = mb[1];
360                 handles[1] = mb[2];
361                 handles[2] = mb[3];
362                 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
363                 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
364                 handle_cnt = 5;
365                 mb[0] = MBA_SCSI_COMPLETION;
366                 break;
367         case MBA_CMPLT_2_32BIT:
368                 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
369                 handles[1] = le32_to_cpu(
370                     ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
371                     RD_MAILBOX_REG(ha, reg, 6));
372                 handle_cnt = 2;
373                 mb[0] = MBA_SCSI_COMPLETION;
374                 break;
375         default:
376                 break;
377         }
378 skip_rio:
379         switch (mb[0]) {
380         case MBA_SCSI_COMPLETION:       /* Fast Post */
381                 if (!vha->flags.online)
382                         break;
383
384                 for (cnt = 0; cnt < handle_cnt; cnt++)
385                         qla2x00_process_completed_request(vha, rsp->req,
386                                 handles[cnt]);
387                 break;
388
389         case MBA_RESET:                 /* Reset */
390                 DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n",
391                         vha->host_no));
392
393                 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
394                 break;
395
396         case MBA_SYSTEM_ERR:            /* System Error */
397                 qla_printk(KERN_INFO, ha,
398                     "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
399                     mb[1], mb[2], mb[3]);
400
401                 ha->isp_ops->fw_dump(vha, 1);
402
403                 if (IS_FWI2_CAPABLE(ha)) {
404                         if (mb[1] == 0 && mb[2] == 0) {
405                                 qla_printk(KERN_ERR, ha,
406                                     "Unrecoverable Hardware Error: adapter "
407                                     "marked OFFLINE!\n");
408                                 vha->flags.online = 0;
409                         } else
410                                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
411                 } else if (mb[1] == 0) {
412                         qla_printk(KERN_INFO, ha,
413                             "Unrecoverable Hardware Error: adapter marked "
414                             "OFFLINE!\n");
415                         vha->flags.online = 0;
416                 } else
417                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
418                 break;
419
420         case MBA_REQ_TRANSFER_ERR:      /* Request Transfer Error */
421                 DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n",
422                     vha->host_no));
423                 qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");
424
425                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
426                 break;
427
428         case MBA_RSP_TRANSFER_ERR:      /* Response Transfer Error */
429                 DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
430                     vha->host_no));
431                 qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");
432
433                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
434                 break;
435
436         case MBA_WAKEUP_THRES:          /* Request Queue Wake-up */
437                 DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
438                     vha->host_no));
439                 break;
440
441         case MBA_LIP_OCCURRED:          /* Loop Initialization Procedure */
442                 DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", vha->host_no,
443                     mb[1]));
444                 qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
445
446                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
447                         atomic_set(&vha->loop_state, LOOP_DOWN);
448                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
449                         qla2x00_mark_all_devices_lost(vha, 1);
450                 }
451
452                 if (vha->vp_idx) {
453                         atomic_set(&vha->vp_state, VP_FAILED);
454                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
455                 }
456
457                 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
458                 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
459
460                 vha->flags.management_server_logged_in = 0;
461                 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
462                 break;
463
464         case MBA_LOOP_UP:               /* Loop Up Event */
465                 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
466                         link_speed = link_speeds[0];
467                         ha->link_data_rate = PORT_SPEED_1GB;
468                 } else {
469                         link_speed = link_speeds[LS_UNKNOWN];
470                         if (mb[1] < 5)
471                                 link_speed = link_speeds[mb[1]];
472                         else if (mb[1] == 0x13)
473                                 link_speed = link_speeds[5];
474                         ha->link_data_rate = mb[1];
475                 }
476
477                 DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
478                     vha->host_no, link_speed));
479                 qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
480                     link_speed);
481
482                 vha->flags.management_server_logged_in = 0;
483                 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
484                 break;
485
486         case MBA_LOOP_DOWN:             /* Loop Down Event */
487                 DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN "
488                     "(%x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3]));
489                 qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x).\n",
490                     mb[1], mb[2], mb[3]);
491
492                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
493                         atomic_set(&vha->loop_state, LOOP_DOWN);
494                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
495                         vha->device_flags |= DFLG_NO_CABLE;
496                         qla2x00_mark_all_devices_lost(vha, 1);
497                 }
498
499                 if (vha->vp_idx) {
500                         atomic_set(&vha->vp_state, VP_FAILED);
501                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
502                 }
503
504                 vha->flags.management_server_logged_in = 0;
505                 ha->link_data_rate = PORT_SPEED_UNKNOWN;
506                 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
507                 break;
508
509         case MBA_LIP_RESET:             /* LIP reset occurred */
510                 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
511                     vha->host_no, mb[1]));
512                 qla_printk(KERN_INFO, ha,
513                     "LIP reset occurred (%x).\n", mb[1]);
514
515                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
516                         atomic_set(&vha->loop_state, LOOP_DOWN);
517                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
518                         qla2x00_mark_all_devices_lost(vha, 1);
519                 }
520
521                 if (vha->vp_idx) {
522                         atomic_set(&vha->vp_state, VP_FAILED);
523                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
524                 }
525
526                 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
527
528                 ha->operating_mode = LOOP;
529                 vha->flags.management_server_logged_in = 0;
530                 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
531                 break;
532
533         /* case MBA_DCBX_COMPLETE: */
534         case MBA_POINT_TO_POINT:        /* Point-to-Point */
535                 if (IS_QLA2100(ha))
536                         break;
537
538                 if (IS_QLA81XX(ha))
539                         DEBUG2(printk("scsi(%ld): DCBX Completed -- %04x %04x "
540                             "%04x\n", vha->host_no, mb[1], mb[2], mb[3]));
541                 else
542                         DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE "
543                             "received.\n", vha->host_no));
544
545                 /*
546                  * Until there's a transition from loop down to loop up, treat
547                  * this as loop down only.
548                  */
549                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
550                         atomic_set(&vha->loop_state, LOOP_DOWN);
551                         if (!atomic_read(&vha->loop_down_timer))
552                                 atomic_set(&vha->loop_down_timer,
553                                     LOOP_DOWN_TIME);
554                         qla2x00_mark_all_devices_lost(vha, 1);
555                 }
556
557                 if (vha->vp_idx) {
558                         atomic_set(&vha->vp_state, VP_FAILED);
559                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
560                 }
561
562                 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
563                         set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
564
565                 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
566                 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
567
568                 ha->flags.gpsc_supported = 1;
569                 vha->flags.management_server_logged_in = 0;
570                 break;
571
572         case MBA_CHG_IN_CONNECTION:     /* Change in connection mode */
573                 if (IS_QLA2100(ha))
574                         break;
575
576                 DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
577                     "received.\n",
578                     vha->host_no));
579                 qla_printk(KERN_INFO, ha,
580                     "Configuration change detected: value=%x.\n", mb[1]);
581
582                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
583                         atomic_set(&vha->loop_state, LOOP_DOWN);
584                         if (!atomic_read(&vha->loop_down_timer))
585                                 atomic_set(&vha->loop_down_timer,
586                                     LOOP_DOWN_TIME);
587                         qla2x00_mark_all_devices_lost(vha, 1);
588                 }
589
590                 if (vha->vp_idx) {
591                         atomic_set(&vha->vp_state, VP_FAILED);
592                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
593                 }
594
595                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
596                 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
597                 break;
598
599         case MBA_PORT_UPDATE:           /* Port database update */
600                 /* Only handle SCNs for our Vport index. */
601                 if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff))
602                         break;
603
604                 /*
605                  * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
606                  * event etc. earlier indicating loop is down) then process
607                  * it.  Otherwise ignore it and wait for RSCN to come in.
608                  */
609                 atomic_set(&vha->loop_down_timer, 0);
610                 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
611                     atomic_read(&vha->loop_state) != LOOP_DEAD) {
612                         DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
613                             "ignored %04x/%04x/%04x.\n", vha->host_no, mb[1],
614                             mb[2], mb[3]));
615                         break;
616                 }
617
618                 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
619                     vha->host_no));
620                 DEBUG(printk(KERN_INFO
621                     "scsi(%ld): Port database changed %04x %04x %04x.\n",
622                     vha->host_no, mb[1], mb[2], mb[3]));
623
624                 /*
625                  * Mark all devices as missing so we will login again.
626                  */
627                 atomic_set(&vha->loop_state, LOOP_UP);
628
629                 qla2x00_mark_all_devices_lost(vha, 1);
630
631                 vha->flags.rscn_queue_overflow = 1;
632
633                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
634                 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
635                 break;
636
637         case MBA_RSCN_UPDATE:           /* State Change Registration */
638                 /* Check if the Vport has issued a SCR */
639                 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
640                         break;
641                 /* Only handle SCNs for our Vport index. */
642                 if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff))
643                         break;
644                 DEBUG2(printk("scsi(%ld): Asynchronous RSCN UPDATE.\n",
645                     vha->host_no));
646                 DEBUG(printk(KERN_INFO
647                     "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
648                     vha->host_no, mb[1], mb[2], mb[3]));
649
650                 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
651                 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
652                                 | vha->d_id.b.al_pa;
653                 if (rscn_entry == host_pid) {
654                         DEBUG(printk(KERN_INFO
655                             "scsi(%ld): Ignoring RSCN update to local host "
656                             "port ID (%06x)\n",
657                             vha->host_no, host_pid));
658                         break;
659                 }
660
661                 /* Ignore reserved bits from RSCN-payload. */
662                 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
663                 rscn_queue_index = vha->rscn_in_ptr + 1;
664                 if (rscn_queue_index == MAX_RSCN_COUNT)
665                         rscn_queue_index = 0;
666                 if (rscn_queue_index != vha->rscn_out_ptr) {
667                         vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry;
668                         vha->rscn_in_ptr = rscn_queue_index;
669                 } else {
670                         vha->flags.rscn_queue_overflow = 1;
671                 }
672
673                 atomic_set(&vha->loop_state, LOOP_UPDATE);
674                 atomic_set(&vha->loop_down_timer, 0);
675                 vha->flags.management_server_logged_in = 0;
676
677                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
678                 set_bit(RSCN_UPDATE, &vha->dpc_flags);
679                 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
680                 break;
681
682         /* case MBA_RIO_RESPONSE: */
683         case MBA_ZIO_RESPONSE:
684                 DEBUG3(printk("scsi(%ld): [R|Z]IO update completion.\n",
685                     vha->host_no));
686
687                 if (IS_FWI2_CAPABLE(ha))
688                         qla24xx_process_response_queue(rsp);
689                 else
690                         qla2x00_process_response_queue(rsp);
691                 break;
692
693         case MBA_DISCARD_RND_FRAME:
694                 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
695                     "%04x.\n", vha->host_no, mb[1], mb[2], mb[3]));
696                 break;
697
698         case MBA_TRACE_NOTIFICATION:
699                 DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
700                     vha->host_no, mb[1], mb[2]));
701                 break;
702
703         case MBA_ISP84XX_ALERT:
704                 DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- "
705                     "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
706
707                 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
708                 switch (mb[1]) {
709                 case A84_PANIC_RECOVERY:
710                         qla_printk(KERN_INFO, ha, "Alert 84XX: panic recovery "
711                             "%04x %04x\n", mb[2], mb[3]);
712                         break;
713                 case A84_OP_LOGIN_COMPLETE:
714                         ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
715                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: "
716                             "firmware version %x\n", ha->cs84xx->op_fw_version));
717                         break;
718                 case A84_DIAG_LOGIN_COMPLETE:
719                         ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
720                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: "
721                             "diagnostic firmware version %x\n",
722                             ha->cs84xx->diag_fw_version));
723                         break;
724                 case A84_GOLD_LOGIN_COMPLETE:
725                         ha->cs84xx->gold_fw_version = mb[3] << 16 | mb[2];
726                         ha->cs84xx->fw_update = 1;
727                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: gold "
728                             "firmware version %x\n",
729                             ha->cs84xx->gold_fw_version));
730                         break;
731                 default:
732                         qla_printk(KERN_ERR, ha,
733                             "Alert 84xx: Invalid Alert %04x %04x %04x\n",
734                             mb[1], mb[2], mb[3]);
735                 }
736                 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
737                 break;
738         case MBA_DCBX_START:
739                 DEBUG2(printk("scsi(%ld): DCBX Started -- %04x %04x %04x\n",
740                     vha->host_no, mb[1], mb[2], mb[3]));
741                 break;
742         case MBA_DCBX_PARAM_UPDATE:
743                 DEBUG2(printk("scsi(%ld): DCBX Parameters Updated -- "
744                     "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
745                 break;
746         case MBA_FCF_CONF_ERR:
747                 DEBUG2(printk("scsi(%ld): FCF Configuration Error -- "
748                     "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
749                 break;
750         case MBA_IDC_COMPLETE:
751         case MBA_IDC_NOTIFY:
752         case MBA_IDC_TIME_EXT:
753                 qla81xx_idc_event(vha, mb[0], mb[1]);
754                 break;
755         }
756
757         if (!vha->vp_idx && ha->num_vhosts)
758                 qla2x00_alert_all_vps(rsp, mb);
759 }
760
761 static void
762 qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
763 {
764         fc_port_t *fcport = data;
765         struct scsi_qla_host *vha = fcport->vha;
766         struct qla_hw_data *ha = vha->hw;
767         struct req_que *req = NULL;
768
769         req = ha->req_q_map[vha->req_ques[0]];
770         if (!req)
771                 return;
772         if (req->max_q_depth <= sdev->queue_depth)
773                 return;
774
775         if (sdev->ordered_tags)
776                 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
777                     sdev->queue_depth + 1);
778         else
779                 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
780                     sdev->queue_depth + 1);
781
782         fcport->last_ramp_up = jiffies;
783
784         DEBUG2(qla_printk(KERN_INFO, ha,
785             "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
786             fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
787             sdev->queue_depth));
788 }
789
790 static void
791 qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
792 {
793         fc_port_t *fcport = data;
794
795         if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
796                 return;
797
798         DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
799             "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
800             fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
801             sdev->queue_depth));
802 }
803
804 static inline void
805 qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req,
806                                                                 srb_t *sp)
807 {
808         fc_port_t *fcport;
809         struct scsi_device *sdev;
810
811         sdev = sp->cmd->device;
812         if (sdev->queue_depth >= req->max_q_depth)
813                 return;
814
815         fcport = sp->fcport;
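        /* Ramp up only if ql2xqfullrampup seconds have elapsed since both the
         * last ramp-up and the last QUEUE FULL condition on this port. */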
816         if (time_before(jiffies,
817             fcport->last_ramp_up + ql2xqfullrampup * HZ))
818                 return;
819         if (time_before(jiffies,
820             fcport->last_queue_full + ql2xqfullrampup * HZ))
821                 return;
822
823         starget_for_each_device(sdev->sdev_target, fcport,
824             qla2x00_adjust_sdev_qdepth_up);
825 }
826
827 /**
828  * qla2x00_process_completed_request() - Process a Fast Post response.
829  * @vha: SCSI driver HA context
830  * @index: SRB index
831  */
832 static void
833 qla2x00_process_completed_request(struct scsi_qla_host *vha,
834                                 struct req_que *req, uint32_t index)
835 {
836         srb_t *sp;
837         struct qla_hw_data *ha = vha->hw;
838
839         /* Validate handle. */
840         if (index >= MAX_OUTSTANDING_COMMANDS) {
841                 DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n",
842                     vha->host_no, index));
843                 qla_printk(KERN_WARNING, ha,
844                     "Invalid SCSI completion handle %d.\n", index);
845
846                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
847                 return;
848         }
849
850         sp = req->outstanding_cmds[index];
851         if (sp) {
852                 /* Free outstanding command slot. */
853                 req->outstanding_cmds[index] = NULL;
854
855                 CMD_COMPL_STATUS(sp->cmd) = 0L;
856                 CMD_SCSI_STATUS(sp->cmd) = 0L;
857
858                 /* Save ISP completion status */
859                 sp->cmd->result = DID_OK << 16;
860
861                 qla2x00_ramp_up_queue_depth(vha, req, sp);
862                 qla2x00_sp_compl(ha, sp);
863         } else {
864                 DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n",
865                     vha->host_no));
866                 qla_printk(KERN_WARNING, ha,
867                     "Invalid ISP SCSI completion handle\n");
868
869                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
870         }
871 }
872
873 /**
874  * qla2x00_process_response_queue() - Process response queue entries.
875  * @rsp: Response queue to process
876  */
877 void
878 qla2x00_process_response_queue(struct rsp_que *rsp)
879 {
880         struct scsi_qla_host *vha;
881         struct qla_hw_data *ha = rsp->hw;
882         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
883         sts_entry_t     *pkt;
884         uint16_t        handle_cnt;
885         uint16_t        cnt;
886
887         vha = qla2x00_get_rsp_host(rsp);
888
889         if (!vha->flags.online)
890                 return;
891
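        /*
         * Consume ring entries until one that is already stamped
         * RESPONSE_PROCESSED is reached; each entry handled below is
         * stamped so it is never processed twice.
         */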
892         while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
893                 pkt = (sts_entry_t *)rsp->ring_ptr;
894
895                 rsp->ring_index++;
896                 if (rsp->ring_index == rsp->length) {
897                         rsp->ring_index = 0;
898                         rsp->ring_ptr = rsp->ring;
899                 } else {
900                         rsp->ring_ptr++;
901                 }
902
903                 if (pkt->entry_status != 0) {
904                         DEBUG3(printk(KERN_INFO
905                             "scsi(%ld): Process error entry.\n", vha->host_no));
906
907                         qla2x00_error_entry(vha, rsp, pkt);
908                         ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
909                         wmb();
910                         continue;
911                 }
912
913                 switch (pkt->entry_type) {
914                 case STATUS_TYPE:
915                         qla2x00_status_entry(vha, rsp, pkt);
916                         break;
917                 case STATUS_TYPE_21:
918                         handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
919                         for (cnt = 0; cnt < handle_cnt; cnt++) {
920                                 qla2x00_process_completed_request(vha, rsp->req,
921                                     ((sts21_entry_t *)pkt)->handle[cnt]);
922                         }
923                         break;
924                 case STATUS_TYPE_22:
925                         handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
926                         for (cnt = 0; cnt < handle_cnt; cnt++) {
927                                 qla2x00_process_completed_request(vha, rsp->req,
928                                     ((sts22_entry_t *)pkt)->handle[cnt]);
929                         }
930                         break;
931                 case STATUS_CONT_TYPE:
932                         qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
933                         break;
934                 default:
935                         /* Type Not Supported. */
936                         DEBUG4(printk(KERN_WARNING
937                             "scsi(%ld): Received unknown response pkt type %x "
938                             "entry status=%x.\n",
939                             vha->host_no, pkt->entry_type, pkt->entry_status));
940                         break;
941                 }
942                 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
943                 wmb();
944         }
945
946         /* Adjust ring index */
947         WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
948 }
949
950 static inline void
951 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
952 {
953         struct scsi_cmnd *cp = sp->cmd;
954
955         if (sense_len >= SCSI_SENSE_BUFFERSIZE)
956                 sense_len = SCSI_SENSE_BUFFERSIZE;
957
958         CMD_ACTUAL_SNSLEN(cp) = sense_len;
959         sp->request_sense_length = sense_len;
960         sp->request_sense_ptr = cp->sense_buffer;
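        /*
         * At most 32 bytes of sense data are copied from this Status IOCB;
         * any remainder arrives in Status Continuation entries, so the
         * in-progress srb is saved in vha->status_srb below.
         */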
961         if (sp->request_sense_length > 32)
962                 sense_len = 32;
963
964         memcpy(cp->sense_buffer, sense_data, sense_len);
965
966         sp->request_sense_ptr += sense_len;
967         sp->request_sense_length -= sense_len;
968         if (sp->request_sense_length != 0)
969                 sp->fcport->vha->status_srb = sp;
970
971         DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
972             "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no,
973             cp->device->channel, cp->device->id, cp->device->lun, cp,
974             cp->serial_number));
975         if (sense_len)
976                 DEBUG5(qla2x00_dump_buffer(cp->sense_buffer,
977                     CMD_ACTUAL_SNSLEN(cp)));
978 }
979
980 /**
981  * qla2x00_status_entry() - Process a Status IOCB entry.
982  * @vha: SCSI driver HA context
983  * @pkt: Entry pointer
984  */
985 static void
986 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
987 {
988         srb_t           *sp;
989         fc_port_t       *fcport;
990         struct scsi_cmnd *cp;
991         sts_entry_t *sts;
992         struct sts_entry_24xx *sts24;
993         uint16_t        comp_status;
994         uint16_t        scsi_status;
995         uint8_t         lscsi_status;
996         int32_t         resid;
997         uint32_t        sense_len, rsp_info_len, resid_len, fw_resid_len;
998         uint8_t         *rsp_info, *sense_data;
999         struct qla_hw_data *ha = vha->hw;
1000         struct req_que *req = rsp->req;
1001
1002         sts = (sts_entry_t *) pkt;
1003         sts24 = (struct sts_entry_24xx *) pkt;
1004         if (IS_FWI2_CAPABLE(ha)) {
1005                 comp_status = le16_to_cpu(sts24->comp_status);
1006                 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
1007         } else {
1008                 comp_status = le16_to_cpu(sts->comp_status);
1009                 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1010         }
1011
1012         /* Fast path completion. */
1013         if (comp_status == CS_COMPLETE && scsi_status == 0) {
1014                 qla2x00_process_completed_request(vha, req, sts->handle);
1015
1016                 return;
1017         }
1018
1019         /* Validate handle. */
1020         if (sts->handle < MAX_OUTSTANDING_COMMANDS) {
1021                 sp = req->outstanding_cmds[sts->handle];
1022                 req->outstanding_cmds[sts->handle] = NULL;
1023         } else
1024                 sp = NULL;
1025
1026         if (sp == NULL) {
1027                 DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n",
1028                     vha->host_no));
1029                 qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n");
1030
1031                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1032                 qla2xxx_wake_dpc(vha);
1033                 return;
1034         }
1035         cp = sp->cmd;
1036         if (cp == NULL) {
1037                 DEBUG2(printk("scsi(%ld): Command already returned back to OS "
1038                     "pkt->handle=%d sp=%p.\n", vha->host_no, sts->handle, sp));
1039                 qla_printk(KERN_WARNING, ha,
1040                     "Command is NULL: already returned to OS (sp=%p)\n", sp);
1041
1042                 return;
1043         }
1044
1045         lscsi_status = scsi_status & STATUS_MASK;
1046         CMD_ENTRY_STATUS(cp) = sts->entry_status;
1047         CMD_COMPL_STATUS(cp) = comp_status;
1048         CMD_SCSI_STATUS(cp) = scsi_status;
1049
1050         fcport = sp->fcport;
1051
1052         sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
1053         if (IS_FWI2_CAPABLE(ha)) {
1054                 sense_len = le32_to_cpu(sts24->sense_len);
1055                 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
1056                 resid_len = le32_to_cpu(sts24->rsp_residual_count);
1057                 fw_resid_len = le32_to_cpu(sts24->residual_len);
1058                 rsp_info = sts24->data;
1059                 sense_data = sts24->data;
1060                 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
1061         } else {
1062                 sense_len = le16_to_cpu(sts->req_sense_length);
1063                 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
1064                 resid_len = le32_to_cpu(sts->residual_length);
1065                 rsp_info = sts->rsp_info;
1066                 sense_data = sts->req_sense_data;
1067         }
1068
1069         /* Check for any FCP transport errors. */
1070         if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
1071                 /* Sense data lies beyond any FCP RESPONSE data. */
1072                 if (IS_FWI2_CAPABLE(ha))
1073                         sense_data += rsp_info_len;
1074                 if (rsp_info_len > 3 && rsp_info[3]) {
1075                         DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol "
1076                             "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..."
1077                             "retrying command\n", vha->host_no,
1078                             cp->device->channel, cp->device->id,
1079                             cp->device->lun, rsp_info_len, rsp_info[0],
1080                             rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4],
1081                             rsp_info[5], rsp_info[6], rsp_info[7]));
1082
1083                         cp->result = DID_BUS_BUSY << 16;
1084                         qla2x00_sp_compl(ha, sp);
1085                         return;
1086                 }
1087         }
1088
1089         /* Check for overrun. */
1090         if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
1091             scsi_status & SS_RESIDUAL_OVER)
1092                 comp_status = CS_DATA_OVERRUN;
1093
1094         /*
1095          * Based on host and SCSI status, generate a status code for Linux.
1096          */
1097         switch (comp_status) {
1098         case CS_COMPLETE:
1099         case CS_QUEUE_FULL:
1100                 if (scsi_status == 0) {
1101                         cp->result = DID_OK << 16;
1102                         break;
1103                 }
1104                 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
1105                         resid = resid_len;
1106                         scsi_set_resid(cp, resid);
1107                         CMD_RESID_LEN(cp) = resid;
1108
1109                         if (!lscsi_status &&
1110                             ((unsigned)(scsi_bufflen(cp) - resid) <
1111                              cp->underflow)) {
1112                                 qla_printk(KERN_INFO, ha,
1113                                            "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1114                                            "detected (%x of %x bytes)...returning "
1115                                            "error status.\n", vha->host_no,
1116                                            cp->device->channel, cp->device->id,
1117                                            cp->device->lun, resid,
1118                                            scsi_bufflen(cp));
1119
1120                                 cp->result = DID_ERROR << 16;
1121                                 break;
1122                         }
1123                 }
1124                 cp->result = DID_OK << 16 | lscsi_status;
1125
1126                 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1127                         DEBUG2(printk(KERN_INFO
1128                             "scsi(%ld): QUEUE FULL status detected "
1129                             "0x%x-0x%x.\n", vha->host_no, comp_status,
1130                             scsi_status));
1131
1132                         /* Adjust queue depth for all luns on the port. */
1133                         fcport->last_queue_full = jiffies;
1134                         starget_for_each_device(cp->device->sdev_target,
1135                             fcport, qla2x00_adjust_sdev_qdepth_down);
1136                         break;
1137                 }
1138                 if (lscsi_status != SS_CHECK_CONDITION)
1139                         break;
1140
1141                 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1142                 if (!(scsi_status & SS_SENSE_LEN_VALID))
1143                         break;
1144
1145                 qla2x00_handle_sense(sp, sense_data, sense_len);
1146                 break;
1147
1148         case CS_DATA_UNDERRUN:
1149                 resid = resid_len;
1150                 /* Use F/W calculated residual length. */
1151                 if (IS_FWI2_CAPABLE(ha)) {
1152                         if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1153                                 lscsi_status = 0;
1154                         } else if (resid != fw_resid_len) {
1155                                 scsi_status &= ~SS_RESIDUAL_UNDER;
1156                                 lscsi_status = 0;
1157                         }
1158                         resid = fw_resid_len;
1159                 }
1160
1161                 if (scsi_status & SS_RESIDUAL_UNDER) {
1162                         scsi_set_resid(cp, resid);
1163                         CMD_RESID_LEN(cp) = resid;
1164                 } else {
1165                         DEBUG2(printk(KERN_INFO
1166                             "scsi(%ld:%d:%d) UNDERRUN status detected "
1167                             "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x "
1168                             "os_underflow=0x%x\n", vha->host_no,
1169                             cp->device->id, cp->device->lun, comp_status,
1170                             scsi_status, resid_len, resid, cp->cmnd[0],
1171                             cp->underflow));
1172
1173                 }
1174
1175                 /*
1176                  * Check to see if SCSI Status is non zero. If so report SCSI
1177                  * Status.
1178                  */
1179                 if (lscsi_status != 0) {
1180                         cp->result = DID_OK << 16 | lscsi_status;
1181
1182                         if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1183                                 DEBUG2(printk(KERN_INFO
1184                                     "scsi(%ld): QUEUE FULL status detected "
1185                                     "0x%x-0x%x.\n", vha->host_no, comp_status,
1186                                     scsi_status));
1187
1188                                 /*
1189                                  * Adjust queue depth for all luns on the
1190                                  * port.
1191                                  */
1192                                 fcport->last_queue_full = jiffies;
1193                                 starget_for_each_device(
1194                                     cp->device->sdev_target, fcport,
1195                                     qla2x00_adjust_sdev_qdepth_down);
1196                                 break;
1197                         }
1198                         if (lscsi_status != SS_CHECK_CONDITION)
1199                                 break;
1200
1201                         memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1202                         if (!(scsi_status & SS_SENSE_LEN_VALID))
1203                                 break;
1204
1205                         qla2x00_handle_sense(sp, sense_data, sense_len);
1206                 } else {
1207                         /*
1208                          * If RISC reports underrun and target does not report
1209                          * it then we must have a lost frame, so tell upper
1210                          * layer to retry it by reporting a bus busy.
1211                          */
1212                         if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1213                                 DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
1214                                               "frame(s) detected (%x of %x bytes)..."
1215                                               "retrying command.\n",
1216                                         vha->host_no, cp->device->channel,
1217                                         cp->device->id, cp->device->lun, resid,
1218                                         scsi_bufflen(cp)));
1219
1220                                 cp->result = DID_BUS_BUSY << 16;
1221                                 break;
1222                         }
1223
1224                         /* Handle mid-layer underflow */
1225                         if ((unsigned)(scsi_bufflen(cp) - resid) <
1226                             cp->underflow) {
1227                                 qla_printk(KERN_INFO, ha,
1228                                            "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1229                                            "detected (%x of %x bytes)...returning "
1230                                            "error status.\n", vha->host_no,
1231                                            cp->device->channel, cp->device->id,
1232                                            cp->device->lun, resid,
1233                                            scsi_bufflen(cp));
1234
1235                                 cp->result = DID_ERROR << 16;
1236                                 break;
1237                         }
1238
1239                         /* Everybody online, looking good... */
1240                         cp->result = DID_OK << 16;
1241                 }
1242                 break;
1243
1244         case CS_DATA_OVERRUN:
1245                 DEBUG2(printk(KERN_INFO
1246                     "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n",
1247                     vha->host_no, cp->device->id, cp->device->lun, comp_status,
1248                     scsi_status));
1249                 DEBUG2(printk(KERN_INFO
1250                     "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1251                     cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
1252                     cp->cmnd[4], cp->cmnd[5]));
1253                 DEBUG2(printk(KERN_INFO
1254                     "PID=0x%lx req=0x%x xtra=0x%x -- returning DID_ERROR "
1255                     "status!\n",
1256                     cp->serial_number, scsi_bufflen(cp), resid_len));
1257
1258                 cp->result = DID_ERROR << 16;
1259                 break;
1260
1261         case CS_PORT_LOGGED_OUT:
1262         case CS_PORT_CONFIG_CHG:
1263         case CS_PORT_BUSY:
1264         case CS_INCOMPLETE:
1265         case CS_PORT_UNAVAILABLE:
1266                 /*
1267                  * If the port is in Target Down state, return all IOs for this
1268                  * Target with DID_NO_CONNECT; otherwise queue the IOs in the
1269                  * retry_queue.
1270                  */
1271                 DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down "
1272                     "pid=%ld, compl status=0x%x, port state=0x%x\n",
1273                     vha->host_no, cp->device->id, cp->device->lun,
1274                     cp->serial_number, comp_status,
1275                     atomic_read(&fcport->state)));
1276
1277                 /*
1278                  * We are going to have the fc class block the rport
1279                  * while we try to recover so instruct the mid layer
1280                  * to requeue until the class decides how to handle this.
1281                  */
1282                 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1283                 if (atomic_read(&fcport->state) == FCS_ONLINE)
1284                         qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1285                 break;
1286
1287         case CS_RESET:
1288                 DEBUG2(printk(KERN_INFO
1289                     "scsi(%ld): RESET status detected 0x%x-0x%x.\n",
1290                     vha->host_no, comp_status, scsi_status));
1291
1292                 cp->result = DID_RESET << 16;
1293                 break;
1294
1295         case CS_ABORTED:
1296                 /*
1297                  * hv2.19.12 - DID_ABORT does not retry the request. If we
1298                  * aborted this request then abort it; otherwise it must be
1299                  * a reset.
1300                  */
1301                 DEBUG2(printk(KERN_INFO
1302                     "scsi(%ld): ABORT status detected 0x%x-0x%x.\n",
1303                     vha->host_no, comp_status, scsi_status));
1304
1305                 cp->result = DID_RESET << 16;
1306                 break;
1307
1308         case CS_TIMEOUT:
1309                 /*
1310                  * We are going to have the fc class block the rport
1311                  * while we try to recover so instruct the mid layer
1312                  * to requeue until the class decides how to handle this.
1313                  */
1314                 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1315
1316                 if (IS_FWI2_CAPABLE(ha)) {
1317                         DEBUG2(printk(KERN_INFO
1318                             "scsi(%ld:%d:%d:%d): TIMEOUT status detected "
1319                             "0x%x-0x%x\n", vha->host_no, cp->device->channel,
1320                             cp->device->id, cp->device->lun, comp_status,
1321                             scsi_status));
1322                         break;
1323                 }
1324                 DEBUG2(printk(KERN_INFO
1325                     "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x "
1326                     "sflags=%x.\n", vha->host_no, cp->device->channel,
1327                     cp->device->id, cp->device->lun, comp_status, scsi_status,
1328                     le16_to_cpu(sts->status_flags)));
1329
1330                 /* Check to see if logout occurred. */
1331                 if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT))
1332                         qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1333                 break;
1334
1335         default:
1336                 DEBUG3(printk("scsi(%ld): Error detected (unknown status) "
1337                     "0x%x-0x%x.\n", vha->host_no, comp_status, scsi_status));
1338                 qla_printk(KERN_INFO, ha,
1339                     "Unknown status detected 0x%x-0x%x.\n",
1340                     comp_status, scsi_status);
1341
1342                 cp->result = DID_ERROR << 16;
1343                 break;
1344         }
1345
1346         /* Place command on done queue. */
1347         if (vha->status_srb == NULL)
1348                 qla2x00_sp_compl(ha, sp);
1349 }
1350
1351 /**
1352  * qla2x00_status_cont_entry() - Process a Status Continuation entry.
1353  * @vha: SCSI driver HA context
1354  * @pkt: Entry pointer
1355  *
1356  * Extended sense data.
1357  */
1358 static void
1359 qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt)
1360 {
1361         uint8_t         sense_sz = 0;
1362         struct qla_hw_data *ha = vha->hw;
1363         srb_t           *sp = vha->status_srb;
1364         struct scsi_cmnd *cp;
1365
1366         if (sp != NULL && sp->request_sense_length != 0) {
1367                 cp = sp->cmd;
1368                 if (cp == NULL) {
1369                         DEBUG2(printk("%s(): Cmd already returned back to OS "
1370                             "sp=%p.\n", __func__, sp));
1371                         qla_printk(KERN_INFO, ha,
1372                             "cmd is NULL: already returned to OS (sp=%p)\n",
1373                             sp);
1374
1375                         vha->status_srb = NULL;
1376                         return;
1377                 }
1378
1379                 if (sp->request_sense_length > sizeof(pkt->data)) {
1380                         sense_sz = sizeof(pkt->data);
1381                 } else {
1382                         sense_sz = sp->request_sense_length;
1383                 }
1384
1385                 /* Move sense data. */
1386                 if (IS_FWI2_CAPABLE(ha))
1387                         host_to_fcp_swap(pkt->data, sizeof(pkt->data));
1388                 memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
1389                 DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz));
1390
1391                 sp->request_sense_ptr += sense_sz;
1392                 sp->request_sense_length -= sense_sz;
1393
1394                 /* Place command on done queue. */
1395                 if (sp->request_sense_length == 0) {
1396                         vha->status_srb = NULL;
1397                         qla2x00_sp_compl(ha, sp);
1398                 }
1399         }
1400 }
1401
1402 /**
1403  * qla2x00_error_entry() - Process an error entry.
1404  * @vha: SCSI driver HA context
1405  * @pkt: Entry pointer
1406  */
1407 static void
1408 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1409 {
1410         srb_t *sp;
1411         struct qla_hw_data *ha = vha->hw;
1412         struct req_que *req = rsp->req;
1413 #if defined(QL_DEBUG_LEVEL_2)
1414         if (pkt->entry_status & RF_INV_E_ORDER)
1415                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
1416         else if (pkt->entry_status & RF_INV_E_COUNT)
1417                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Count\n", __func__);
1418         else if (pkt->entry_status & RF_INV_E_PARAM)
1419                 qla_printk(KERN_ERR, ha,
1420                     "%s: Invalid Entry Parameter\n", __func__);
1421         else if (pkt->entry_status & RF_INV_E_TYPE)
1422                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Type\n", __func__);
1423         else if (pkt->entry_status & RF_BUSY)
1424                 qla_printk(KERN_ERR, ha, "%s: Busy\n", __func__);
1425         else
1426                 qla_printk(KERN_ERR, ha, "%s: UNKNOWN flag error\n", __func__);
1427 #endif
1428
1429         /* Validate handle. */
1430         if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
1431                 sp = req->outstanding_cmds[pkt->handle];
1432         else
1433                 sp = NULL;
1434
1435         if (sp) {
1436                 /* Free outstanding command slot. */
1437                 req->outstanding_cmds[pkt->handle] = NULL;
1438
1439                 /* Bad payload or header */
1440                 if (pkt->entry_status &
1441                     (RF_INV_E_ORDER | RF_INV_E_COUNT |
1442                      RF_INV_E_PARAM | RF_INV_E_TYPE)) {
1443                         sp->cmd->result = DID_ERROR << 16;
1444                 } else if (pkt->entry_status & RF_BUSY) {
1445                         sp->cmd->result = DID_BUS_BUSY << 16;
1446                 } else {
1447                         sp->cmd->result = DID_ERROR << 16;
1448                 }
1449                 qla2x00_sp_compl(ha, sp);
1450
1451         } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
1452             COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) {
1453                 DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
1454                     vha->host_no));
1455                 qla_printk(KERN_WARNING, ha,
1456                     "Error entry - invalid handle\n");
1457
1458                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1459                 qla2xxx_wake_dpc(vha);
1460         }
1461 }
1462
1463 /**
1464  * qla24xx_mbx_completion() - Process mailbox command completions.
1465  * @ha: SCSI driver HA context
1466  * @vha: SCSI driver HA context
1467  */
1468 static void
1469 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1470 {
1471         uint16_t        cnt;
1472         uint16_t __iomem *wptr;
1473         struct qla_hw_data *ha = vha->hw;
1474         struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1475
1476         /* Load return mailbox registers. */
1477         ha->flags.mbox_int = 1;
1478         ha->mailbox_out[0] = mb0;
1479         wptr = (uint16_t __iomem *)&reg->mailbox1;
1480
1481         for (cnt = 1; cnt < ha->mbx_count; cnt++) {
1482                 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
1483                 wptr++;
1484         }
1485
1486         if (ha->mcp) {
1487                 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
1488                     __func__, vha->host_no, ha->mcp->mb[0]));
1489         } else {
1490                 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
1491                     __func__, vha->host_no));
1492         }
1493 }
1494
1495 /**
1496  * qla24xx_process_response_queue() - Process response queue entries.
1497  * @rsp: response queue
1498  */
1499 void
1500 qla24xx_process_response_queue(struct rsp_que *rsp)
1501 {
1502         struct qla_hw_data *ha = rsp->hw;
1503         struct sts_entry_24xx *pkt;
1504         struct scsi_qla_host *vha;
1505
1506         vha = qla2x00_get_rsp_host(rsp);
1507
1508         if (!vha->flags.online)
1509                 return;
1510
1511         while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1512                 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
1513
1514                 rsp->ring_index++;
1515                 if (rsp->ring_index == rsp->length) {
1516                         rsp->ring_index = 0;
1517                         rsp->ring_ptr = rsp->ring;
1518                 } else {
1519                         rsp->ring_ptr++;
1520                 }
1521
1522                 if (pkt->entry_status != 0) {
1523                         DEBUG3(printk(KERN_INFO
1524                             "scsi(%ld): Process error entry.\n", vha->host_no));
1525
1526                         qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
1527                         ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1528                         wmb();
1529                         continue;
1530                 }
1531
1532                 switch (pkt->entry_type) {
1533                 case STATUS_TYPE:
1534                         qla2x00_status_entry(vha, rsp, pkt);
1535                         break;
1536                 case STATUS_CONT_TYPE:
1537                         qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
1538                         break;
1539                 case VP_RPT_ID_IOCB_TYPE:
1540                         qla24xx_report_id_acquisition(vha,
1541                             (struct vp_rpt_id_entry_24xx *)pkt);
1542                         break;
1543                 default:
1544                         /* Type Not Supported. */
1545                         DEBUG4(printk(KERN_WARNING
1546                             "scsi(%ld): Received unknown response pkt type %x "
1547                             "entry status=%x.\n",
1548                             vha->host_no, pkt->entry_type, pkt->entry_status));
1549                         break;
1550                 }
1551                 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1552                 wmb();
1553         }
1554
1555         /* Adjust ring index */
1556         ha->isp_ops->wrt_rsp_reg(ha, rsp->id, rsp->ring_index);
1557 }
1558
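/*
 * Sanity check of RISC register access after a RISC pause (ISP25xx and
 * ISP81xx only): select I/O base window 0x7C00, poll for BIT_0 of the
 * window register, and log the additional diagnostic code 0x55AA when
 * BIT_3 of iobase_c8 is set.  The window is restored to 0 on exit.
 */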
1559 static void
1560 qla2xxx_check_risc_status(scsi_qla_host_t *vha)
1561 {
1562         int rval;
1563         uint32_t cnt;
1564         struct qla_hw_data *ha = vha->hw;
1565         struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1566
1567         if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
1568                 return;
1569
1570         rval = QLA_SUCCESS;
1571         WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
1572         RD_REG_DWORD(&reg->iobase_addr);
1573         WRT_REG_DWORD(&reg->iobase_window, 0x0001);
1574         for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
1575             rval == QLA_SUCCESS; cnt--) {
1576                 if (cnt) {
1577                         WRT_REG_DWORD(&reg->iobase_window, 0x0001);
1578                         udelay(10);
1579                 } else
1580                         rval = QLA_FUNCTION_TIMEOUT;
1581         }
1582         if (rval == QLA_SUCCESS)
1583                 goto next_test;
1584
1585         WRT_REG_DWORD(&reg->iobase_window, 0x0003);
1586         for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
1587             rval == QLA_SUCCESS; cnt--) {
1588                 if (cnt) {
1589                         WRT_REG_DWORD(&reg->iobase_window, 0x0003);
1590                         udelay(10);
1591                 } else
1592                         rval = QLA_FUNCTION_TIMEOUT;
1593         }
1594         if (rval != QLA_SUCCESS)
1595                 goto done;
1596
1597 next_test:
1598         if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
1599                 qla_printk(KERN_INFO, ha, "Additional code -- 0x55AA.\n");
1600
1601 done:
1602         WRT_REG_DWORD(&reg->iobase_window, 0x0000);
1603         RD_REG_DWORD(&reg->iobase_window);
1604 }
1605
1606 /**
1607  * qla24xx_intr_handler() - Process interrupts for the ISP24xx and newer ISPs.
1608  * @irq:
1609  * @dev_id: SCSI driver HA context
1610  *
1611  * Called by system whenever the host adapter generates an interrupt.
1612  *
1613  * Returns handled flag.
1614  */
1615 irqreturn_t
1616 qla24xx_intr_handler(int irq, void *dev_id)
1617 {
1618         scsi_qla_host_t *vha;
1619         struct qla_hw_data *ha;
1620         struct device_reg_24xx __iomem *reg;
1621         int             status;
1622         unsigned long   iter;
1623         uint32_t        stat;
1624         uint32_t        hccr;
1625         uint16_t        mb[4];
1626         struct rsp_que *rsp;
1627
1628         rsp = (struct rsp_que *) dev_id;
1629         if (!rsp) {
1630                 printk(KERN_INFO
1631                     "%s(): NULL response queue pointer\n", __func__);
1632                 return IRQ_NONE;
1633         }
1634
1635         ha = rsp->hw;
1636         reg = &ha->iobase->isp24;
1637         status = 0;
1638
1639         spin_lock(&ha->hardware_lock);
1640         vha = qla2x00_get_rsp_host(rsp);
1641         for (iter = 50; iter--; ) {
1642                 stat = RD_REG_DWORD(&reg->host_status);
1643                 if (stat & HSRX_RISC_PAUSED) {
1644                         if (pci_channel_offline(ha->pdev))
1645                                 break;
1646
1647                         hccr = RD_REG_DWORD(&reg->hccr);
1648
1649                         qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1650                             "Dumping firmware!\n", hccr);
1651
1652                         qla2xxx_check_risc_status(vha);
1653
1654                         ha->isp_ops->fw_dump(vha, 1);
1655                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1656                         break;
1657                 } else if ((stat & HSRX_RISC_INT) == 0)
1658                         break;
1659
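                /*
                 * The low byte of the host status selects the interrupt
                 * source: 0x1/0x2/0x10/0x11 mailbox completion, 0x12
                 * asynchronous event, 0x13/0x14 response queue update.
                 */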
1660                 switch (stat & 0xff) {
1661                 case 0x1:
1662                 case 0x2:
1663                 case 0x10:
1664                 case 0x11:
1665                         qla24xx_mbx_completion(vha, MSW(stat));
1666                         status |= MBX_INTERRUPT;
1667
1668                         break;
1669                 case 0x12:
1670                         mb[0] = MSW(stat);
1671                         mb[1] = RD_REG_WORD(&reg->mailbox1);
1672                         mb[2] = RD_REG_WORD(&reg->mailbox2);
1673                         mb[3] = RD_REG_WORD(&reg->mailbox3);
1674                         qla2x00_async_event(vha, rsp, mb);
1675                         break;
1676                 case 0x13:
1677                 case 0x14:
1678                         qla24xx_process_response_queue(rsp);
1679                         break;
1680                 default:
1681                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
1682                             "(%d).\n",
1683                             vha->host_no, stat & 0xff));
1684                         break;
1685                 }
1686                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1687                 RD_REG_DWORD_RELAXED(&reg->hccr);
1688         }
1689         spin_unlock(&ha->hardware_lock);
1690
1691         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
1692             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1693                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1694                 complete(&ha->mbx_intr_comp);
1695         }
1696
1697         return IRQ_HANDLED;
1698 }
1699
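/*
 * MSI-X vector handler for the base response queue: drain the response
 * ring under the hardware lock and clear the RISC interrupt via HCCR.
 */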
1700 static irqreturn_t
1701 qla24xx_msix_rsp_q(int irq, void *dev_id)
1702 {
1703         struct qla_hw_data *ha;
1704         struct rsp_que *rsp;
1705         struct device_reg_24xx __iomem *reg;
1706
1707         rsp = (struct rsp_que *) dev_id;
1708         if (!rsp) {
1709                 printk(KERN_INFO
1710                     "%s(): NULL response queue pointer\n", __func__);
1711                 return IRQ_NONE;
1712         }
1713         ha = rsp->hw;
1714         reg = &ha->iobase->isp24;
1715
1716         spin_lock_irq(&ha->hardware_lock);
1717
1718         qla24xx_process_response_queue(rsp);
1719         WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1720
1721         spin_unlock_irq(&ha->hardware_lock);
1722
1723         return IRQ_HANDLED;
1724 }
1725
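/*
 * MSI-X vector handler used when multiple response queues are enabled
 * (ISP25xx multi-queue): drains the response ring; unlike
 * qla24xx_msix_rsp_q() it does not write the HCCR.
 */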
1726 static irqreturn_t
1727 qla25xx_msix_rsp_q(int irq, void *dev_id)
1728 {
1729         struct qla_hw_data *ha;
1730         struct rsp_que *rsp;
1731         struct device_reg_24xx __iomem *reg;
1732
1733         rsp = (struct rsp_que *) dev_id;
1734         if (!rsp) {
1735                 printk(KERN_INFO
1736                         "%s(): NULL response queue pointer\n", __func__);
1737                 return IRQ_NONE;
1738         }
1739         ha = rsp->hw;
1740         reg = &ha->iobase->isp24;
1741
1742         spin_lock_irq(&ha->hardware_lock);
1743
1744         qla24xx_process_response_queue(rsp);
1745
1746         spin_unlock_irq(&ha->hardware_lock);
1747
1748         return IRQ_HANDLED;
1749 }
1750
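/*
 * Default MSI-X vector handler (vector 0): one pass of the same work done
 * by qla24xx_intr_handler() -- mailbox completions, asynchronous events
 * and base response queue processing -- followed by a RISC interrupt clear.
 */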
1751 static irqreturn_t
1752 qla24xx_msix_default(int irq, void *dev_id)
1753 {
1754         scsi_qla_host_t *vha;
1755         struct qla_hw_data *ha;
1756         struct rsp_que *rsp;
1757         struct device_reg_24xx __iomem *reg;
1758         int             status;
1759         uint32_t        stat;
1760         uint32_t        hccr;
1761         uint16_t        mb[4];
1762
1763         rsp = (struct rsp_que *) dev_id;
1764         if (!rsp) {
1765                 DEBUG(printk(
1766                     "%s(): NULL response queue pointer\n", __func__));
1767                 return IRQ_NONE;
1768         }
1769         ha = rsp->hw;
1770         reg = &ha->iobase->isp24;
1771         status = 0;
1772
1773         spin_lock_irq(&ha->hardware_lock);
1774         vha = qla2x00_get_rsp_host(rsp);
1775         do {
1776                 stat = RD_REG_DWORD(&reg->host_status);
1777                 if (stat & HSRX_RISC_PAUSED) {
1778                         if (pci_channel_offline(ha->pdev))
1779                                 break;
1780
1781                         hccr = RD_REG_DWORD(&reg->hccr);
1782
1783                         qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1784                             "Dumping firmware!\n", hccr);
1785
1786                         qla2xxx_check_risc_status(vha);
1787
1788                         ha->isp_ops->fw_dump(vha, 1);
1789                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1790                         break;
1791                 } else if ((stat & HSRX_RISC_INT) == 0)
1792                         break;
1793
1794                 switch (stat & 0xff) {
1795                 case 0x1:
1796                 case 0x2:
1797                 case 0x10:
1798                 case 0x11:
1799                         qla24xx_mbx_completion(vha, MSW(stat));
1800                         status |= MBX_INTERRUPT;
1801
1802                         break;
1803                 case 0x12:
1804                         mb[0] = MSW(stat);
1805                         mb[1] = RD_REG_WORD(&reg->mailbox1);
1806                         mb[2] = RD_REG_WORD(&reg->mailbox2);
1807                         mb[3] = RD_REG_WORD(&reg->mailbox3);
1808                         qla2x00_async_event(vha, rsp, mb);
1809                         break;
1810                 case 0x13:
1811                 case 0x14:
1812                         qla24xx_process_response_queue(rsp);
1813                         break;
1814                 default:
1815                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
1816                             "(%d).\n",
1817                             vha->host_no, stat & 0xff));
1818                         break;
1819                 }
1820                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1821         } while (0);
1822         spin_unlock_irq(&ha->hardware_lock);
1823
1824         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
1825             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1826                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1827                 complete(&ha->mbx_intr_comp);
1828         }
1829
1830         return IRQ_HANDLED;
1831 }
1832
1833 /* Interrupt handling helpers. */
1834
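/*
 * Static MSI-X vector descriptors: entry 0 is the default/AEN vector;
 * entry 1 services either the base response queue or, with multi-queue
 * enabled, the added response queues.
 */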
1835 struct qla_init_msix_entry {
1836         uint16_t entry;
1837         uint16_t index;
1838         const char *name;
1839         irq_handler_t handler;
1840 };
1841
1842 static struct qla_init_msix_entry base_queue = {
1843         .entry = 0,
1844         .index = 0,
1845         .name = "qla2xxx (default)",
1846         .handler = qla24xx_msix_default,
1847 };
1848
1849 static struct qla_init_msix_entry base_rsp_queue = {
1850         .entry = 1,
1851         .index = 1,
1852         .name = "qla2xxx (rsp_q)",
1853         .handler = qla24xx_msix_rsp_q,
1854 };
1855
1856 static struct qla_init_msix_entry multi_rsp_queue = {
1857         .entry = 1,
1858         .index = 1,
1859         .name = "qla2xxx (multi_q)",
1860         .handler = qla25xx_msix_rsp_q,
1861 };
1862
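/* Release every registered MSI-X vector and disable MSI-X on the function. */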
1863 static void
1864 qla24xx_disable_msix(struct qla_hw_data *ha)
1865 {
1866         int i;
1867         struct qla_msix_entry *qentry;
1868
1869         for (i = 0; i < ha->msix_count; i++) {
1870                 qentry = &ha->msix_entries[i];
1871                 if (qentry->have_irq)
1872                         free_irq(qentry->vector, qentry->rsp);
1873         }
1874         pci_disable_msix(ha->pdev);
1875         kfree(ha->msix_entries);
1876         ha->msix_entries = NULL;
1877         ha->flags.msix_enabled = 0;
1878 }
1879
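/*
 * Allocate and enable MSI-X vectors.  If the full ha->msix_count cannot be
 * granted, retry once with the vector count returned by pci_enable_msix();
 * vector 0 is bound to the default handler and vector 1 to either the
 * multi-queue or the base response-queue handler.
 */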
1880 static int
1881 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
1882 {
1883 #define MIN_MSIX_COUNT  2
1884         int i, ret;
1885         struct msix_entry *entries;
1886         struct qla_msix_entry *qentry;
1887         struct qla_init_msix_entry *msix_queue;
1888
1889         entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
1890                                         GFP_KERNEL);
1891         if (!entries)
1892                 return -ENOMEM;
1893
1894         for (i = 0; i < ha->msix_count; i++)
1895                 entries[i].entry = i;
1896
1897         ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
1898         if (ret) {
1899                 if (ret < MIN_MSIX_COUNT)
1900                         goto msix_failed;
1901
1902                 qla_printk(KERN_WARNING, ha,
1903                         "MSI-X: Failed to enable support -- %d/%d\n"
1904                         " Retry with %d vectors\n", ha->msix_count, ret, ret);
1905                 ha->msix_count = ret;
1906                 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
1907                 if (ret) {
1908 msix_failed:
1909                         qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable"
1910                                 " support, giving up -- %d/%d\n",
1911                                 ha->msix_count, ret);
1912                         goto msix_out;
1913                 }
1914                 ha->max_queues = ha->msix_count - 1;
1915         }
1916         ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
1917                                 ha->msix_count, GFP_KERNEL);
1918         if (!ha->msix_entries) {
1919                 ret = -ENOMEM;
1920                 goto msix_out;
1921         }
1922         ha->flags.msix_enabled = 1;
1923
1924         for (i = 0; i < ha->msix_count; i++) {
1925                 qentry = &ha->msix_entries[i];
1926                 qentry->vector = entries[i].vector;
1927                 qentry->entry = entries[i].entry;
1928                 qentry->have_irq = 0;
1929                 qentry->rsp = NULL;
1930         }
1931
1932         /* Enable MSI-X for AENs for queue 0 */
1933         qentry = &ha->msix_entries[0];
1934         ret = request_irq(qentry->vector, base_queue.handler, 0,
1935                                         base_queue.name, rsp);
1936         if (ret) {
1937                 qla_printk(KERN_WARNING, ha,
1938                         "MSI-X: Unable to register handler -- %x/%d.\n",
1939                         qentry->vector, ret);
1940                 qla24xx_disable_msix(ha);
1941                 goto msix_out;
1942         }
1943         qentry->have_irq = 1;
1944         qentry->rsp = rsp;
1945
1946         /* Enable MSI-X vector for response queue update for queue 0 */
1947         if (ha->max_queues > 1 && ha->mqiobase) {
1948                 ha->mqenable = 1;
1949                 msix_queue = &multi_rsp_queue;
1950                 qla_printk(KERN_INFO, ha,
1951                                 "MQ enabled, Number of Queue Resources: %d\n",
1952                                 ha->max_queues);
1953         } else {
1954                 ha->mqenable = 0;
1955                 msix_queue = &base_rsp_queue;
1956         }
1957
1958         qentry = &ha->msix_entries[1];
1959         ret = request_irq(qentry->vector, msix_queue->handler, 0,
1960                                                 msix_queue->name, rsp);
1961         if (ret) {
1962                 qla_printk(KERN_WARNING, ha,
1963                         "MSI-X: Unable to register handler -- %x/%d.\n",
1964                         qentry->vector, ret);
1965                 qla24xx_disable_msix(ha);
1966                 ha->mqenable = 0;
1967                 goto msix_out;
1968         }
1969         qentry->have_irq = 1;
1970         qentry->rsp = rsp;
1971
1972 msix_out:
1973         kfree(entries);
1974         return ret;
1975 }
1976
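/*
 * Interrupt setup: prefer MSI-X on ISPs that support it, fall back to MSI
 * and finally to INTa, then clear any stale RISC/host interrupt state
 * (skipped on ISP81xx).
 */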
1977 int
1978 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
1979 {
1980         int ret;
1981         device_reg_t __iomem *reg = ha->iobase;
1982
1983         /* If possible, enable MSI-X. */
1984         if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
1985             !IS_QLA8432(ha) && !IS_QLA8001(ha))
1986                 goto skip_msix;
1987
1988         if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
1989                 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
1990                 DEBUG2(qla_printk(KERN_WARNING, ha,
1991                 "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
1992                         ha->pdev->revision, ha->fw_attributes));
1993
1994                 goto skip_msix;
1995         }
1996
1997         if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
1998             (ha->pdev->subsystem_device == 0x7040 ||
1999                 ha->pdev->subsystem_device == 0x7041 ||
2000                 ha->pdev->subsystem_device == 0x1705)) {
2001                 DEBUG2(qla_printk(KERN_WARNING, ha,
2002                     "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X, 0x%X).\n",
2003                     ha->pdev->subsystem_vendor,
2004                     ha->pdev->subsystem_device));
2005
2006                 goto skip_msi;
2007         }
2008
2009         ret = qla24xx_enable_msix(ha, rsp);
2010         if (!ret) {
2011                 DEBUG2(qla_printk(KERN_INFO, ha,
2012                     "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
2013                     ha->fw_attributes));
2014                 goto clear_risc_ints;
2015         }
2016         qla_printk(KERN_WARNING, ha,
2017             "MSI-X: Falling back to INTa mode -- %d.\n", ret);
2018 skip_msix:
2019
2020         if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2021             !IS_QLA8001(ha))
2022                 goto skip_msi;
2023
2024         ret = pci_enable_msi(ha->pdev);
2025         if (!ret) {
2026                 DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
2027                 ha->flags.msi_enabled = 1;
2028         }
2029 skip_msi:
2030
2031         ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
2032             IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp);
2033         if (ret) {
2034                 qla_printk(KERN_WARNING, ha,
2035                     "Failed to reserve interrupt %d; already in use.\n",
2036                     ha->pdev->irq);
2037                 goto fail;
2038         }
2039         ha->flags.inta_enabled = 1;
2040 clear_risc_ints:
2041
2042         /*
2043          * FIXME: Noted that 8014s were being dropped during NK testing.
2044          * Timing deltas during MSI-X/INTa transitions?
2045          */
2046         if (IS_QLA81XX(ha))
2047                 goto fail;
2048         spin_lock_irq(&ha->hardware_lock);
2049         if (IS_FWI2_CAPABLE(ha)) {
2050                 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
2051                 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
2052         } else {
2053                 WRT_REG_WORD(&reg->isp.semaphore, 0);
2054                 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
2055                 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
2056         }
2057         spin_unlock_irq(&ha->hardware_lock);
2058
2059 fail:
2060         return ret;
2061 }
2062
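/* Tear down whichever interrupt scheme (MSI-X, MSI or INTa) was set up. */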
2063 void
2064 qla2x00_free_irqs(scsi_qla_host_t *vha)
2065 {
2066         struct qla_hw_data *ha = vha->hw;
2067         struct rsp_que *rsp = ha->rsp_q_map[0];
2068
2069         if (ha->flags.msix_enabled)
2070                 qla24xx_disable_msix(ha);
2071         else if (ha->flags.inta_enabled) {
2072                 free_irq(ha->pdev->irq, rsp);
2073                 pci_disable_msi(ha->pdev);
2074         }
2075 }
2076
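/*
 * Map a response queue to its owning scsi_qla_host: for added queues, use
 * the fcport of the command referenced by the current ring entry; fall
 * back to the base (physical) host stored in the PCI driver data.
 */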
2077 static struct scsi_qla_host *
2078 qla2x00_get_rsp_host(struct rsp_que *rsp)
2079 {
2080         srb_t *sp;
2081         struct qla_hw_data *ha = rsp->hw;
2082         struct scsi_qla_host *vha = NULL;
2083         struct sts_entry_24xx *pkt;
2084         struct req_que *req;
2085
2086         if (rsp->id) {
2087                 pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
2088                 req = rsp->req;
2089                 if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) {
2090                         sp = req->outstanding_cmds[pkt->handle];
2091                         if (sp)
2092                                 vha = sp->fcport->vha;
2093                 }
2094         }
2095         if (!vha)
2096                 /* handle it in base queue */
2097                 vha = pci_get_drvdata(ha->pdev);
2098
2099         return vha;
2100 }
2101
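/* Register the multi-queue MSI-X handler for an added response queue. */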
2102 int qla25xx_request_irq(struct rsp_que *rsp)
2103 {
2104         struct qla_hw_data *ha = rsp->hw;
2105         struct qla_init_msix_entry *intr = &multi_rsp_queue;
2106         struct qla_msix_entry *msix = rsp->msix;
2107         int ret;
2108
2109         ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
2110         if (ret) {
2111                 qla_printk(KERN_WARNING, ha,
2112                         "MSI-X: Unable to register handler -- %x/%d.\n",
2113                         msix->vector, ret);
2114                 return ret;
2115         }
2116         msix->have_irq = 1;
2117         msix->rsp = rsp;
2118         return ret;
2119 }
2120
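/*
 * Update the response-queue out-pointer through the per-queue register
 * page in the multi-queue I/O space (ISP25xx).
 */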
2121 void
2122 qla25xx_wrt_rsp_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
2123 {
2124         device_reg_t __iomem *reg = (void *) ha->mqiobase + QLA_QUE_PAGE * id;
2125         WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, index);
2126 }
2127
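/* Update the response-queue out-pointer in the standard ISP24xx register space. */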
2128 void
2129 qla24xx_wrt_rsp_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
2130 {
2131         device_reg_t __iomem *reg = (void *) ha->iobase;
2132         WRT_REG_DWORD(&reg->isp24.rsp_q_out, index);
2133 }
2134