1 /*
2  * pmcraid.c -- driver for PMC Sierra MaxRAID controller adapters
3  *
4  * Written By: Anil Ravindranath<anil_ravindranath@pmc-sierra.com>
5  *             PMC-Sierra Inc
6  *
7  * Copyright (C) 2008, 2009 PMC Sierra Inc
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; if not, write to the Free Software
21  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307,
22  * USA
23  *
24  */
25 #include <linux/fs.h>
26 #include <linux/init.h>
27 #include <linux/types.h>
28 #include <linux/errno.h>
29 #include <linux/kernel.h>
30 #include <linux/ioport.h>
31 #include <linux/delay.h>
32 #include <linux/pci.h>
33 #include <linux/wait.h>
34 #include <linux/spinlock.h>
35 #include <linux/sched.h>
36 #include <linux/interrupt.h>
37 #include <linux/blkdev.h>
38 #include <linux/firmware.h>
39 #include <linux/module.h>
40 #include <linux/moduleparam.h>
41 #include <linux/hdreg.h>
42 #include <linux/io.h>
43 #include <linux/slab.h>
44 #include <asm/irq.h>
45 #include <asm/processor.h>
46 #include <linux/libata.h>
47 #include <linux/mutex.h>
48 #include <linux/ktime.h>
49 #include <scsi/scsi.h>
50 #include <scsi/scsi_host.h>
51 #include <scsi/scsi_device.h>
52 #include <scsi/scsi_tcq.h>
53 #include <scsi/scsi_eh.h>
54 #include <scsi/scsi_cmnd.h>
55 #include <scsi/scsicam.h>
56
57 #include "pmcraid.h"
58
59 /*
60  *   Module configuration parameters
61  */
62 static unsigned int pmcraid_debug_log;
63 static unsigned int pmcraid_disable_aen;
64 static unsigned int pmcraid_log_level = IOASC_LOG_LEVEL_MUST;
65 static unsigned int pmcraid_enable_msix;
66
67 /*
68  * Data structures to support multiple adapters by the LLD.
69  * pmcraid_adapter_count - count of configured adapters
70  */
71 static atomic_t pmcraid_adapter_count = ATOMIC_INIT(0);
72
73 /*
74  * Supporting user-level control interface through IOCTL commands.
75  * pmcraid_major - major number to use
76  * pmcraid_minor - minor number(s) to use
77  */
78 static unsigned int pmcraid_major;
79 static struct class *pmcraid_class;
80 static DECLARE_BITMAP(pmcraid_minor, PMCRAID_MAX_ADAPTERS);
81
82 /*
83  * Module parameters
84  */
85 MODULE_AUTHOR("Anil Ravindranath<anil_ravindranath@pmc-sierra.com>");
86 MODULE_DESCRIPTION("PMC Sierra MaxRAID Controller Driver");
87 MODULE_LICENSE("GPL");
88 MODULE_VERSION(PMCRAID_DRIVER_VERSION);
89
90 module_param_named(log_level, pmcraid_log_level, uint, (S_IRUGO | S_IWUSR));
91 MODULE_PARM_DESC(log_level,
92                  "Enables firmware error code logging, default :1 high-severity"
93                  " errors, 2: all errors including high-severity errors,"
94                  " 0: disables logging");
95
96 module_param_named(debug, pmcraid_debug_log, uint, (S_IRUGO | S_IWUSR));
97 MODULE_PARM_DESC(debug,
98                  "Enable driver verbose message logging. Set 1 to enable."
99                  "(default: 0)");
100
101 module_param_named(disable_aen, pmcraid_disable_aen, uint, (S_IRUGO | S_IWUSR));
102 MODULE_PARM_DESC(disable_aen,
103                  "Disable driver aen notifications to apps. Set 1 to disable."
104                  "(default: 0)");
105
106 /* chip-specific constants for PMC MaxRAID controllers (same for
107  * both 0x5220 and 0x8010)
108  */
109 static struct pmcraid_chip_details pmcraid_chip_cfg[] = {
110         {
111          .ioastatus = 0x0,
112          .ioarrin = 0x00040,
113          .mailbox = 0x7FC30,
114          .global_intr_mask = 0x00034,
115          .ioa_host_intr = 0x0009C,
116          .ioa_host_intr_clr = 0x000A0,
117          .ioa_host_msix_intr = 0x7FC40,
118          .ioa_host_mask = 0x7FC28,
119          .ioa_host_mask_clr = 0x7FC28,
120          .host_ioa_intr = 0x00020,
121          .host_ioa_intr_clr = 0x00020,
122          .transop_timeout = 300
123          }
124 };
125
126 /*
127  * PCI device ids supported by pmcraid driver
128  */
129 static struct pci_device_id pmcraid_pci_table[] = {
130         { PCI_DEVICE(PCI_VENDOR_ID_PMC, PCI_DEVICE_ID_PMC_MAXRAID),
131           0, 0, (kernel_ulong_t)&pmcraid_chip_cfg[0]
132         },
133         {}
134 };
135
136 MODULE_DEVICE_TABLE(pci, pmcraid_pci_table);
137
138
139
140 /**
141  * pmcraid_slave_alloc - Prepare for commands to a device
142  * @scsi_dev: scsi device struct
143  *
144  * This function is called by mid-layer prior to sending any command to the new
145  * device. Stores resource entry details of the device in scsi_device struct.
146  * Queuecommand uses the resource handle and other details to fill up IOARCB
147  * while sending commands to the device.
148  *
149  * Return value:
150  *        0 on success / -ENXIO if device does not exist
151  */
152 static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
153 {
154         struct pmcraid_resource_entry *temp, *res = NULL;
155         struct pmcraid_instance *pinstance;
156         u8 target, bus, lun;
157         unsigned long lock_flags;
158         int rc = -ENXIO;
159         u16 fw_version;
160
161         pinstance = shost_priv(scsi_dev->host);
162
163         fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
164
165         /* Driver exposes VSET and GSCSI resources only; all other device types
166          * are not exposed. Resource list is synchronized using resource lock
167          * so any traversal or modifications to the list should be done inside
168          * this lock
169          */
170         spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
171         list_for_each_entry(temp, &pinstance->used_res_q, queue) {
172
173                 /* do not expose VSETs with order-ids > MAX_VSET_TARGETS */
174                 if (RES_IS_VSET(temp->cfg_entry)) {
175                         if (fw_version <= PMCRAID_FW_VERSION_1)
176                                 target = temp->cfg_entry.unique_flags1;
177                         else
178                                 target = le16_to_cpu(temp->cfg_entry.array_id) & 0xFF;
179
180                         if (target > PMCRAID_MAX_VSET_TARGETS)
181                                 continue;
182                         bus = PMCRAID_VSET_BUS_ID;
183                         lun = 0;
184                 } else if (RES_IS_GSCSI(temp->cfg_entry)) {
185                         target = RES_TARGET(temp->cfg_entry.resource_address);
186                         bus = PMCRAID_PHYS_BUS_ID;
187                         lun = RES_LUN(temp->cfg_entry.resource_address);
188                 } else {
189                         continue;
190                 }
191
192                 if (bus == scsi_dev->channel &&
193                     target == scsi_dev->id &&
194                     lun == scsi_dev->lun) {
195                         res = temp;
196                         break;
197                 }
198         }
199
200         if (res) {
201                 res->scsi_dev = scsi_dev;
202                 scsi_dev->hostdata = res;
203                 res->change_detected = 0;
204                 atomic_set(&res->read_failures, 0);
205                 atomic_set(&res->write_failures, 0);
206                 rc = 0;
207         }
208         spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
209         return rc;
210 }
211
212 /**
213  * pmcraid_slave_configure - Configures a SCSI device
214  * @scsi_dev: scsi device struct
215  *
216  * This function is executed by SCSI mid layer just after a device is first
217  * scanned (i.e. it has responded to an INQUIRY). For VSET resources, the
218  * timeout value (default 30s) is overridden with a higher value (60s)
219  * and the max_sectors value is overridden to 512. It also sets queue depth
220  * to host->cmd_per_lun value
221  *
222  * Return value:
223  *        0 on success
224  */
225 static int pmcraid_slave_configure(struct scsi_device *scsi_dev)
226 {
227         struct pmcraid_resource_entry *res = scsi_dev->hostdata;
228
229         if (!res)
230                 return 0;
231
232         /* LLD exposes VSETs and Enclosure devices only */
233         if (RES_IS_GSCSI(res->cfg_entry) &&
234             scsi_dev->type != TYPE_ENCLOSURE)
235                 return -ENXIO;
236
237         pmcraid_info("configuring %x:%x:%x:%x\n",
238                      scsi_dev->host->unique_id,
239                      scsi_dev->channel,
240                      scsi_dev->id,
241                      (u8)scsi_dev->lun);
242
243         if (RES_IS_GSCSI(res->cfg_entry)) {
244                 scsi_dev->allow_restart = 1;
245         } else if (RES_IS_VSET(res->cfg_entry)) {
246                 scsi_dev->allow_restart = 1;
247                 blk_queue_rq_timeout(scsi_dev->request_queue,
248                                      PMCRAID_VSET_IO_TIMEOUT);
249                 blk_queue_max_hw_sectors(scsi_dev->request_queue,
250                                       PMCRAID_VSET_MAX_SECTORS);
251         }
252
253         /*
254          * We never want to report TCQ support for these types of devices.
255          */
256         if (!RES_IS_GSCSI(res->cfg_entry) && !RES_IS_VSET(res->cfg_entry))
257                 scsi_dev->tagged_supported = 0;
258
259         return 0;
260 }
261
262 /**
263  * pmcraid_slave_destroy - Unconfigure a SCSI device before removing it
264  *
265  * @scsi_dev: scsi device struct
266  *
267  * This is called by mid-layer before removing a device. Pointer assignments
268  * done in pmcraid_slave_alloc will be reset to NULL here.
269  *
270  * Return value
271  *   none
272  */
273 static void pmcraid_slave_destroy(struct scsi_device *scsi_dev)
274 {
275         struct pmcraid_resource_entry *res;
276
277         res = (struct pmcraid_resource_entry *)scsi_dev->hostdata;
278
279         if (res)
280                 res->scsi_dev = NULL;
281
282         scsi_dev->hostdata = NULL;
283 }
284
285 /**
286  * pmcraid_change_queue_depth - Change the device's queue depth
287  * @scsi_dev: scsi device struct
288  * @depth: depth to set
289  *
290  * Return value
291  *      actual depth set
292  */
293 static int pmcraid_change_queue_depth(struct scsi_device *scsi_dev, int depth)
294 {
295         if (depth > PMCRAID_MAX_CMD_PER_LUN)
296                 depth = PMCRAID_MAX_CMD_PER_LUN;
297         return scsi_change_queue_depth(scsi_dev, depth);
298 }
299
300 /**
301  * pmcraid_init_cmdblk - initializes a command block
302  *
303  * @cmd: pointer to struct pmcraid_cmd to be initialized
304  * @index: if >=0 first time initialization; otherwise reinitialization
305  *
306  * Return Value
307  *       None
308  */
309 static void pmcraid_init_cmdblk(struct pmcraid_cmd *cmd, int index)
310 {
311         struct pmcraid_ioarcb *ioarcb = &(cmd->ioa_cb->ioarcb);
312         dma_addr_t dma_addr = cmd->ioa_cb_bus_addr;
313
314         if (index >= 0) {
315                 /* first time initialization (called from  probe) */
316                 u32 ioasa_offset =
317                         offsetof(struct pmcraid_control_block, ioasa);
318
319                 cmd->index = index;
320                 ioarcb->response_handle = cpu_to_le32(index << 2);
321                 ioarcb->ioarcb_bus_addr = cpu_to_le64(dma_addr);
322                 ioarcb->ioasa_bus_addr = cpu_to_le64(dma_addr + ioasa_offset);
323                 ioarcb->ioasa_len = cpu_to_le16(sizeof(struct pmcraid_ioasa));
324         } else {
325                 /* re-initialization of various lengths, called once command is
326                  * processed by IOA
327                  */
328                 memset(&cmd->ioa_cb->ioarcb.cdb, 0, PMCRAID_MAX_CDB_LEN);
329                 ioarcb->hrrq_id = 0;
330                 ioarcb->request_flags0 = 0;
331                 ioarcb->request_flags1 = 0;
332                 ioarcb->cmd_timeout = 0;
333                 ioarcb->ioarcb_bus_addr &= cpu_to_le64(~0x1FULL);
334                 ioarcb->ioadl_bus_addr = 0;
335                 ioarcb->ioadl_length = 0;
336                 ioarcb->data_transfer_length = 0;
337                 ioarcb->add_cmd_param_length = 0;
338                 ioarcb->add_cmd_param_offset = 0;
339                 cmd->ioa_cb->ioasa.ioasc = 0;
340                 cmd->ioa_cb->ioasa.residual_data_length = 0;
341                 cmd->time_left = 0;
342         }
343
344         cmd->cmd_done = NULL;
345         cmd->scsi_cmd = NULL;
346         cmd->release = 0;
347         cmd->completion_req = 0;
348         cmd->sense_buffer = NULL;
349         cmd->sense_buffer_dma = 0;
350         cmd->dma_handle = 0;
351         init_timer(&cmd->timer);
352 }
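/* Editorial note: the response handle programmed above is the command index
 * shifted left by two bits. When the IOA posts this value in a host request
 * response queue (HRRQ) entry, the low-order bits carry per-entry flags (such
 * as the toggle bit), so the response path can recover the command index with
 * a simple right shift.
 */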
353
354 /**
355  * pmcraid_reinit_cmdblk - reinitialize a command block
356  *
357  * @cmd: pointer to struct pmcraid_cmd to be reinitialized
358  *
359  * Return Value
360  *       None
361  */
362 static void pmcraid_reinit_cmdblk(struct pmcraid_cmd *cmd)
363 {
364         pmcraid_init_cmdblk(cmd, -1);
365 }
366
367 /**
368  * pmcraid_get_free_cmd - get a free cmd block from command block pool
369  * @pinstance: adapter instance structure
370  *
371  * Return Value:
372  *      returns pointer to cmd block or NULL if no blocks are available
373  */
374 static struct pmcraid_cmd *pmcraid_get_free_cmd(
375         struct pmcraid_instance *pinstance
376 )
377 {
378         struct pmcraid_cmd *cmd = NULL;
379         unsigned long lock_flags;
380
381         /* free cmd block list is protected by free_pool_lock */
382         spin_lock_irqsave(&pinstance->free_pool_lock, lock_flags);
383
384         if (!list_empty(&pinstance->free_cmd_pool)) {
385                 cmd = list_entry(pinstance->free_cmd_pool.next,
386                                  struct pmcraid_cmd, free_list);
387                 list_del(&cmd->free_list);
388         }
389         spin_unlock_irqrestore(&pinstance->free_pool_lock, lock_flags);
390
391         /* Initialize the command block before giving it to the caller */
392         if (cmd != NULL)
393                 pmcraid_reinit_cmdblk(cmd);
394         return cmd;
395 }
396
397 /**
398  * pmcraid_return_cmd - return a completed command block back into free pool
399  * @cmd: pointer to the command block
400  *
401  * Return Value:
402  *      nothing
403  */
404 static void pmcraid_return_cmd(struct pmcraid_cmd *cmd)
405 {
406         struct pmcraid_instance *pinstance = cmd->drv_inst;
407         unsigned long lock_flags;
408
409         spin_lock_irqsave(&pinstance->free_pool_lock, lock_flags);
410         list_add_tail(&cmd->free_list, &pinstance->free_cmd_pool);
411         spin_unlock_irqrestore(&pinstance->free_pool_lock, lock_flags);
412 }
413
414 /**
415  * pmcraid_read_interrupts -  reads IOA interrupts
416  *
417  * @pinstance: pointer to adapter instance structure
418  *
419  * Return value
420  *       interrupts read from IOA
421  */
422 static u32 pmcraid_read_interrupts(struct pmcraid_instance *pinstance)
423 {
424         return (pinstance->interrupt_mode) ?
425                 ioread32(pinstance->int_regs.ioa_host_msix_interrupt_reg) :
426                 ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
427 }
428
429 /**
430  * pmcraid_disable_interrupts - Masks and clears all specified interrupts
431  *
432  * @pinstance: pointer to per adapter instance structure
433  * @intrs: interrupts to disable
434  *
435  * Return Value
436  *       None
437  */
438 static void pmcraid_disable_interrupts(
439         struct pmcraid_instance *pinstance,
440         u32 intrs
441 )
442 {
443         u32 gmask = ioread32(pinstance->int_regs.global_interrupt_mask_reg);
444         u32 nmask = gmask | GLOBAL_INTERRUPT_MASK;
445
446         iowrite32(intrs, pinstance->int_regs.ioa_host_interrupt_clr_reg);
447         iowrite32(nmask, pinstance->int_regs.global_interrupt_mask_reg);
448         ioread32(pinstance->int_regs.global_interrupt_mask_reg);
449
450         if (!pinstance->interrupt_mode) {
451                 iowrite32(intrs,
452                         pinstance->int_regs.ioa_host_interrupt_mask_reg);
453                 ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
454         }
455 }
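/* Editorial note: the ioread32() calls that immediately follow the iowrite32()
 * calls above (and in the enable path below) act as read-backs that flush
 * posted PCI writes, making sure the mask updates have reached the adapter
 * before the driver proceeds.
 */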
456
457 /**
458  * pmcraid_enable_interrupts - Enables specified interrupts
459  *
460  * @pinstance: pointer to per adapter instance structure
461  * @intr: interrupts to enable
462  *
463  * Return Value
464  *       None
465  */
466 static void pmcraid_enable_interrupts(
467         struct pmcraid_instance *pinstance,
468         u32 intrs
469 )
470 {
471         u32 gmask = ioread32(pinstance->int_regs.global_interrupt_mask_reg);
472         u32 nmask = gmask & (~GLOBAL_INTERRUPT_MASK);
473
474         iowrite32(nmask, pinstance->int_regs.global_interrupt_mask_reg);
475
476         if (!pinstance->interrupt_mode) {
477                 iowrite32(~intrs,
478                          pinstance->int_regs.ioa_host_interrupt_mask_reg);
479                 ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
480         }
481
482         pmcraid_info("enabled interrupts global mask = %x intr_mask = %x\n",
483                 ioread32(pinstance->int_regs.global_interrupt_mask_reg),
484                 ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg));
485 }
486
487 /**
488  * pmcraid_clr_trans_op - clear trans to op interrupt
489  *
490  * @pinstance: pointer to per adapter instance structure
491  *
492  * Return Value
493  *       None
494  */
495 static void pmcraid_clr_trans_op(
496         struct pmcraid_instance *pinstance
497 )
498 {
499         unsigned long lock_flags;
500
501         if (!pinstance->interrupt_mode) {
502                 iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
503                         pinstance->int_regs.ioa_host_interrupt_mask_reg);
504                 ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
505                 iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
506                         pinstance->int_regs.ioa_host_interrupt_clr_reg);
507                 ioread32(pinstance->int_regs.ioa_host_interrupt_clr_reg);
508         }
509
510         if (pinstance->reset_cmd != NULL) {
511                 del_timer(&pinstance->reset_cmd->timer);
512                 spin_lock_irqsave(
513                         pinstance->host->host_lock, lock_flags);
514                 pinstance->reset_cmd->cmd_done(pinstance->reset_cmd);
515                 spin_unlock_irqrestore(
516                         pinstance->host->host_lock, lock_flags);
517         }
518 }
519
520 /**
521  * pmcraid_reset_type - Determine the required reset type
522  * @pinstance: pointer to adapter instance structure
523  *
524  * IOA requires hard reset if any of the following conditions is true.
525  * 1. If HRRQ valid interrupt is not masked
526  * 2. IOA reset alert doorbell is set
527  * 3. If there are any error interrupts
528  */
529 static void pmcraid_reset_type(struct pmcraid_instance *pinstance)
530 {
531         u32 mask;
532         u32 intrs;
533         u32 alerts;
534
535         mask = ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
536         intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
537         alerts = ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
538
539         if ((mask & INTRS_HRRQ_VALID) == 0 ||
540             (alerts & DOORBELL_IOA_RESET_ALERT) ||
541             (intrs & PMCRAID_ERROR_INTERRUPTS)) {
542                 pmcraid_info("IOA requires hard reset\n");
543                 pinstance->ioa_hard_reset = 1;
544         }
545
546         /* If unit check is active, trigger the dump */
547         if (intrs & INTRS_IOA_UNIT_CHECK)
548                 pinstance->ioa_unit_check = 1;
549 }
550
551 /**
552  * pmcraid_bist_done - completion function for PCI BIST
553  * @cmd: pointer to reset command
554  * Return Value
555  *      none
556  */
557
558 static void pmcraid_ioa_reset(struct pmcraid_cmd *);
559
560 static void pmcraid_bist_done(struct pmcraid_cmd *cmd)
561 {
562         struct pmcraid_instance *pinstance = cmd->drv_inst;
563         unsigned long lock_flags;
564         int rc;
565         u16 pci_reg;
566
567         rc = pci_read_config_word(pinstance->pdev, PCI_COMMAND, &pci_reg);
568
569         /* If PCI config space can't be accessed, wait another two seconds */
570         if ((rc != PCIBIOS_SUCCESSFUL || (!(pci_reg & PCI_COMMAND_MEMORY))) &&
571             cmd->time_left > 0) {
572                 pmcraid_info("BIST not complete, waiting another 2 secs\n");
573                 cmd->timer.expires = jiffies + cmd->time_left;
574                 cmd->time_left = 0;
575                 cmd->timer.data = (unsigned long)cmd;
576                 cmd->timer.function =
577                         (void (*)(unsigned long))pmcraid_bist_done;
578                 add_timer(&cmd->timer);
579         } else {
580                 cmd->time_left = 0;
581                 pmcraid_info("BIST is complete, proceeding with reset\n");
582                 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
583                 pmcraid_ioa_reset(cmd);
584                 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
585         }
586 }
587
588 /**
589  * pmcraid_start_bist - starts BIST
590  * @cmd: pointer to reset cmd
591  * Return Value
592  *   none
593  */
594 static void pmcraid_start_bist(struct pmcraid_cmd *cmd)
595 {
596         struct pmcraid_instance *pinstance = cmd->drv_inst;
597         u32 doorbells, intrs;
598
599         /* proceed with bist and wait for 2 seconds */
600         iowrite32(DOORBELL_IOA_START_BIST,
601                 pinstance->int_regs.host_ioa_interrupt_reg);
602         doorbells = ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
603         intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
604         pmcraid_info("doorbells after start bist: %x intrs: %x\n",
605                       doorbells, intrs);
606
607         cmd->time_left = msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
608         cmd->timer.data = (unsigned long)cmd;
609         cmd->timer.expires = jiffies + msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
610         cmd->timer.function = (void (*)(unsigned long))pmcraid_bist_done;
611         add_timer(&cmd->timer);
612 }
613
614 /**
615  * pmcraid_reset_alert_done - completion routine for reset_alert
616  * @cmd: pointer to command block used in reset sequence
617  * Return value
618  *  None
619  */
620 static void pmcraid_reset_alert_done(struct pmcraid_cmd *cmd)
621 {
622         struct pmcraid_instance *pinstance = cmd->drv_inst;
623         u32 status = ioread32(pinstance->ioa_status);
624         unsigned long lock_flags;
625
626         /* if the critical operation in progress bit has cleared or the wait has
627          * timed out, invoke the reset engine to proceed with hard reset. If there
628          * is more time left to wait, restart the timer
629          */
630         if (((status & INTRS_CRITICAL_OP_IN_PROGRESS) == 0) ||
631             cmd->time_left <= 0) {
632                 pmcraid_info("critical op is reset proceeding with reset\n");
633                 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
634                 pmcraid_ioa_reset(cmd);
635                 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
636         } else {
637                 pmcraid_info("critical op is not yet reset waiting again\n");
638                 /* restart timer if some more time is available to wait */
639                 cmd->time_left -= PMCRAID_CHECK_FOR_RESET_TIMEOUT;
640                 cmd->timer.data = (unsigned long)cmd;
641                 cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
642                 cmd->timer.function =
643                         (void (*)(unsigned long))pmcraid_reset_alert_done;
644                 add_timer(&cmd->timer);
645         }
646 }
647
648 /**
649  * pmcraid_reset_alert - alerts IOA for a possible reset
650  * @cmd : command block to be used for reset sequence.
651  *
652  * Return Value
653  *      None. If PCI config space is accessible, the RESET_ALERT doorbell is
654  *      written to the IOA; otherwise BIST is started as part of a hard
655  *      reset.
656  */
657 static void pmcraid_notify_ioastate(struct pmcraid_instance *, u32);
658 static void pmcraid_reset_alert(struct pmcraid_cmd *cmd)
659 {
660         struct pmcraid_instance *pinstance = cmd->drv_inst;
661         u32 doorbells;
662         int rc;
663         u16 pci_reg;
664
665         /* If we are able to access IOA PCI config space, alert IOA that we are
666          * going to reset it soon. This enables IOA to preserve persistent error
667          * data if any. In case memory space is not accessible, proceed with
668          * BIST or slot_reset
669          */
670         rc = pci_read_config_word(pinstance->pdev, PCI_COMMAND, &pci_reg);
671         if ((rc == PCIBIOS_SUCCESSFUL) && (pci_reg & PCI_COMMAND_MEMORY)) {
672
673                 /* wait for IOA permission, i.e. until the CRITICAL_OPERATION bit
674                  * is reset. The IOA doesn't generate any interrupt when it clears
675                  * the CRITICAL OPERATION bit, so a timer is started to poll for
676                  * this bit to be reset.
677                  */
678                 cmd->time_left = PMCRAID_RESET_TIMEOUT;
679                 cmd->timer.data = (unsigned long)cmd;
680                 cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
681                 cmd->timer.function =
682                         (void (*)(unsigned long))pmcraid_reset_alert_done;
683                 add_timer(&cmd->timer);
684
685                 iowrite32(DOORBELL_IOA_RESET_ALERT,
686                         pinstance->int_regs.host_ioa_interrupt_reg);
687                 doorbells =
688                         ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
689                 pmcraid_info("doorbells after reset alert: %x\n", doorbells);
690         } else {
691                 pmcraid_info("PCI config is not accessible starting BIST\n");
692                 pinstance->ioa_state = IOA_STATE_IN_HARD_RESET;
693                 pmcraid_start_bist(cmd);
694         }
695 }
696
697 /**
698  * pmcraid_timeout_handler -  Timeout handler for internally generated ops
699  *
700  * @cmd : pointer to the command structure that timed out
701  *
702  * This function blocks host requests and initiates an adapter reset.
703  *
704  * Return value:
705  *   None
706  */
707 static void pmcraid_timeout_handler(struct pmcraid_cmd *cmd)
708 {
709         struct pmcraid_instance *pinstance = cmd->drv_inst;
710         unsigned long lock_flags;
711
712         dev_info(&pinstance->pdev->dev,
713                 "Adapter being reset due to cmd(CDB[0] = %x) timeout\n",
714                 cmd->ioa_cb->ioarcb.cdb[0]);
715
716         /* Command timeouts result in hard reset sequence. The command that got
717          * timed out may be the one used as part of reset sequence. In this
718          * case restart reset sequence using the same command block even if
719          * reset is in progress. Otherwise fail this command and get a free
720          * command block to restart the reset sequence.
721          */
722         spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
723         if (!pinstance->ioa_reset_in_progress) {
724                 pinstance->ioa_reset_attempts = 0;
725                 cmd = pmcraid_get_free_cmd(pinstance);
726
727                 /* If we are out of command blocks, just return here.
728                  * Some other command's timeout handler can do the reset job
729                  */
730                 if (cmd == NULL) {
731                         spin_unlock_irqrestore(pinstance->host->host_lock,
732                                                lock_flags);
733                         pmcraid_err("no free cmnd block for timeout handler\n");
734                         return;
735                 }
736
737                 pinstance->reset_cmd = cmd;
738                 pinstance->ioa_reset_in_progress = 1;
739         } else {
740                 pmcraid_info("reset is already in progress\n");
741
742                 if (pinstance->reset_cmd != cmd) {
743                         /* This command should have been given to IOA, this
744                          * command will be completed by fail_outstanding_cmds
745                          * anyway
746                          */
747                         pmcraid_err("cmd is pending but reset in progress\n");
748                 }
749
750                 /* If this command was being used as part of the reset
751                  * sequence, set cmd_done pointer to pmcraid_ioa_reset. This
752                  * causes fail_outstanding_commands not to return the command
753                  * block back to free pool
754                  */
755                 if (cmd == pinstance->reset_cmd)
756                         cmd->cmd_done = pmcraid_ioa_reset;
757         }
758
759         /* Notify apps of important IOA bringup/bringdown sequences */
760         if (pinstance->scn.ioa_state != PMC_DEVICE_EVENT_RESET_START &&
761             pinstance->scn.ioa_state != PMC_DEVICE_EVENT_SHUTDOWN_START)
762                 pmcraid_notify_ioastate(pinstance,
763                                         PMC_DEVICE_EVENT_RESET_START);
764
765         pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
766         scsi_block_requests(pinstance->host);
767         pmcraid_reset_alert(cmd);
768         spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
769 }
770
771 /**
772  * pmcraid_internal_done - completion routine for internally generated cmds
773  *
774  * @cmd: command that got response from IOA
775  *
776  * Return Value:
777  *       none
778  */
779 static void pmcraid_internal_done(struct pmcraid_cmd *cmd)
780 {
781         pmcraid_info("response internal cmd CDB[0] = %x ioasc = %x\n",
782                      cmd->ioa_cb->ioarcb.cdb[0],
783                      le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
784
785         /* Some of the internal commands are sent with callers blocking for the
786          * response. This is indicated by the cmd->completion_req
787          * field. Response path needs to wake up any waiters waiting for cmd
788          * completion if this flag is set.
789          */
790         if (cmd->completion_req) {
791                 cmd->completion_req = 0;
792                 complete(&cmd->wait_for_completion);
793         }
794
795         /* most of the internal commands are completed by caller itself, so
796          * no need to return the command block back to free pool until we are
797          * required to do so (e.g once done with initialization).
798          */
799         if (cmd->release) {
800                 cmd->release = 0;
801                 pmcraid_return_cmd(cmd);
802         }
803 }
804
805 /**
806  * pmcraid_reinit_cfgtable_done - done function for cfg table reinitialization
807  *
808  * @cmd: command that got response from IOA
809  *
810  * This routine is called after driver re-reads configuration table due to a
811  * lost CCN. It returns the command block back to free pool and schedules
812  * worker thread to add/delete devices into the system.
813  *
814  * Return Value:
815  *       none
816  */
817 static void pmcraid_reinit_cfgtable_done(struct pmcraid_cmd *cmd)
818 {
819         pmcraid_info("response internal cmd CDB[0] = %x ioasc = %x\n",
820                      cmd->ioa_cb->ioarcb.cdb[0],
821                      le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
822
823         if (cmd->release) {
824                 cmd->release = 0;
825                 pmcraid_return_cmd(cmd);
826         }
827         pmcraid_info("scheduling worker for config table reinitialization\n");
828         schedule_work(&cmd->drv_inst->worker_q);
829 }
830
831 /**
832  * pmcraid_erp_done - Process completion of SCSI error response from device
833  * @cmd: pmcraid_command
834  *
835  * This function copies the sense buffer into the scsi_cmd struct and completes
836  * scsi_cmd by calling scsi_done function.
837  *
838  * Return value:
839  *  none
840  */
841 static void pmcraid_erp_done(struct pmcraid_cmd *cmd)
842 {
843         struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
844         struct pmcraid_instance *pinstance = cmd->drv_inst;
845         u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
846
847         if (PMCRAID_IOASC_SENSE_KEY(ioasc) > 0) {
848                 scsi_cmd->result |= (DID_ERROR << 16);
849                 scmd_printk(KERN_INFO, scsi_cmd,
850                             "command CDB[0] = %x failed with IOASC: 0x%08X\n",
851                             cmd->ioa_cb->ioarcb.cdb[0], ioasc);
852         }
853
854         /* if we had allocated sense buffers for request sense, copy the sense
855          * data into the SCSI command's sense buffer and release the buffers
856          */
857         if (cmd->sense_buffer != NULL) {
858                 memcpy(scsi_cmd->sense_buffer,
859                        cmd->sense_buffer,
860                        SCSI_SENSE_BUFFERSIZE);
861                 pci_free_consistent(pinstance->pdev,
862                                     SCSI_SENSE_BUFFERSIZE,
863                                     cmd->sense_buffer, cmd->sense_buffer_dma);
864                 cmd->sense_buffer = NULL;
865                 cmd->sense_buffer_dma = 0;
866         }
867
868         scsi_dma_unmap(scsi_cmd);
869         pmcraid_return_cmd(cmd);
870         scsi_cmd->scsi_done(scsi_cmd);
871 }
872
873 /**
874  * _pmcraid_fire_command - sends an IOA command to adapter
875  *
876  * This function adds the given block into pending command list
877  * and returns without waiting
878  *
879  * @cmd : command to be sent to the device
880  *
881  * Return Value
882  *      None
883  */
884 static void _pmcraid_fire_command(struct pmcraid_cmd *cmd)
885 {
886         struct pmcraid_instance *pinstance = cmd->drv_inst;
887         unsigned long lock_flags;
888
889         /* Add this command block to pending cmd pool. We do this prior to
890          * writing the IOARCB to ioarrin because the IOA might complete the command
891          * by the time we are about to add it to the list. Response handler
892          * (isr/tasklet) looks for the cmd block in the pending list.
893          */
894         spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags);
895         list_add_tail(&cmd->free_list, &pinstance->pending_cmd_pool);
896         spin_unlock_irqrestore(&pinstance->pending_pool_lock, lock_flags);
897         atomic_inc(&pinstance->outstanding_cmds);
898
899         /* driver writes lower 32-bit value of IOARCB address only */
900         mb();
901         iowrite32(le64_to_cpu(cmd->ioa_cb->ioarcb.ioarcb_bus_addr), pinstance->ioarrin);
902 }
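/* Editorial note: the mb() above orders the list insertion and IOARCB setup
 * before the IOARRIN write that hands the command to the adapter; only the
 * lower 32 bits of the IOARCB bus address are written because iowrite32()
 * truncates the 64-bit value, as the comment in the function notes.
 */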
903
904 /**
905  * pmcraid_send_cmd - fires a command to IOA
906  *
907  * This function also sets up timeout function, and command completion
908  * function
909  *
910  * @cmd: pointer to the command block to be fired to IOA
911  * @cmd_done: command completion function, called once IOA responds
912  * @timeout: timeout to wait for this command completion
913  * @timeout_func: timeout handler
914  *
915  * Return value
916  *   none
917  */
918 static void pmcraid_send_cmd(
919         struct pmcraid_cmd *cmd,
920         void (*cmd_done) (struct pmcraid_cmd *),
921         unsigned long timeout,
922         void (*timeout_func) (struct pmcraid_cmd *)
923 )
924 {
925         /* initialize done function */
926         cmd->cmd_done = cmd_done;
927
928         if (timeout_func) {
929                 /* setup timeout handler */
930                 cmd->timer.data = (unsigned long)cmd;
931                 cmd->timer.expires = jiffies + timeout;
932                 cmd->timer.function = (void (*)(unsigned long))timeout_func;
933                 add_timer(&cmd->timer);
934         }
935
936         /* fire the command to IOA */
937         _pmcraid_fire_command(cmd);
938 }
939
940 /**
941  * pmcraid_ioa_shutdown_done - completion function for IOA shutdown command
942  * @cmd: pointer to the command block used for sending IOA shutdown command
943  *
944  * Return value
945  *  None
946  */
947 static void pmcraid_ioa_shutdown_done(struct pmcraid_cmd *cmd)
948 {
949         struct pmcraid_instance *pinstance = cmd->drv_inst;
950         unsigned long lock_flags;
951
952         spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
953         pmcraid_ioa_reset(cmd);
954         spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
955 }
956
957 /**
958  * pmcraid_ioa_shutdown - sends SHUTDOWN command to ioa
959  *
960  * @cmd: pointer to the command block used as part of reset sequence
961  *
962  * Return Value
963  *  None
964  */
965 static void pmcraid_ioa_shutdown(struct pmcraid_cmd *cmd)
966 {
967         pmcraid_info("response for Cancel CCN CDB[0] = %x ioasc = %x\n",
968                      cmd->ioa_cb->ioarcb.cdb[0],
969                      le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
970
971         /* Note that commands sent during reset require next command to be sent
972          * to IOA. Hence reinit the done function as well as timeout function
973          */
974         pmcraid_reinit_cmdblk(cmd);
975         cmd->ioa_cb->ioarcb.request_type = REQ_TYPE_IOACMD;
976         cmd->ioa_cb->ioarcb.resource_handle =
977                 cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
978         cmd->ioa_cb->ioarcb.cdb[0] = PMCRAID_IOA_SHUTDOWN;
979         cmd->ioa_cb->ioarcb.cdb[1] = PMCRAID_SHUTDOWN_NORMAL;
980
981         /* fire shutdown command to hardware. */
982         pmcraid_info("firing normal shutdown command (%d) to IOA\n",
983                      le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle));
984
985         pmcraid_notify_ioastate(cmd->drv_inst, PMC_DEVICE_EVENT_SHUTDOWN_START);
986
987         pmcraid_send_cmd(cmd, pmcraid_ioa_shutdown_done,
988                          PMCRAID_SHUTDOWN_TIMEOUT,
989                          pmcraid_timeout_handler);
990 }
991
992 /**
993  * pmcraid_get_fwversion_done - completion function for get_fwversion
994  *
995  * @cmd: pointer to command block used to send INQUIRY command
996  *
997  * Return Value
998  *      none
999  */
1000 static void pmcraid_querycfg(struct pmcraid_cmd *);
1001
1002 static void pmcraid_get_fwversion_done(struct pmcraid_cmd *cmd)
1003 {
1004         struct pmcraid_instance *pinstance = cmd->drv_inst;
1005         u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
1006         unsigned long lock_flags;
1007
1008         /* configuration table entry size depends on firmware version. If fw
1009          * version is not known, it is not possible to interpret IOA config
1010          * table
1011          */
1012         if (ioasc) {
1013                 pmcraid_err("IOA Inquiry failed with %x\n", ioasc);
1014                 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
1015                 pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
1016                 pmcraid_reset_alert(cmd);
1017                 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
1018         } else  {
1019                 pmcraid_querycfg(cmd);
1020         }
1021 }
1022
1023 /**
1024  * pmcraid_get_fwversion - reads firmware version information
1025  *
1026  * @cmd: pointer to command block used to send INQUIRY command
1027  *
1028  * Return Value
1029  *      none
1030  */
1031 static void pmcraid_get_fwversion(struct pmcraid_cmd *cmd)
1032 {
1033         struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
1034         struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
1035         struct pmcraid_instance *pinstance = cmd->drv_inst;
1036         u16 data_size = sizeof(struct pmcraid_inquiry_data);
1037
1038         pmcraid_reinit_cmdblk(cmd);
1039         ioarcb->request_type = REQ_TYPE_SCSI;
1040         ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
1041         ioarcb->cdb[0] = INQUIRY;
1042         ioarcb->cdb[1] = 1;
1043         ioarcb->cdb[2] = 0xD0;
1044         ioarcb->cdb[3] = (data_size >> 8) & 0xFF;
1045         ioarcb->cdb[4] = data_size & 0xFF;
1046
1047         /* The IOADL for the entire inquiry data fits within the IOARCB itself
1048          */
1049         ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
1050                                         offsetof(struct pmcraid_ioarcb,
1051                                                 add_data.u.ioadl[0]));
1052         ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
1053         ioarcb->ioarcb_bus_addr &= cpu_to_le64(~(0x1FULL));
1054
1055         ioarcb->request_flags0 |= NO_LINK_DESCS;
1056         ioarcb->data_transfer_length = cpu_to_le32(data_size);
1057         ioadl = &(ioarcb->add_data.u.ioadl[0]);
1058         ioadl->flags = IOADL_FLAGS_LAST_DESC;
1059         ioadl->address = cpu_to_le64(pinstance->inq_data_baddr);
1060         ioadl->data_len = cpu_to_le32(data_size);
1061
1062         pmcraid_send_cmd(cmd, pmcraid_get_fwversion_done,
1063                          PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
1064 }
1065
1066 /**
1067  * pmcraid_identify_hrrq - registers host rrq buffers with IOA
1068  * @cmd: pointer to command block to be used for identify hrrq
1069  *
1070  * Return Value
1071  *       none
1072  */
1073 static void pmcraid_identify_hrrq(struct pmcraid_cmd *cmd)
1074 {
1075         struct pmcraid_instance *pinstance = cmd->drv_inst;
1076         struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
1077         int index = cmd->hrrq_index;
1078         __be64 hrrq_addr = cpu_to_be64(pinstance->hrrq_start_bus_addr[index]);
1079         __be32 hrrq_size = cpu_to_be32(sizeof(u32) * PMCRAID_MAX_CMD);
1080         void (*done_function)(struct pmcraid_cmd *);
1081
1082         pmcraid_reinit_cmdblk(cmd);
1083         cmd->hrrq_index = index + 1;
1084
1085         if (cmd->hrrq_index < pinstance->num_hrrq) {
1086                 done_function = pmcraid_identify_hrrq;
1087         } else {
1088                 cmd->hrrq_index = 0;
1089                 done_function = pmcraid_get_fwversion;
1090         }
1091
1092         /* Initialize ioarcb */
1093         ioarcb->request_type = REQ_TYPE_IOACMD;
1094         ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
1095
1096         /* initialize the hrrq number where IOA will respond to this command */
1097         ioarcb->hrrq_id = index;
1098         ioarcb->cdb[0] = PMCRAID_IDENTIFY_HRRQ;
1099         ioarcb->cdb[1] = index;
1100
1101         /* IOA expects 64-bit pci address to be written in B.E format
1102  * (i.e. cdb[2]=MSByte..cdb[9]=LSByte).
1103          */
1104         pmcraid_info("HRRQ_IDENTIFY with hrrq:ioarcb:index => %llx:%llx:%x\n",
1105                      hrrq_addr, ioarcb->ioarcb_bus_addr, index);
1106
1107         memcpy(&(ioarcb->cdb[2]), &hrrq_addr, sizeof(hrrq_addr));
1108         memcpy(&(ioarcb->cdb[10]), &hrrq_size, sizeof(hrrq_size));
1109
1110         /* Subsequent commands require HRRQ identification to be successful.
1111          * Note that this gets called even during reset from SCSI mid-layer
1112          * or tasklet
1113          */
1114         pmcraid_send_cmd(cmd, done_function,
1115                          PMCRAID_INTERNAL_TIMEOUT,
1116                          pmcraid_timeout_handler);
1117 }
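/* Editorial note: pmcraid_identify_hrrq() chains itself as its own completion
 * handler until every HRRQ has been registered with the IOA; the last
 * registration switches the completion handler to pmcraid_get_fwversion(),
 * which in turn leads to pmcraid_querycfg(), continuing the bring-up sequence.
 */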
1118
1119 static void pmcraid_process_ccn(struct pmcraid_cmd *cmd);
1120 static void pmcraid_process_ldn(struct pmcraid_cmd *cmd);
1121
1122 /**
1123  * pmcraid_send_hcam_cmd - send an initialized command block(HCAM) to IOA
1124  *
1125  * @cmd: initialized command block pointer
1126  *
1127  * Return Value
1128  *   none
1129  */
1130 static void pmcraid_send_hcam_cmd(struct pmcraid_cmd *cmd)
1131 {
1132         if (cmd->ioa_cb->ioarcb.cdb[1] == PMCRAID_HCAM_CODE_CONFIG_CHANGE)
1133                 atomic_set(&(cmd->drv_inst->ccn.ignore), 0);
1134         else
1135                 atomic_set(&(cmd->drv_inst->ldn.ignore), 0);
1136
1137         pmcraid_send_cmd(cmd, cmd->cmd_done, 0, NULL);
1138 }
1139
1140 /**
1141  * pmcraid_init_hcam - prepares a command block for an HCAM to be sent to IOA
1142  *
1143  * @pinstance: pointer to adapter instance structure
1144  * @type: HCAM type
1145  *
1146  * Return Value
1147  *   pointer to initialized pmcraid_cmd structure or NULL
1148  */
1149 static struct pmcraid_cmd *pmcraid_init_hcam
1150 (
1151         struct pmcraid_instance *pinstance,
1152         u8 type
1153 )
1154 {
1155         struct pmcraid_cmd *cmd;
1156         struct pmcraid_ioarcb *ioarcb;
1157         struct pmcraid_ioadl_desc *ioadl;
1158         struct pmcraid_hostrcb *hcam;
1159         void (*cmd_done) (struct pmcraid_cmd *);
1160         dma_addr_t dma;
1161         int rcb_size;
1162
1163         cmd = pmcraid_get_free_cmd(pinstance);
1164
1165         if (!cmd) {
1166                 pmcraid_err("no free command blocks for hcam\n");
1167                 return cmd;
1168         }
1169
1170         if (type == PMCRAID_HCAM_CODE_CONFIG_CHANGE) {
1171                 rcb_size = sizeof(struct pmcraid_hcam_ccn_ext);
1172                 cmd_done = pmcraid_process_ccn;
1173                 dma = pinstance->ccn.baddr + PMCRAID_AEN_HDR_SIZE;
1174                 hcam = &pinstance->ccn;
1175         } else {
1176                 rcb_size = sizeof(struct pmcraid_hcam_ldn);
1177                 cmd_done = pmcraid_process_ldn;
1178                 dma = pinstance->ldn.baddr + PMCRAID_AEN_HDR_SIZE;
1179                 hcam = &pinstance->ldn;
1180         }
1181
1182         /* initialize command pointer used for HCAM registration */
1183         hcam->cmd = cmd;
1184
1185         ioarcb = &cmd->ioa_cb->ioarcb;
1186         ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
1187                                         offsetof(struct pmcraid_ioarcb,
1188                                                 add_data.u.ioadl[0]));
1189         ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
1190         ioadl = ioarcb->add_data.u.ioadl;
1191
1192         /* Initialize ioarcb */
1193         ioarcb->request_type = REQ_TYPE_HCAM;
1194         ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
1195         ioarcb->cdb[0] = PMCRAID_HOST_CONTROLLED_ASYNC;
1196         ioarcb->cdb[1] = type;
1197         ioarcb->cdb[7] = (rcb_size >> 8) & 0xFF;
1198         ioarcb->cdb[8] = (rcb_size) & 0xFF;
1199
1200         ioarcb->data_transfer_length = cpu_to_le32(rcb_size);
1201
1202         ioadl[0].flags |= IOADL_FLAGS_READ_LAST;
1203         ioadl[0].data_len = cpu_to_le32(rcb_size);
1204         ioadl[0].address = cpu_to_le64(dma);
1205
1206         cmd->cmd_done = cmd_done;
1207         return cmd;
1208 }
1209
1210 /**
1211  * pmcraid_send_hcam - Send an HCAM to IOA
1212  * @pinstance: ioa config struct
1213  * @type: HCAM type
1214  *
1215  * This function will send a Host Controlled Async command to IOA.
1216  *
1217  * Return value:
1218  *      none
1219  */
1220 static void pmcraid_send_hcam(struct pmcraid_instance *pinstance, u8 type)
1221 {
1222         struct pmcraid_cmd *cmd = pmcraid_init_hcam(pinstance, type);
1223         pmcraid_send_hcam_cmd(cmd);
1224 }
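/* Editorial note: HCAMs are long-lived commands. The IOA holds a registered
 * CCN until a configuration change occurs and a registered LDN until it has
 * error log data to report; after handling a completion the driver registers
 * a fresh HCAM of the same type so that notifications are not missed.
 */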
1225
1226
1227 /**
1228  * pmcraid_prepare_cancel_cmd - prepares a command block to abort another
1229  *
1230  * @cmd: pointer to cmd that is used as cancelling command
1231  * @cmd_to_cancel: pointer to the command that needs to be cancelled
1232  */
1233 static void pmcraid_prepare_cancel_cmd(
1234         struct pmcraid_cmd *cmd,
1235         struct pmcraid_cmd *cmd_to_cancel
1236 )
1237 {
1238         struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
1239         __be64 ioarcb_addr;
1240
1241         /* IOARCB address of the command to be cancelled is given in
1242          * cdb[2]..cdb[9] in Big-Endian format. Note that length bits in
1243          * IOARCB address are not masked.
1244          */
1245         ioarcb_addr = cpu_to_be64(le64_to_cpu(cmd_to_cancel->ioa_cb->ioarcb.ioarcb_bus_addr));
1246
1247         /* Get the resource handle to where the command to be aborted has been
1248          * sent.
1249          */
1250         ioarcb->resource_handle = cmd_to_cancel->ioa_cb->ioarcb.resource_handle;
1251         ioarcb->request_type = REQ_TYPE_IOACMD;
1252         memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
1253         ioarcb->cdb[0] = PMCRAID_ABORT_CMD;
1254
1255         memcpy(&(ioarcb->cdb[2]), &ioarcb_addr, sizeof(ioarcb_addr));
1256 }
1257
1258 /**
1259  * pmcraid_cancel_hcam - sends ABORT task to abort a given HCAM
1260  *
1261  * @cmd: command to be used as cancelling command
1262  * @type: HCAM type
1263  * @cmd_done: op done function for the cancelling command
1264  */
1265 static void pmcraid_cancel_hcam(
1266         struct pmcraid_cmd *cmd,
1267         u8 type,
1268         void (*cmd_done) (struct pmcraid_cmd *)
1269 )
1270 {
1271         struct pmcraid_instance *pinstance;
1272         struct pmcraid_hostrcb  *hcam;
1273
1274         pinstance = cmd->drv_inst;
1275         hcam =  (type == PMCRAID_HCAM_CODE_LOG_DATA) ?
1276                 &pinstance->ldn : &pinstance->ccn;
1277
1278         /* prepare for cancelling the previous HCAM command. If the HCAM is
1279          * not currently pending with the IOA, hcam->cmd will be NULL
1280          */
1281         if (hcam->cmd == NULL)
1282                 return;
1283
1284         pmcraid_prepare_cancel_cmd(cmd, hcam->cmd);
1285
1286         /* writing to IOARRIN must be protected by host_lock, as mid-layer
1287          * may call queuecommand while we are doing this
1288          */
1289         pmcraid_send_cmd(cmd, cmd_done,
1290                          PMCRAID_INTERNAL_TIMEOUT,
1291                          pmcraid_timeout_handler);
1292 }
1293
1294 /**
1295  * pmcraid_cancel_ccn - cancel CCN HCAM already registered with IOA
1296  *
1297  * @cmd: command block to be used for cancelling the HCAM
1298  */
1299 static void pmcraid_cancel_ccn(struct pmcraid_cmd *cmd)
1300 {
1301         pmcraid_info("response for Cancel LDN CDB[0] = %x ioasc = %x\n",
1302                      cmd->ioa_cb->ioarcb.cdb[0],
1303                      le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
1304
1305         pmcraid_reinit_cmdblk(cmd);
1306
1307         pmcraid_cancel_hcam(cmd,
1308                             PMCRAID_HCAM_CODE_CONFIG_CHANGE,
1309                             pmcraid_ioa_shutdown);
1310 }
1311
1312 /**
1313  * pmcraid_cancel_ldn - cancel LDN HCAM already registered with IOA
1314  *
1315  * @cmd: command block to be used for cancelling the HCAM
1316  */
1317 static void pmcraid_cancel_ldn(struct pmcraid_cmd *cmd)
1318 {
1319         pmcraid_cancel_hcam(cmd,
1320                             PMCRAID_HCAM_CODE_LOG_DATA,
1321                             pmcraid_cancel_ccn);
1322 }
1323
1324 /**
1325  * pmcraid_expose_resource - check if the resource can be exposed to OS
1326  *
1327  * @fw_version: firmware version code
1328  * @cfgte: pointer to configuration table entry of the resource
1329  *
1330  * Return value:
1331  *      true if resource can be added to midlayer, false(0) otherwise
1332  */
1333 static int pmcraid_expose_resource(u16 fw_version,
1334                                    struct pmcraid_config_table_entry *cfgte)
1335 {
1336         int retval = 0;
1337
1338         if (cfgte->resource_type == RES_TYPE_VSET) {
1339                 if (fw_version <= PMCRAID_FW_VERSION_1)
1340                         retval = ((cfgte->unique_flags1 & 0x80) == 0);
1341                 else
1342                         retval = ((cfgte->unique_flags0 & 0x80) == 0 &&
1343                                   (cfgte->unique_flags1 & 0x80) == 0);
1344
1345         } else if (cfgte->resource_type == RES_TYPE_GSCSI)
1346                 retval = (RES_BUS(cfgte->resource_address) !=
1347                                 PMCRAID_VIRTUAL_ENCL_BUS_ID);
1348         return retval;
1349 }
1350
1351 /* attributes supported by pmcraid_event_family */
1352 enum {
1353         PMCRAID_AEN_ATTR_UNSPEC,
1354         PMCRAID_AEN_ATTR_EVENT,
1355         __PMCRAID_AEN_ATTR_MAX,
1356 };
1357 #define PMCRAID_AEN_ATTR_MAX (__PMCRAID_AEN_ATTR_MAX - 1)
1358
1359 /* commands supported by pmcraid_event_family */
1360 enum {
1361         PMCRAID_AEN_CMD_UNSPEC,
1362         PMCRAID_AEN_CMD_EVENT,
1363         __PMCRAID_AEN_CMD_MAX,
1364 };
1365 #define PMCRAID_AEN_CMD_MAX (__PMCRAID_AEN_CMD_MAX - 1)
1366
1367 static struct genl_multicast_group pmcraid_mcgrps[] = {
1368         { .name = "events", /* not really used - see ID discussion below */ },
1369 };
1370
1371 static struct genl_family pmcraid_event_family __ro_after_init = {
1372         .module = THIS_MODULE,
1373         .name = "pmcraid",
1374         .version = 1,
1375         .maxattr = PMCRAID_AEN_ATTR_MAX,
1376         .mcgrps = pmcraid_mcgrps,
1377         .n_mcgrps = ARRAY_SIZE(pmcraid_mcgrps),
1378 };
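/* Editorial note: user-space applications receive the AEN payloads sent by
 * pmcraid_notify_aen() by resolving the "pmcraid" generic netlink family and
 * subscribing to its "events" multicast group (for example with libnl:
 * genl_ctrl_resolve_grp() to look up the group id, then
 * nl_socket_add_membership()).
 */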
1379
1380 /**
1381  * pmcraid_netlink_init - registers pmcraid_event_family
1382  *
1383  * Return value:
1384  *      0 if the pmcraid_event_family is successfully registered
1385  *      with netlink generic, non-zero otherwise
1386  */
1387 static int __init pmcraid_netlink_init(void)
1388 {
1389         int result;
1390
1391         result = genl_register_family(&pmcraid_event_family);
1392
1393         if (result)
1394                 return result;
1395
1396         pmcraid_info("registered NETLINK GENERIC group: %d\n",
1397                      pmcraid_event_family.id);
1398
1399         return result;
1400 }
1401
1402 /**
1403  * pmcraid_netlink_release - unregisters pmcraid_event_family
1404  *
1405  * Return value:
1406  *      none
1407  */
1408 static void pmcraid_netlink_release(void)
1409 {
1410         genl_unregister_family(&pmcraid_event_family);
1411 }
1412
1413 /**
1414  * pmcraid_notify_aen - sends event msg to user space application
1415  * @pinstance: pointer to adapter instance structure
1416  * @aen_msg: AEN message to be sent, @data_size: size of the AEN payload
1417  *
1418  * Return value:
1419  *      0 if success, error value in case of any failure.
1420  */
1421 static int pmcraid_notify_aen(
1422         struct pmcraid_instance *pinstance,
1423         struct pmcraid_aen_msg  *aen_msg,
1424         u32    data_size
1425 )
1426 {
1427         struct sk_buff *skb;
1428         void *msg_header;
1429         u32  total_size, nla_genl_hdr_total_size;
1430         int result;
1431
1432         aen_msg->hostno = (pinstance->host->unique_id << 16 |
1433                            MINOR(pinstance->cdev.dev));
1434         aen_msg->length = data_size;
1435
1436         data_size += sizeof(*aen_msg);
1437
1438         total_size = nla_total_size(data_size);
1439         /* Add GENL_HDR to total_size */
1440         nla_genl_hdr_total_size =
1441                 (total_size + (GENL_HDRLEN +
1442                 ((struct genl_family *)&pmcraid_event_family)->hdrsize)
1443                  + NLMSG_HDRLEN);
1444         skb = genlmsg_new(nla_genl_hdr_total_size, GFP_ATOMIC);
1445
1446
1447         if (!skb) {
1448                 pmcraid_err("Failed to allocate aen data SKB of size: %x\n",
1449                              total_size);
1450                 return -ENOMEM;
1451         }
1452
1453         /* add the genetlink message header */
1454         msg_header = genlmsg_put(skb, 0, 0,
1455                                  &pmcraid_event_family, 0,
1456                                  PMCRAID_AEN_CMD_EVENT);
1457         if (!msg_header) {
1458                 pmcraid_err("failed to copy command details\n");
1459                 nlmsg_free(skb);
1460                 return -ENOMEM;
1461         }
1462
1463         result = nla_put(skb, PMCRAID_AEN_ATTR_EVENT, data_size, aen_msg);
1464
1465         if (result) {
1466                 pmcraid_err("failed to copy AEN attribute data\n");
1467                 nlmsg_free(skb);
1468                 return -EINVAL;
1469         }
1470
1471         /* send genetlink multicast message to notify applications */
1472         genlmsg_end(skb, msg_header);
1473
1474         result = genlmsg_multicast(&pmcraid_event_family, skb,
1475                                    0, 0, GFP_ATOMIC);
1476
1477         /* If there are no listeners, genlmsg_multicast may return non-zero
1478          * value.
1479          */
1480         if (result)
1481                 pmcraid_info("error (%x) sending aen event message\n", result);
1482         return result;
1483 }
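/*
 * Illustration only, not part of the driver build: a minimal sketch of how a
 * user-space application could listen for the AEN multicasts sent by
 * pmcraid_notify_aen() using libnl-3. The multicast group name "events" is an
 * assumption and must match the entry registered in pmcraid_mcgrps;
 * handle_aen() is a hypothetical application callback and error handling is
 * omitted.
 *
 *	#include <netlink/netlink.h>
 *	#include <netlink/genl/genl.h>
 *	#include <netlink/genl/ctrl.h>
 *
 *	static int aen_cb(struct nl_msg *msg, void *arg)
 *	{
 *		struct genlmsghdr *gnlh = genlmsg_hdr(nlmsg_hdr(msg));
 *		struct nlattr *attr = genlmsg_attrdata(gnlh, 0);
 *
 *		if (attr && nla_type(attr) == PMCRAID_AEN_ATTR_EVENT)
 *			handle_aen(nla_data(attr), nla_len(attr));
 *		return NL_OK;
 *	}
 *
 *	static void listen_for_pmcraid_aens(void)
 *	{
 *		struct nl_sock *sk = nl_socket_alloc();
 *
 *		genl_connect(sk);
 *		nl_socket_disable_seq_check(sk);
 *		nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, aen_cb, NULL);
 *		nl_socket_add_membership(sk,
 *			genl_ctrl_resolve_grp(sk, "pmcraid", "events"));
 *		for (;;)
 *			nl_recvmsgs_default(sk);
 *	}
 */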
1484
1485 /**
1486  * pmcraid_notify_ccn - notifies about CCN event msg to user space
1487  * @pinstance: pointer to adapter instance structure
1488  *
1489  * Return value:
1490  *      0 if success, error value in case of any failure
1491  */
1492 static int pmcraid_notify_ccn(struct pmcraid_instance *pinstance)
1493 {
1494         return pmcraid_notify_aen(pinstance,
1495                                 pinstance->ccn.msg,
1496                                 le32_to_cpu(pinstance->ccn.hcam->data_len) +
1497                                 sizeof(struct pmcraid_hcam_hdr));
1498 }
1499
1500 /**
1501  * pmcraid_notify_ldn - notifies about LDN event msg to user space
1502  * @pinstance: pointer to adapter instance structure
1503  *
1504  * Return value:
1505  *      0 if success, error value in case of any failure
1506  */
1507 static int pmcraid_notify_ldn(struct pmcraid_instance *pinstance)
1508 {
1509         return pmcraid_notify_aen(pinstance,
1510                                 pinstance->ldn.msg,
1511                                 le32_to_cpu(pinstance->ldn.hcam->data_len) +
1512                                 sizeof(struct pmcraid_hcam_hdr));
1513 }
1514
1515 /**
1516  * pmcraid_notify_ioastate - sends IOA state event msg to user space
1517  * @pinstance: pointer to adapter instance structure
1518  * @evt: controller state event to be sent
1519  *
1520  * Return value:
1521  *      none
1522  */
1523 static void pmcraid_notify_ioastate(struct pmcraid_instance *pinstance, u32 evt)
1524 {
1525         pinstance->scn.ioa_state = evt;
1526         pmcraid_notify_aen(pinstance,
1527                           &pinstance->scn.msg,
1528                           sizeof(u32));
1529 }
1530
1531 /**
1532  * pmcraid_handle_config_change - Handle a config change from the adapter
1533  * @pinstance: pointer to per adapter instance structure
1534  *
1535  * Return value:
1536  *  none
1537  */
1538
1539 static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
1540 {
1541         struct pmcraid_config_table_entry *cfg_entry;
1542         struct pmcraid_hcam_ccn *ccn_hcam;
1543         struct pmcraid_cmd *cmd;
1544         struct pmcraid_cmd *cfgcmd;
1545         struct pmcraid_resource_entry *res = NULL;
1546         unsigned long lock_flags;
1547         unsigned long host_lock_flags;
1548         u32 new_entry = 1;
1549         u32 hidden_entry = 0;
1550         u16 fw_version;
1551         int rc;
1552
1553         ccn_hcam = (struct pmcraid_hcam_ccn *)pinstance->ccn.hcam;
1554         cfg_entry = &ccn_hcam->cfg_entry;
1555         fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
1556
1557         pmcraid_info("CCN(%x): %x timestamp: %llx type: %x lost: %x flags: %x"
1558                  " res: %x:%x:%x:%x\n",
1559                  le32_to_cpu(pinstance->ccn.hcam->ilid),
1560                  pinstance->ccn.hcam->op_code,
1561                 (le32_to_cpu(pinstance->ccn.hcam->timestamp1) |
1562                 ((le32_to_cpu(pinstance->ccn.hcam->timestamp2) & 0xffffffffLL) << 32)),
1563                  pinstance->ccn.hcam->notification_type,
1564                  pinstance->ccn.hcam->notification_lost,
1565                  pinstance->ccn.hcam->flags,
1566                  pinstance->host->unique_id,
1567                  RES_IS_VSET(*cfg_entry) ? PMCRAID_VSET_BUS_ID :
1568                  (RES_IS_GSCSI(*cfg_entry) ? PMCRAID_PHYS_BUS_ID :
1569                         RES_BUS(cfg_entry->resource_address)),
1570                  RES_IS_VSET(*cfg_entry) ?
1571                         (fw_version <= PMCRAID_FW_VERSION_1 ?
1572                                 cfg_entry->unique_flags1 :
1573                                 le16_to_cpu(cfg_entry->array_id) & 0xFF) :
1574                         RES_TARGET(cfg_entry->resource_address),
1575                  RES_LUN(cfg_entry->resource_address));
1576
1577
1578         /* If this HCAM indicates a lost notification, read the config table */
1579         if (pinstance->ccn.hcam->notification_lost) {
1580                 cfgcmd = pmcraid_get_free_cmd(pinstance);
1581                 if (cfgcmd) {
1582                         pmcraid_info("lost CCN, reading config table\n");
1583                         pinstance->reinit_cfg_table = 1;
1584                         pmcraid_querycfg(cfgcmd);
1585                 } else {
1586                         pmcraid_err("lost CCN, no free cmd for querycfg\n");
1587                 }
1588                 goto out_notify_apps;
1589         }
1590
1591         /* If this resource is not going to be added to mid-layer, just notify
1592          * applications and return. If this notification is about hiding a VSET
1593          * resource, check if it was exposed already.
1594          */
1595         if (pinstance->ccn.hcam->notification_type ==
1596             NOTIFICATION_TYPE_ENTRY_CHANGED &&
1597             cfg_entry->resource_type == RES_TYPE_VSET) {
1598                 hidden_entry = (cfg_entry->unique_flags1 & 0x80) != 0;
1599         } else if (!pmcraid_expose_resource(fw_version, cfg_entry)) {
1600                 goto out_notify_apps;
1601         }
1602
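        /* Search the resources currently on the used queue; a matching
         * resource address means this CCN refers to an already-known entry
         * rather than a new one.
         */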
1603         spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
1604         list_for_each_entry(res, &pinstance->used_res_q, queue) {
1605                 rc = memcmp(&res->cfg_entry.resource_address,
1606                             &cfg_entry->resource_address,
1607                             sizeof(cfg_entry->resource_address));
1608                 if (!rc) {
1609                         new_entry = 0;
1610                         break;
1611                 }
1612         }
1613
1614         if (new_entry) {
1615
1616                 if (hidden_entry) {
1617                         spin_unlock_irqrestore(&pinstance->resource_lock,
1618                                                 lock_flags);
1619                         goto out_notify_apps;
1620                 }
1621
1622                 /* If there are more resources than the driver can manage,
1623                  * do not notify applications about this CCN. Just ignore
1624                  * the notification and re-register the same HCAM
1625                  */
1626                 if (list_empty(&pinstance->free_res_q)) {
1627                         spin_unlock_irqrestore(&pinstance->resource_lock,
1628                                                 lock_flags);
1629                         pmcraid_err("too many resources attached\n");
1630                         spin_lock_irqsave(pinstance->host->host_lock,
1631                                           host_lock_flags);
1632                         pmcraid_send_hcam(pinstance,
1633                                           PMCRAID_HCAM_CODE_CONFIG_CHANGE);
1634                         spin_unlock_irqrestore(pinstance->host->host_lock,
1635                                                host_lock_flags);
1636                         return;
1637                 }
1638
1639                 res = list_entry(pinstance->free_res_q.next,
1640                                  struct pmcraid_resource_entry, queue);
1641
1642                 list_del(&res->queue);
1643                 res->scsi_dev = NULL;
1644                 res->reset_progress = 0;
1645                 list_add_tail(&res->queue, &pinstance->used_res_q);
1646         }
1647
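        /* refresh the cached config table entry with the data from this CCN */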
1648         memcpy(&res->cfg_entry, cfg_entry, pinstance->config_table_entry_size);
1649
1650         if (pinstance->ccn.hcam->notification_type ==
1651             NOTIFICATION_TYPE_ENTRY_DELETED || hidden_entry) {
1652                 if (res->scsi_dev) {
1653                         if (fw_version <= PMCRAID_FW_VERSION_1)
1654                                 res->cfg_entry.unique_flags1 &= 0x7F;
1655                         else
1656                                 res->cfg_entry.array_id &= cpu_to_le16(0xFF);
1657                         res->change_detected = RES_CHANGE_DEL;
1658                         res->cfg_entry.resource_handle =
1659                                 PMCRAID_INVALID_RES_HANDLE;
1660                         schedule_work(&pinstance->worker_q);
1661                 } else {
1662                         /* This may be one of the non-exposed resources */
1663                         list_move_tail(&res->queue, &pinstance->free_res_q);
1664                 }
1665         } else if (!res->scsi_dev) {
1666                 res->change_detected = RES_CHANGE_ADD;
1667                 schedule_work(&pinstance->worker_q);
1668         }
1669         spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
1670
1671 out_notify_apps:
1672
1673         /* Notify configuration changes to registered applications.*/
1674         if (!pmcraid_disable_aen)
1675                 pmcraid_notify_ccn(pinstance);
1676
1677         cmd = pmcraid_init_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
1678         if (cmd)
1679                 pmcraid_send_hcam_cmd(cmd);
1680 }
1681
1682 /**
1683  * pmcraid_get_error_info - return the error table entry for an ioasc
1684  * @ioasc: ioasc code
1685  * Return Value
1686  *       pointer to the matching pmcraid_ioasc_error entry, or NULL if none
1687  */
1688 static struct pmcraid_ioasc_error *pmcraid_get_error_info(u32 ioasc)
1689 {
1690         int i;
1691         for (i = 0; i < ARRAY_SIZE(pmcraid_ioasc_error_table); i++) {
1692                 if (pmcraid_ioasc_error_table[i].ioasc_code == ioasc)
1693                         return &pmcraid_ioasc_error_table[i];
1694         }
1695         return NULL;
1696 }
1697
1698 /**
1699  * pmcraid_ioasc_logger - log IOASC information based on user settings
1700  * @ioasc: ioasc code
1701  * @cmd: pointer to command that resulted in 'ioasc'
1702  */
1703 static void pmcraid_ioasc_logger(u32 ioasc, struct pmcraid_cmd *cmd)
1704 {
1705         struct pmcraid_ioasc_error *error_info = pmcraid_get_error_info(ioasc);
1706
1707         if (error_info == NULL ||
1708                 cmd->drv_inst->current_log_level < error_info->log_level)
1709                 return;
1710
1711         /* log the error string */
1712         pmcraid_err("cmd [%x] for resource %x failed with %x(%s)\n",
1713                 cmd->ioa_cb->ioarcb.cdb[0],
1714                 le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle),
1715                 ioasc, error_info->error_string);
1716 }
1717
1718 /**
1719  * pmcraid_handle_error_log - Handle an error log notification (LDN) from the IOA
1720  *
1721  * @pinstance: pointer to per adapter instance structure
1722  *
1723  * Return value:
1724  *  none
1725  */
1726 static void pmcraid_handle_error_log(struct pmcraid_instance *pinstance)
1727 {
1728         struct pmcraid_hcam_ldn *hcam_ldn;
1729         u32 ioasc;
1730
1731         hcam_ldn = (struct pmcraid_hcam_ldn *)pinstance->ldn.hcam;
1732
1733         pmcraid_info
1734                 ("LDN(%x): %x type: %x lost: %x flags: %x overlay id: %x\n",
1735                  pinstance->ldn.hcam->ilid,
1736                  pinstance->ldn.hcam->op_code,
1737                  pinstance->ldn.hcam->notification_type,
1738                  pinstance->ldn.hcam->notification_lost,
1739                  pinstance->ldn.hcam->flags,
1740                  pinstance->ldn.hcam->overlay_id);
1741
1742         /* log only the errors, no need to log informational log entries */
1743         if (pinstance->ldn.hcam->notification_type !=
1744             NOTIFICATION_TYPE_ERROR_LOG)
1745                 return;
1746
1747         if (pinstance->ldn.hcam->notification_lost ==
1748             HOSTRCB_NOTIFICATIONS_LOST)
1749                 dev_info(&pinstance->pdev->dev, "Error notifications lost\n");
1750
1751         ioasc = le32_to_cpu(hcam_ldn->error_log.fd_ioasc);
1752
1753         if (ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET ||
1754                 ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER) {
1755                 dev_info(&pinstance->pdev->dev,
1756                         "UnitAttention due to IOA Bus Reset\n");
1757                 scsi_report_bus_reset(
1758                         pinstance->host,
1759                         RES_BUS(hcam_ldn->error_log.fd_ra));
1760         }
1761
1762         return;
1763 }
1764
1765 /**
1766  * pmcraid_process_ccn - Op done function for a CCN.
1767  * @cmd: pointer to command struct
1768  *
1769  * This function is the op done function for a configuration
1770  * change notification
1771  *
1772  * Return value:
1773  * none
1774  */
1775 static void pmcraid_process_ccn(struct pmcraid_cmd *cmd)
1776 {
1777         struct pmcraid_instance *pinstance = cmd->drv_inst;
1778         u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
1779         unsigned long lock_flags;
1780
1781         pinstance->ccn.cmd = NULL;
1782         pmcraid_return_cmd(cmd);
1783
1784         /* If driver initiated IOA reset happened while this hcam was pending
1785          * with IOA, or IOA bringdown sequence is in progress, no need to
1786          * re-register the hcam
1787          */
1788         if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET ||
1789             atomic_read(&pinstance->ccn.ignore) == 1) {
1790                 return;
1791         } else if (ioasc) {
1792                 dev_info(&pinstance->pdev->dev,
1793                         "Host RCB (CCN) failed with IOASC: 0x%08X\n", ioasc);
1794                 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
1795                 pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
1796                 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
1797         } else {
1798                 pmcraid_handle_config_change(pinstance);
1799         }
1800 }
1801
1802 /**
1803  * pmcraid_process_ldn - op done function for an LDN
1804  * @cmd: pointer to command block
1805  *
1806  * Return value
1807  *   none
1808  */
1809 static void pmcraid_initiate_reset(struct pmcraid_instance *);
1810 static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd);
1811
1812 static void pmcraid_process_ldn(struct pmcraid_cmd *cmd)
1813 {
1814         struct pmcraid_instance *pinstance = cmd->drv_inst;
1815         struct pmcraid_hcam_ldn *ldn_hcam =
1816                         (struct pmcraid_hcam_ldn *)pinstance->ldn.hcam;
1817         u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
1818         u32 fd_ioasc = le32_to_cpu(ldn_hcam->error_log.fd_ioasc);
1819         unsigned long lock_flags;
1820
1821         /* return the command block back to freepool */
1822         pinstance->ldn.cmd = NULL;
1823         pmcraid_return_cmd(cmd);
1824
1825         /* If driver initiated IOA reset happened while this hcam was pending
1826          * with IOA, no need to re-register the hcam as reset engine will do it
1827          * once reset sequence is complete
1828          */
1829         if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET ||
1830             atomic_read(&pinstance->ccn.ignore) == 1) {
1831                 return;
1832         } else if (!ioasc) {
1833                 pmcraid_handle_error_log(pinstance);
1834                 if (fd_ioasc == PMCRAID_IOASC_NR_IOA_RESET_REQUIRED) {
1835                         spin_lock_irqsave(pinstance->host->host_lock,
1836                                           lock_flags);
1837                         pmcraid_initiate_reset(pinstance);
1838                         spin_unlock_irqrestore(pinstance->host->host_lock,
1839                                                lock_flags);
1840                         return;
1841                 }
1842                 if (fd_ioasc == PMCRAID_IOASC_TIME_STAMP_OUT_OF_SYNC) {
1843                         pinstance->timestamp_error = 1;
1844                         pmcraid_set_timestamp(cmd);
1845                 }
1846         } else {
1847                 dev_info(&pinstance->pdev->dev,
1848                         "Host RCB(LDN) failed with IOASC: 0x%08X\n", ioasc);
1849         }
1850         /* send netlink message for HCAM notification if enabled */
1851         if (!pmcraid_disable_aen)
1852                 pmcraid_notify_ldn(pinstance);
1853
1854         cmd = pmcraid_init_hcam(pinstance, PMCRAID_HCAM_CODE_LOG_DATA);
1855         if (cmd)
1856                 pmcraid_send_hcam_cmd(cmd);
1857 }
1858
1859 /**
1860  * pmcraid_register_hcams - register HCAMs for CCN and LDN
1861  *
1862  * @pinstance: pointer to per adapter instance structure
1863  *
1864  * Return Value
1865  *   none
1866  */
1867 static void pmcraid_register_hcams(struct pmcraid_instance *pinstance)
1868 {
1869         pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
1870         pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_LOG_DATA);
1871 }
1872
1873 /**
1874  * pmcraid_unregister_hcams - cancel HCAMs registered already
1875  * @cmd: pointer to command used as part of reset sequence
1876  */
1877 static void pmcraid_unregister_hcams(struct pmcraid_cmd *cmd)
1878 {
1879         struct pmcraid_instance *pinstance = cmd->drv_inst;
1880
1881         /* During IOA bringdown, HCAM gets fired and tasklet proceeds with
1882          * handling hcam response though it is not necessary. In order to
1883          * prevent this, set 'ignore', so that bring-down sequence doesn't
1884          * re-send any more hcams
1885          */
1886         atomic_set(&pinstance->ccn.ignore, 1);
1887         atomic_set(&pinstance->ldn.ignore, 1);
1888
1889         /* If adapter reset was forced as part of runtime reset sequence,
1890          * start the reset sequence. Reset will be triggered even in case
1891          * IOA unit_check.
1892          */
1893         if ((pinstance->force_ioa_reset && !pinstance->ioa_bringdown) ||
1894              pinstance->ioa_unit_check) {
1895                 pinstance->force_ioa_reset = 0;
1896                 pinstance->ioa_unit_check = 0;
1897                 pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
1898                 pmcraid_reset_alert(cmd);
1899                 return;
1900         }
1901
1902         /* Driver tries to cancel HCAMs by sending ABORT TASK for each HCAM
1903          * one after the other. So CCN cancellation will be triggered by
1904          * pmcraid_cancel_ldn itself.
1905          */
1906         pmcraid_cancel_ldn(cmd);
1907 }
1908
1909 /**
1910  * pmcraid_reset_enable_ioa - re-enable IOA after a hard reset
1911  * @pinstance: pointer to adapter instance structure
1912  * Return Value
1913  *  1 if TRANSITION_TO_OPERATIONAL is active, otherwise 0
1914  */
1915 static void pmcraid_reinit_buffers(struct pmcraid_instance *);
1916
1917 static int pmcraid_reset_enable_ioa(struct pmcraid_instance *pinstance)
1918 {
1919         u32 intrs;
1920
1921         pmcraid_reinit_buffers(pinstance);
1922         intrs = pmcraid_read_interrupts(pinstance);
1923
1924         pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS);
1925
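        /* If the transition-to-operational interrupt is already pending, mask
         * and clear it here (legacy interrupt mode only) and return 1 so the
         * caller knows the IOA is on its way up.
         */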
1926         if (intrs & INTRS_TRANSITION_TO_OPERATIONAL) {
1927                 if (!pinstance->interrupt_mode) {
1928                         iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
1929                                 pinstance->int_regs.
1930                                 ioa_host_interrupt_mask_reg);
1931                         iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
1932                                 pinstance->int_regs.ioa_host_interrupt_clr_reg);
1933                 }
1934                 return 1;
1935         } else {
1936                 return 0;
1937         }
1938 }
1939
1940 /**
1941  * pmcraid_soft_reset - performs a soft reset and makes IOA become ready
1942  * @cmd : pointer to reset command block
1943  *
1944  * Return Value
1945  *      none
1946  */
1947 static void pmcraid_soft_reset(struct pmcraid_cmd *cmd)
1948 {
1949         struct pmcraid_instance *pinstance = cmd->drv_inst;
1950         u32 int_reg;
1951         u32 doorbell;
1952
1953         /* There will be an interrupt when Transition to Operational bit is
1954          * set so tasklet would execute next reset task. The timeout handler
1955          * would re-initiate a reset
1956          */
1957         cmd->cmd_done = pmcraid_ioa_reset;
1958         cmd->timer.data = (unsigned long)cmd;
1959         cmd->timer.expires = jiffies +
1960                              msecs_to_jiffies(PMCRAID_TRANSOP_TIMEOUT);
1961         cmd->timer.function = (void (*)(unsigned long))pmcraid_timeout_handler;
1962
1963         if (!timer_pending(&cmd->timer))
1964                 add_timer(&cmd->timer);
1965
1966         /* Enable destructive diagnostics on IOA if it is not yet in
1967          * operational state
1968          */
1969         doorbell = DOORBELL_RUNTIME_RESET |
1970                    DOORBELL_ENABLE_DESTRUCTIVE_DIAGS;
1971
1972         /* Since we do RESET_ALERT and Start BIST we have to again write
1973          * MSIX Doorbell to indicate the interrupt mode
1974          */
1975         if (pinstance->interrupt_mode) {
1976                 iowrite32(DOORBELL_INTR_MODE_MSIX,
1977                           pinstance->int_regs.host_ioa_interrupt_reg);
1978                 ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
1979         }
1980
1981         iowrite32(doorbell, pinstance->int_regs.host_ioa_interrupt_reg);
1982         ioread32(pinstance->int_regs.host_ioa_interrupt_reg),
1983         int_reg = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
1984
1985         pmcraid_info("Waiting for IOA to become operational %x:%x\n",
1986                      ioread32(pinstance->int_regs.host_ioa_interrupt_reg),
1987                      int_reg);
1988 }
1989
1990 /**
1991  * pmcraid_get_dump - retrieves IOA dump in case of Unit Check interrupt
1992  *
1993  * @pinstance: pointer to adapter instance structure
1994  *
1995  * Return Value
1996  *      none
1997  */
1998 static void pmcraid_get_dump(struct pmcraid_instance *pinstance)
1999 {
2000         pmcraid_info("%s is not yet implemented\n", __func__);
2001 }
2002
2003 /**
2004  * pmcraid_fail_outstanding_cmds - Fails all outstanding ops.
2005  * @pinstance: pointer to adapter instance structure
2006  *
2007  * This function fails all outstanding ops. If they are submitted to IOA
2008  * already, it sends cancel all messages if IOA is still accepting IOARCBs,
2009  * otherwise just completes the commands and returns the cmd blocks to free
2010  * pool.
2011  *
2012  * Return value:
2013  *       none
2014  */
2015 static void pmcraid_fail_outstanding_cmds(struct pmcraid_instance *pinstance)
2016 {
2017         struct pmcraid_cmd *cmd, *temp;
2018         unsigned long lock_flags;
2019
2020         /* pending command list is protected by pending_pool_lock. Its
2021          * traversal must be done while holding this lock
2022          */
2023         spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags);
2024         list_for_each_entry_safe(cmd, temp, &pinstance->pending_cmd_pool,
2025                                  free_list) {
2026                 list_del(&cmd->free_list);
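                /* drop the pool lock while completing this command so that
                 * the completion callbacks below do not run under it
                 */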
2027                 spin_unlock_irqrestore(&pinstance->pending_pool_lock,
2028                                         lock_flags);
2029                 cmd->ioa_cb->ioasa.ioasc =
2030                         cpu_to_le32(PMCRAID_IOASC_IOA_WAS_RESET);
2031                 cmd->ioa_cb->ioasa.ilid =
2032                         cpu_to_le32(PMCRAID_DRIVER_ILID);
2033
2034                 /* In case the command timer is still running */
2035                 del_timer(&cmd->timer);
2036
2037                 /* If this is an IO command, complete it by invoking scsi_done
2038                  * function. If this is one of the internal commands other
2039                  * than pmcraid_ioa_reset and the HCAM commands, invoke cmd_done to
2040                  * complete it
2041                  */
2042                 if (cmd->scsi_cmd) {
2043
2044                         struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
2045                         __le32 resp = cmd->ioa_cb->ioarcb.response_handle;
2046
2047                         scsi_cmd->result |= DID_ERROR << 16;
2048
2049                         scsi_dma_unmap(scsi_cmd);
2050                         pmcraid_return_cmd(cmd);
2051
2052                         pmcraid_info("failing(%d) CDB[0] = %x result: %x\n",
2053                                      le32_to_cpu(resp) >> 2,
2054                                      cmd->ioa_cb->ioarcb.cdb[0],
2055                                      scsi_cmd->result);
2056                         scsi_cmd->scsi_done(scsi_cmd);
2057                 } else if (cmd->cmd_done == pmcraid_internal_done ||
2058                            cmd->cmd_done == pmcraid_erp_done) {
2059                         cmd->cmd_done(cmd);
2060                 } else if (cmd->cmd_done != pmcraid_ioa_reset &&
2061                            cmd->cmd_done != pmcraid_ioa_shutdown_done) {
2062                         pmcraid_return_cmd(cmd);
2063                 }
2064
2065                 atomic_dec(&pinstance->outstanding_cmds);
2066                 spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags);
2067         }
2068
2069         spin_unlock_irqrestore(&pinstance->pending_pool_lock, lock_flags);
2070 }
2071
2072 /**
2073  * pmcraid_ioa_reset - Implementation of IOA reset logic
2074  *
2075  * @cmd: pointer to the cmd block to be used for entire reset process
2076  *
2077  * This function executes most of the steps required for IOA reset. This gets
2078  * called by user threads (modprobe/insmod/rmmod), timers, tasklets and the midlayer's
2079  * 'eh_' thread. Access to variables used for controlling the reset sequence is
2080  * synchronized using host lock. Various functions called during reset process
2081  * would make use of a single command block, pointer to which is also stored in
2082  * adapter instance structure.
2083  *
2084  * Return Value
2085  *       None
2086  */
2087 static void pmcraid_ioa_reset(struct pmcraid_cmd *cmd)
2088 {
2089         struct pmcraid_instance *pinstance = cmd->drv_inst;
2090         u8 reset_complete = 0;
2091
2092         pinstance->ioa_reset_in_progress = 1;
2093
2094         if (pinstance->reset_cmd != cmd) {
2095                 pmcraid_err("reset is called with different command block\n");
2096                 pinstance->reset_cmd = cmd;
2097         }
2098
2099         pmcraid_info("reset_engine: state = %d, command = %p\n",
2100                       pinstance->ioa_state, cmd);
2101
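        /* Typical transitions driven by the cases below (not every path is
         * shown): bring-up goes UNKNOWN/OPERATIONAL -> IN_RESET_ALERT ->
         * IN_HARD_RESET -> IN_SOFT_RESET or IN_BRINGUP -> OPERATIONAL;
         * bring-down goes OPERATIONAL -> IN_BRINGDOWN -> IN_RESET_ALERT ->
         * IN_HARD_RESET -> UNKNOWN.
         */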
2102         switch (pinstance->ioa_state) {
2103
2104         case IOA_STATE_DEAD:
2105                 /* If IOA is offline, whatever may be the reset reason, just
2106                  * return. Callers might be waiting on the reset wait_q, so
2107                  * wake them up
2108                  */
2109                 pmcraid_err("IOA is offline no reset is possible\n");
2110                 reset_complete = 1;
2111                 break;
2112
2113         case IOA_STATE_IN_BRINGDOWN:
2114                 /* we enter here once the ioa shutdown command is processed by IOA.
2115                  * Alert IOA for a possible reset; if the reset alert fails, IOA
2116                  * goes through hard-reset
2117                  */
2118                 pmcraid_disable_interrupts(pinstance, ~0);
2119                 pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
2120                 pmcraid_reset_alert(cmd);
2121                 break;
2122
2123         case IOA_STATE_UNKNOWN:
2124                 /* We may be called during probe or resume. Some pre-processing
2125                  * is required prior to reset
2126                  */
2127                 scsi_block_requests(pinstance->host);
2128
2129                 /* If asked to reset while IOA was processing responses or
2130                  * there are any error responses then IOA may require
2131                  * hard-reset.
2132                  */
2133                 if (pinstance->ioa_hard_reset == 0) {
2134                         if (ioread32(pinstance->ioa_status) &
2135                             INTRS_TRANSITION_TO_OPERATIONAL) {
2136                                 pmcraid_info("sticky bit set, bring-up\n");
2137                                 pinstance->ioa_state = IOA_STATE_IN_BRINGUP;
2138                                 pmcraid_reinit_cmdblk(cmd);
2139                                 pmcraid_identify_hrrq(cmd);
2140                         } else {
2141                                 pinstance->ioa_state = IOA_STATE_IN_SOFT_RESET;
2142                                 pmcraid_soft_reset(cmd);
2143                         }
2144                 } else {
2145                         /* Alert IOA of a possible reset and wait for critical
2146                          * operation in progress bit to reset
2147                          */
2148                         pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
2149                         pmcraid_reset_alert(cmd);
2150                 }
2151                 break;
2152
2153         case IOA_STATE_IN_RESET_ALERT:
2154                 /* If critical operation in progress bit is reset or wait gets
2155                  * timed out, reset proceeds with starting BIST on the IOA.
2156                  * The IOA_STATE_IN_HARD_RESET case counts reset attempts; beyond
2157                  * PMCRAID_RESET_ATTEMPTS the reset engine marks IOA dead and returns
2158                  */
2159                 pinstance->ioa_state = IOA_STATE_IN_HARD_RESET;
2160                 pmcraid_start_bist(cmd);
2161                 break;
2162
2163         case IOA_STATE_IN_HARD_RESET:
2164                 pinstance->ioa_reset_attempts++;
2165
2166                 /* retry reset if we haven't reached maximum allowed limit */
2167                 if (pinstance->ioa_reset_attempts > PMCRAID_RESET_ATTEMPTS) {
2168                         pinstance->ioa_reset_attempts = 0;
2169                         pmcraid_err("IOA didn't respond marking it as dead\n");
2170                         pinstance->ioa_state = IOA_STATE_DEAD;
2171
2172                         if (pinstance->ioa_bringdown)
2173                                 pmcraid_notify_ioastate(pinstance,
2174                                         PMC_DEVICE_EVENT_SHUTDOWN_FAILED);
2175                         else
2176                                 pmcraid_notify_ioastate(pinstance,
2177                                                 PMC_DEVICE_EVENT_RESET_FAILED);
2178                         reset_complete = 1;
2179                         break;
2180                 }
2181
2182                 /* Once either bist or pci reset is done, restore PCI config
2183                  * space. If this fails, proceed with hard reset again
2184                  */
2185                 pci_restore_state(pinstance->pdev);
2186
2187                 /* fail all pending commands */
2188                 pmcraid_fail_outstanding_cmds(pinstance);
2189
2190                 /* check if unit check is active, if so extract dump */
2191                 if (pinstance->ioa_unit_check) {
2192                         pmcraid_info("unit check is active\n");
2193                         pinstance->ioa_unit_check = 0;
2194                         pmcraid_get_dump(pinstance);
2195                         pinstance->ioa_reset_attempts--;
2196                         pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
2197                         pmcraid_reset_alert(cmd);
2198                         break;
2199                 }
2200
2201                 /* if the reset reason is to bring down the ioa, we are done
2202                  * once pci config space has been restored; just complete
2203                  * the reset
2204                  */
2205                 if (pinstance->ioa_bringdown) {
2206                         pmcraid_info("bringing down the adapter\n");
2207                         pinstance->ioa_shutdown_type = SHUTDOWN_NONE;
2208                         pinstance->ioa_bringdown = 0;
2209                         pinstance->ioa_state = IOA_STATE_UNKNOWN;
2210                         pmcraid_notify_ioastate(pinstance,
2211                                         PMC_DEVICE_EVENT_SHUTDOWN_SUCCESS);
2212                         reset_complete = 1;
2213                 } else {
2214                         /* bring-up IOA, so proceed with soft reset
2215                          * Reinitialize hrrq_buffers and their indices also
2216                          * enable interrupts after a pci_restore_state
2217                          */
2218                         if (pmcraid_reset_enable_ioa(pinstance)) {
2219                                 pinstance->ioa_state = IOA_STATE_IN_BRINGUP;
2220                                 pmcraid_info("bringing up the adapter\n");
2221                                 pmcraid_reinit_cmdblk(cmd);
2222                                 pmcraid_identify_hrrq(cmd);
2223                         } else {
2224                                 pinstance->ioa_state = IOA_STATE_IN_SOFT_RESET;
2225                                 pmcraid_soft_reset(cmd);
2226                         }
2227                 }
2228                 break;
2229
2230         case IOA_STATE_IN_SOFT_RESET:
2231                 /* TRANSITION TO OPERATIONAL is on so start initialization
2232                  * sequence
2233                  */
2234                 pmcraid_info("In softreset proceeding with bring-up\n");
2235                 pinstance->ioa_state = IOA_STATE_IN_BRINGUP;
2236
2237                 /* Initialization commands start with HRRQ identification. From
2238                  * now on tasklet completes most of the commands as IOA is up
2239                  * and intrs are enabled
2240                  */
2241                 pmcraid_identify_hrrq(cmd);
2242                 break;
2243
2244         case IOA_STATE_IN_BRINGUP:
2245                 /* we are done with bringing up of IOA, change the ioa_state to
2246                  * operational and wake up any waiters
2247                  */
2248                 pinstance->ioa_state = IOA_STATE_OPERATIONAL;
2249                 reset_complete = 1;
2250                 break;
2251
2252         case IOA_STATE_OPERATIONAL:
2253         default:
2254                 /* When IOA is operational and a reset is requested, check for
2255                  * the reset reason. If reset is to bring down IOA, unregister
2256                  * HCAMs and initiate shutdown; if adapter reset is forced then
2257                  * restart reset sequence again
2258                  */
2259                 if (pinstance->ioa_shutdown_type == SHUTDOWN_NONE &&
2260                     pinstance->force_ioa_reset == 0) {
2261                         pmcraid_notify_ioastate(pinstance,
2262                                                 PMC_DEVICE_EVENT_RESET_SUCCESS);
2263                         reset_complete = 1;
2264                 } else {
2265                         if (pinstance->ioa_shutdown_type != SHUTDOWN_NONE)
2266                                 pinstance->ioa_state = IOA_STATE_IN_BRINGDOWN;
2267                         pmcraid_reinit_cmdblk(cmd);
2268                         pmcraid_unregister_hcams(cmd);
2269                 }
2270                 break;
2271         }
2272
2273         /* reset will be completed if ioa_state is either DEAD or UNKNOWN or
2274          * OPERATIONAL. Reset all control variables used during reset, wake up
2275          * any waiting threads and let the SCSI mid-layer send commands. Note
2276          * that host_lock must be held before invoking scsi_report_bus_reset.
2277          */
2278         if (reset_complete) {
2279                 pinstance->ioa_reset_in_progress = 0;
2280                 pinstance->ioa_reset_attempts = 0;
2281                 pinstance->reset_cmd = NULL;
2282                 pinstance->ioa_shutdown_type = SHUTDOWN_NONE;
2283                 pinstance->ioa_bringdown = 0;
2284                 pmcraid_return_cmd(cmd);
2285
2286                 /* If target state is to bring up the adapter, proceed with
2287                  * hcam registration and resource exposure to mid-layer.
2288                  */
2289                 if (pinstance->ioa_state == IOA_STATE_OPERATIONAL)
2290                         pmcraid_register_hcams(pinstance);
2291
2292                 wake_up_all(&pinstance->reset_wait_q);
2293         }
2294
2295         return;
2296 }
2297
2298 /**
2299  * pmcraid_initiate_reset - initiates reset sequence. This is called from
2300  * ISR/tasklet during error interrupts including IOA unit check. If reset
2301  * is already in progress, it just returns, otherwise initiates IOA reset
2302  * to bring IOA up to operational state.
2303  *
2304  * @pinstance: pointer to adapter instance structure
2305  *
2306  * Return value
2307  *       none
2308  */
2309 static void pmcraid_initiate_reset(struct pmcraid_instance *pinstance)
2310 {
2311         struct pmcraid_cmd *cmd;
2312
2313         /* If the reset is already in progress, just return, otherwise start
2314          * reset sequence and return
2315          */
2316         if (!pinstance->ioa_reset_in_progress) {
2317                 scsi_block_requests(pinstance->host);
2318                 cmd = pmcraid_get_free_cmd(pinstance);
2319
2320                 if (cmd == NULL) {
2321                         pmcraid_err("no cmnd blocks for initiate_reset\n");
2322                         return;
2323                 }
2324
2325                 pinstance->ioa_shutdown_type = SHUTDOWN_NONE;
2326                 pinstance->reset_cmd = cmd;
2327                 pinstance->force_ioa_reset = 1;
2328                 pmcraid_notify_ioastate(pinstance,
2329                                         PMC_DEVICE_EVENT_RESET_START);
2330                 pmcraid_ioa_reset(cmd);
2331         }
2332 }
2333
2334 /**
2335  * pmcraid_reset_reload - utility routine for doing IOA reset either to bringup
2336  *                        or bringdown IOA
2337  * @pinstance: pointer to adapter instance structure
2338  * @shutdown_type: shutdown type to be used NONE, NORMAL or ABBREV
2339  * @target_state: expected target state after reset
2340  *
2341  * Note: This command initiates reset and waits for its completion. Hence this
2342  * should not be called from isr/timer/tasklet functions (timeout handlers,
2343  * error response handlers and interrupt handlers).
2344  *
2345  * Return Value
2346  *       1 in case ioa_state is not target_state, 0 otherwise.
2347  */
2348 static int pmcraid_reset_reload(
2349         struct pmcraid_instance *pinstance,
2350         u8 shutdown_type,
2351         u8 target_state
2352 )
2353 {
2354         struct pmcraid_cmd *reset_cmd = NULL;
2355         unsigned long lock_flags;
2356         int reset = 1;
2357
2358         spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
2359
2360         if (pinstance->ioa_reset_in_progress) {
2361                 pmcraid_info("reset_reload: reset is already in progress\n");
2362
2363                 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
2364
2365                 wait_event(pinstance->reset_wait_q,
2366                            !pinstance->ioa_reset_in_progress);
2367
2368                 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
2369
2370                 if (pinstance->ioa_state == IOA_STATE_DEAD) {
2371                         pmcraid_info("reset_reload: IOA is dead\n");
2372                         goto out_unlock;
2373                 }
2374
2375                 if (pinstance->ioa_state == target_state) {
2376                         reset = 0;
2377                         goto out_unlock;
2378                 }
2379         }
2380
2381         pmcraid_info("reset_reload: proceeding with reset\n");
2382         scsi_block_requests(pinstance->host);
2383         reset_cmd = pmcraid_get_free_cmd(pinstance);
2384         if (reset_cmd == NULL) {
2385                 pmcraid_err("no free cmnd for reset_reload\n");
2386                 goto out_unlock;
2387         }
2388
2389         if (shutdown_type == SHUTDOWN_NORMAL)
2390                 pinstance->ioa_bringdown = 1;
2391
2392         pinstance->ioa_shutdown_type = shutdown_type;
2393         pinstance->reset_cmd = reset_cmd;
2394         pinstance->force_ioa_reset = reset;
2395         pmcraid_info("reset_reload: initiating reset\n");
2396         pmcraid_ioa_reset(reset_cmd);
2397         spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
2398         pmcraid_info("reset_reload: waiting for reset to complete\n");
2399         wait_event(pinstance->reset_wait_q,
2400                    !pinstance->ioa_reset_in_progress);
2401
2402         pmcraid_info("reset_reload: reset is complete !!\n");
2403         scsi_unblock_requests(pinstance->host);
2404         return pinstance->ioa_state != target_state;
2405
2406 out_unlock:
2407         spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
2408         return reset;
2409 }
2410
2411 /**
2412  * pmcraid_reset_bringdown - wrapper over pmcraid_reset_reload to bringdown IOA
2413  *
2414  * @pinstance: pointer to adapter instance structure
2415  *
2416  * Return Value
2417  *       whatever is returned from pmcraid_reset_reload
2418  */
2419 static int pmcraid_reset_bringdown(struct pmcraid_instance *pinstance)
2420 {
2421         return pmcraid_reset_reload(pinstance,
2422                                     SHUTDOWN_NORMAL,
2423                                     IOA_STATE_UNKNOWN);
2424 }
2425
2426 /**
2427  * pmcraid_reset_bringup - wrapper over pmcraid_reset_reload to bring up IOA
2428  *
2429  * @pinstance: pointer to adapter instance structure
2430  *
2431  * Return Value
2432  *       whatever is returned from pmcraid_reset_reload
2433  */
2434 static int pmcraid_reset_bringup(struct pmcraid_instance *pinstance)
2435 {
2436         pmcraid_notify_ioastate(pinstance, PMC_DEVICE_EVENT_RESET_START);
2437
2438         return pmcraid_reset_reload(pinstance,
2439                                     SHUTDOWN_NONE,
2440                                     IOA_STATE_OPERATIONAL);
2441 }
2442
2443 /**
2444  * pmcraid_request_sense - Send request sense to a device
2445  * @cmd: pmcraid command struct
2446  *
2447  * This function sends a request sense to a device as a result of a check
2448  * condition. This method re-uses the same command block that failed earlier.
2449  */
2450 static void pmcraid_request_sense(struct pmcraid_cmd *cmd)
2451 {
2452         struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
2453         struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
2454
2455         /* allocate DMAable memory for sense buffers */
2456         cmd->sense_buffer = pci_alloc_consistent(cmd->drv_inst->pdev,
2457                                                  SCSI_SENSE_BUFFERSIZE,
2458                                                  &cmd->sense_buffer_dma);
2459
2460         if (cmd->sense_buffer == NULL) {
2461                 pmcraid_err
2462                         ("couldn't allocate sense buffer for request sense\n");
2463                 pmcraid_erp_done(cmd);
2464                 return;
2465         }
2466
2467         /* re-use the command block */
2468         memset(&cmd->ioa_cb->ioasa, 0, sizeof(struct pmcraid_ioasa));
2469         memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
2470         ioarcb->request_flags0 = (SYNC_COMPLETE |
2471                                   NO_LINK_DESCS |
2472                                   INHIBIT_UL_CHECK);
2473         ioarcb->request_type = REQ_TYPE_SCSI;
2474         ioarcb->cdb[0] = REQUEST_SENSE;
2475         ioarcb->cdb[4] = SCSI_SENSE_BUFFERSIZE;
2476
2477         ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
2478                                         offsetof(struct pmcraid_ioarcb,
2479                                                 add_data.u.ioadl[0]));
2480         ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
2481
2482         ioarcb->data_transfer_length = cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
2483
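        /* single IOADL element describing the DMA-able sense buffer */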
2484         ioadl->address = cpu_to_le64(cmd->sense_buffer_dma);
2485         ioadl->data_len = cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
2486         ioadl->flags = IOADL_FLAGS_LAST_DESC;
2487
2488         /* request sense might be called as part of error response processing
2489          * which runs in tasklet context. It is possible that the mid-layer might
2490          * schedule queuecommand during this time, hence writing to IOARRIN
2491          * must be protected by host_lock
2492          */
2493         pmcraid_send_cmd(cmd, pmcraid_erp_done,
2494                          PMCRAID_REQUEST_SENSE_TIMEOUT,
2495                          pmcraid_timeout_handler);
2496 }
2497
2498 /**
2499  * pmcraid_cancel_all - cancel all outstanding IOARCBs as part of error recovery
2500  * @cmd: command that failed
2501  * @sense: true if sense data is already available (request sense is then skipped)
2502  *
2503  * This function sends a cancel all to a device to clear the queue.
2504  */
2505 static void pmcraid_cancel_all(struct pmcraid_cmd *cmd, u32 sense)
2506 {
2507         struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
2508         struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
2509         struct pmcraid_resource_entry *res = scsi_cmd->device->hostdata;
2510         void (*cmd_done) (struct pmcraid_cmd *) = sense ? pmcraid_erp_done
2511                                                         : pmcraid_request_sense;
2512
2513         memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
2514         ioarcb->request_flags0 = SYNC_OVERRIDE;
2515         ioarcb->request_type = REQ_TYPE_IOACMD;
2516         ioarcb->cdb[0] = PMCRAID_CANCEL_ALL_REQUESTS;
2517
2518         if (RES_IS_GSCSI(res->cfg_entry))
2519                 ioarcb->cdb[1] = PMCRAID_SYNC_COMPLETE_AFTER_CANCEL;
2520
2521         ioarcb->ioadl_bus_addr = 0;
2522         ioarcb->ioadl_length = 0;
2523         ioarcb->data_transfer_length = 0;
2524         ioarcb->ioarcb_bus_addr &= cpu_to_le64((~0x1FULL));
2525
2526         /* writing to IOARRIN must be protected by host_lock, as mid-layer
2527          * may schedule queuecommand while we are doing this
2528          */
2529         pmcraid_send_cmd(cmd, cmd_done,
2530                          PMCRAID_REQUEST_SENSE_TIMEOUT,
2531                          pmcraid_timeout_handler);
2532 }
2533
2534 /**
2535  * pmcraid_frame_auto_sense: frame fixed format sense information
2536  *
2537  * @cmd: pointer to failing command block
2538  *
2539  * Return value
2540  *  none
2541  */
2542 static void pmcraid_frame_auto_sense(struct pmcraid_cmd *cmd)
2543 {
2544         u8 *sense_buf = cmd->scsi_cmd->sense_buffer;
2545         struct pmcraid_resource_entry *res = cmd->scsi_cmd->device->hostdata;
2546         struct pmcraid_ioasa *ioasa = &cmd->ioa_cb->ioasa;
2547         u32 ioasc = le32_to_cpu(ioasa->ioasc);
2548         u32 failing_lba = 0;
2549
2550         memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
2551         cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
2552
2553         if (RES_IS_VSET(res->cfg_entry) &&
2554             ioasc == PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC &&
2555             ioasa->u.vset.failing_lba_hi != 0) {
2556
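                /* descriptor-format sense data (response code 0x72) with a
                 * single information descriptor carrying the 64-bit failing
                 * LBA
                 */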
2557                 sense_buf[0] = 0x72;
2558                 sense_buf[1] = PMCRAID_IOASC_SENSE_KEY(ioasc);
2559                 sense_buf[2] = PMCRAID_IOASC_SENSE_CODE(ioasc);
2560                 sense_buf[3] = PMCRAID_IOASC_SENSE_QUAL(ioasc);
2561
2562                 sense_buf[7] = 12;
2563                 sense_buf[8] = 0;
2564                 sense_buf[9] = 0x0A;
2565                 sense_buf[10] = 0x80;
2566
2567                 failing_lba = le32_to_cpu(ioasa->u.vset.failing_lba_hi);
2568
2569                 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
2570                 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
2571                 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
2572                 sense_buf[15] = failing_lba & 0x000000ff;
2573
2574                 failing_lba = le32_to_cpu(ioasa->u.vset.failing_lba_lo);
2575
2576                 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
2577                 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
2578                 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
2579                 sense_buf[19] = failing_lba & 0x000000ff;
2580         } else {
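                /* fixed-format sense data (response code 0x70); the 32-bit
                 * failing LBA, when available, goes into the information
                 * field with the valid bit set
                 */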
2581                 sense_buf[0] = 0x70;
2582                 sense_buf[2] = PMCRAID_IOASC_SENSE_KEY(ioasc);
2583                 sense_buf[12] = PMCRAID_IOASC_SENSE_CODE(ioasc);
2584                 sense_buf[13] = PMCRAID_IOASC_SENSE_QUAL(ioasc);
2585
2586                 if (ioasc == PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC) {
2587                         if (RES_IS_VSET(res->cfg_entry))
2588                                 failing_lba =
2589                                         le32_to_cpu(ioasa->u.
2590                                                  vset.failing_lba_lo);
2591                         sense_buf[0] |= 0x80;
2592                         sense_buf[3] = (failing_lba >> 24) & 0xff;
2593                         sense_buf[4] = (failing_lba >> 16) & 0xff;
2594                         sense_buf[5] = (failing_lba >> 8) & 0xff;
2595                         sense_buf[6] = failing_lba & 0xff;
2596                 }
2597
2598                 sense_buf[7] = 6; /* additional length */
2599         }
2600 }
2601
2602 /**
2603  * pmcraid_error_handler - Error response handler for a SCSI op
2604  * @cmd: pointer to pmcraid_cmd that has failed
2605  *
2606  * This function determines whether or not to initiate ERP on the affected
2607  * device. This is called from a tasklet, which doesn't hold any locks.
2608  *
2609  * Return value:
2610  *       0 if the caller can complete the request, otherwise 1, in which
2611  *       case the error handler itself completes the request and returns
2612  *       the command block back to the free pool
2613  */
2614 static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
2615 {
2616         struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
2617         struct pmcraid_resource_entry *res = scsi_cmd->device->hostdata;
2618         struct pmcraid_instance *pinstance = cmd->drv_inst;
2619         struct pmcraid_ioasa *ioasa = &cmd->ioa_cb->ioasa;
2620         u32 ioasc = le32_to_cpu(ioasa->ioasc);
2621         u32 masked_ioasc = ioasc & PMCRAID_IOASC_SENSE_MASK;
2622         u32 sense_copied = 0;
2623
2624         if (!res) {
2625                 pmcraid_info("resource pointer is NULL\n");
2626                 return 0;
2627         }
2628
2629         /* If this was a SCSI read/write command keep count of errors */
2630         if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
2631                 atomic_inc(&res->read_failures);
2632         else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
2633                 atomic_inc(&res->write_failures);
2634
2635         if (!RES_IS_GSCSI(res->cfg_entry) &&
2636                 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
2637                 pmcraid_frame_auto_sense(cmd);
2638         }
2639
2640         /* Log IOASC/IOASA information based on user settings */
2641         pmcraid_ioasc_logger(ioasc, cmd);
2642
2643         switch (masked_ioasc) {
2644
2645         case PMCRAID_IOASC_AC_TERMINATED_BY_HOST:
2646                 scsi_cmd->result |= (DID_ABORT << 16);
2647                 break;
2648
2649         case PMCRAID_IOASC_IR_INVALID_RESOURCE_HANDLE:
2650         case PMCRAID_IOASC_HW_CANNOT_COMMUNICATE:
2651                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
2652                 break;
2653
2654         case PMCRAID_IOASC_NR_SYNC_REQUIRED:
2655                 res->sync_reqd = 1;
2656                 scsi_cmd->result |= (DID_IMM_RETRY << 16);
2657                 break;
2658
2659         case PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC:
2660                 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
2661                 break;
2662
2663         case PMCRAID_IOASC_UA_BUS_WAS_RESET:
2664         case PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER:
2665                 if (!res->reset_progress)
2666                         scsi_report_bus_reset(pinstance->host,
2667                                               scsi_cmd->device->channel);
2668                 scsi_cmd->result |= (DID_ERROR << 16);
2669                 break;
2670
2671         case PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR:
2672                 scsi_cmd->result |= PMCRAID_IOASC_SENSE_STATUS(ioasc);
2673                 res->sync_reqd = 1;
2674
2675                 /* if check_condition is not active return with error otherwise
2676                  * get/frame the sense buffer
2677                  */
2678                 if (PMCRAID_IOASC_SENSE_STATUS(ioasc) !=
2679                     SAM_STAT_CHECK_CONDITION &&
2680                     PMCRAID_IOASC_SENSE_STATUS(ioasc) != SAM_STAT_ACA_ACTIVE)
2681                         return 0;
2682
2683                 /* If we have auto sense data as part of IOASA pass it to
2684                  * mid-layer
2685                  */
2686                 if (ioasa->auto_sense_length != 0) {
2687                         short sense_len = le16_to_cpu(ioasa->auto_sense_length);
2688                         int data_size = min_t(u16, sense_len,
2689                                               SCSI_SENSE_BUFFERSIZE);
2690
2691                         memcpy(scsi_cmd->sense_buffer,
2692                                ioasa->sense_data,
2693                                data_size);
2694                         sense_copied = 1;
2695                 }
2696
2697                 if (RES_IS_GSCSI(res->cfg_entry))
2698                         pmcraid_cancel_all(cmd, sense_copied);
2699                 else if (sense_copied)
2700                         pmcraid_erp_done(cmd);
2701                 else
2702                         pmcraid_request_sense(cmd);
2703
2704                 return 1;
2705
2706         case PMCRAID_IOASC_NR_INIT_CMD_REQUIRED:
2707                 break;
2708
2709         default:
2710                 if (PMCRAID_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
2711                         scsi_cmd->result |= (DID_ERROR << 16);
2712                 break;
2713         }
2714         return 0;
2715 }
2716
2717 /**
2718  * pmcraid_reset_device - device reset handler
2719  *
2720  * @scsi_cmd: scsi command struct
2721  * @modifier: reset modifier indicating the reset sequence to be performed
2722  *
2723  * This function issues a device reset to the affected device.
2724  * A LUN reset will be sent to the device first. If that does
2725  * not work, a target reset will be sent.
2726  *
2727  * Return value:
2728  *      SUCCESS / FAILED
2729  */
2730 static int pmcraid_reset_device(
2731         struct scsi_cmnd *scsi_cmd,
2732         unsigned long timeout,
2733         u8 modifier
2734 )
2735 {
2736         struct pmcraid_cmd *cmd;
2737         struct pmcraid_instance *pinstance;
2738         struct pmcraid_resource_entry *res;
2739         struct pmcraid_ioarcb *ioarcb;
2740         unsigned long lock_flags;
2741         u32 ioasc;
2742
2743         pinstance =
2744                 (struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
2745         res = scsi_cmd->device->hostdata;
2746
2747         if (!res) {
2748                 sdev_printk(KERN_ERR, scsi_cmd->device,
2749                             "reset_device: NULL resource pointer\n");
2750                 return FAILED;
2751         }
2752
2753         /* If adapter is currently going through reset/reload, return failed.
2754          * This will force the mid-layer to call _eh_bus/host reset, which
2755          * will then go to sleep and wait for the reset to complete
2756          */
2757         spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
2758         if (pinstance->ioa_reset_in_progress ||
2759             pinstance->ioa_state == IOA_STATE_DEAD) {
2760                 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
2761                 return FAILED;
2762         }
2763
2764         res->reset_progress = 1;
2765         pmcraid_info("Resetting %s resource with addr %x\n",
2766                      ((modifier & RESET_DEVICE_LUN) ? "LUN" :
2767                      ((modifier & RESET_DEVICE_TARGET) ? "TARGET" : "BUS")),
2768                      le32_to_cpu(res->cfg_entry.resource_address));
2769
2770         /* get a free cmd block */
2771         cmd = pmcraid_get_free_cmd(pinstance);
2772
2773         if (cmd == NULL) {
2774                 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
2775                 pmcraid_err("%s: no cmd blocks are available\n", __func__);
2776                 return FAILED;
2777         }
2778
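             /* build a RESET_DEVICE IOA command addressed to the resource behind
              * this scsi_cmd; cdb[1] carries the reset modifier (LUN/target/bus)
              */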
2779         ioarcb = &cmd->ioa_cb->ioarcb;
2780         ioarcb->resource_handle = res->cfg_entry.resource_handle;
2781         ioarcb->request_type = REQ_TYPE_IOACMD;
2782         ioarcb->cdb[0] = PMCRAID_RESET_DEVICE;
2783
2784         /* Initialize reset modifier bits */
2785         if (modifier)
2786                 modifier = ENABLE_RESET_MODIFIER | modifier;
2787
2788         ioarcb->cdb[1] = modifier;
2789
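             /* this is a blocking reset: pmcraid_internal_done signals the
              * completion below once the IOA responds, see the
              * wait_for_completion() further down
              */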
2790         init_completion(&cmd->wait_for_completion);
2791         cmd->completion_req = 1;
2792
2793         pmcraid_info("cmd(CDB[0] = %x) for %x with index = %d\n",
2794                      cmd->ioa_cb->ioarcb.cdb[0],
2795                      le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle),
2796                      le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2);
2797
2798         pmcraid_send_cmd(cmd,
2799                          pmcraid_internal_done,
2800                          timeout,
2801                          pmcraid_timeout_handler);
2802
2803         spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
2804
2805         /* RESET_DEVICE command completes after all pending IOARCBs are
2806          * completed. Once this command is completed, pmcraid_internal_done
2807          * will wake up the 'completion' queue.
2808          */
2809         wait_for_completion(&cmd->wait_for_completion);
2810
2811         /* complete the command here itself and return the command block
2812          * to free list
2813          */
2814         pmcraid_return_cmd(cmd);
2815         res->reset_progress = 0;
2816         ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
2817
2818         /* set the return value based on the returned ioasc */
2819         return PMCRAID_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
2820 }
2821
2822 /**
2823  * _pmcraid_io_done - helper for pmcraid_io_done function
2824  *
2825  * @cmd: pointer to pmcraid command struct
2826  * @reslen: residual data length to be reported to the mid-layer via scsi_set_resid
2827  * @ioasc: ioasc either returned by IOA or set by driver itself.
2828  *
2829  * This function is invoked by pmcraid_io_done to complete mid-layer
2830  * scsi ops.
2831  *
2832  * Return value:
2833  *        0 if the caller is required to return the command block to the
2834  *        free pool. Returns 1 if the caller need not worry about freeing it,
2835  *        as the error handler will take care of that.
2836  */
2837
2838 static int _pmcraid_io_done(struct pmcraid_cmd *cmd, int reslen, int ioasc)
2839 {
2840         struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
2841         int rc = 0;
2842
2843         scsi_set_resid(scsi_cmd, reslen);
2844
2845         pmcraid_info("response(%d) CDB[0] = %x ioasc:result: %x:%x\n",
2846                 le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
2847                 cmd->ioa_cb->ioarcb.cdb[0],
2848                 ioasc, scsi_cmd->result);
2849
2850         if (PMCRAID_IOASC_SENSE_KEY(ioasc) != 0)
2851                 rc = pmcraid_error_handler(cmd);
2852
2853         if (rc == 0) {
2854                 scsi_dma_unmap(scsi_cmd);
2855                 scsi_cmd->scsi_done(scsi_cmd);
2856         }
2857
2858         return rc;
2859 }
2860
2861 /**
2862  * pmcraid_io_done - SCSI completion function
2863  *
2864  * @cmd: pointer to pmcraid command struct
2865  *
2866  * This function is invoked by the tasklet/mid-layer error handler to
2867  * complete the SCSI ops sent from the mid-layer.
2868  *
2869  * Return value
2870  *        none
2871  */
2872
2873 static void pmcraid_io_done(struct pmcraid_cmd *cmd)
2874 {
2875         u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
2876         u32 reslen = le32_to_cpu(cmd->ioa_cb->ioasa.residual_data_length);
2877
2878         if (_pmcraid_io_done(cmd, reslen, ioasc) == 0)
2879                 pmcraid_return_cmd(cmd);
2880 }
2881
2882 /**
2883  * pmcraid_abort_cmd - Aborts a single IOARCB already submitted to IOA
2884  *
2885  * @cmd: command block of the command to be aborted
2886  *
2887  * Return Value:
2888  *       returns pointer to command structure used as cancelling cmd
2889  */
2890 static struct pmcraid_cmd *pmcraid_abort_cmd(struct pmcraid_cmd *cmd)
2891 {
2892         struct pmcraid_cmd *cancel_cmd;
2893         struct pmcraid_instance *pinstance;
2894         struct pmcraid_resource_entry *res;
2895
2896         pinstance = (struct pmcraid_instance *)cmd->drv_inst;
2897         res = cmd->scsi_cmd->device->hostdata;
2898
2899         cancel_cmd = pmcraid_get_free_cmd(pinstance);
2900
2901         if (cancel_cmd == NULL) {
2902                 pmcraid_err("%s: no cmd blocks are available\n", __func__);
2903                 return NULL;
2904         }
2905
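             /* build the cancelling IOARCB so it targets the command being aborted */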
2906         pmcraid_prepare_cancel_cmd(cancel_cmd, cmd);
2907
2908         pmcraid_info("aborting command CDB[0]= %x with index = %d\n",
2909                 cmd->ioa_cb->ioarcb.cdb[0],
2910                 le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2);
2911
2912         init_completion(&cancel_cmd->wait_for_completion);
2913         cancel_cmd->completion_req = 1;
2914
2915         pmcraid_info("command (%d) CDB[0] = %x for %x\n",
2916                 le32_to_cpu(cancel_cmd->ioa_cb->ioarcb.response_handle) >> 2,
2917                 cancel_cmd->ioa_cb->ioarcb.cdb[0],
2918                 le32_to_cpu(cancel_cmd->ioa_cb->ioarcb.resource_handle));
2919
2920         pmcraid_send_cmd(cancel_cmd,
2921                          pmcraid_internal_done,
2922                          PMCRAID_INTERNAL_TIMEOUT,
2923                          pmcraid_timeout_handler);
2924         return cancel_cmd;
2925 }
2926
2927 /**
2928  * pmcraid_abort_complete - Waits for ABORT TASK completion
2929  *
2930  * @cancel_cmd: command block used as the cancelling command
2931  *
2932  * Return Value:
2933  *       returns SUCCESS if ABORT TASK has good completion
2934  *       otherwise FAILED
2935  */
2936 static int pmcraid_abort_complete(struct pmcraid_cmd *cancel_cmd)
2937 {
2938         struct pmcraid_resource_entry *res;
2939         u32 ioasc;
2940
2941         wait_for_completion(&cancel_cmd->wait_for_completion);
2942         res = cancel_cmd->res;
2943         cancel_cmd->res = NULL;
2944         ioasc = le32_to_cpu(cancel_cmd->ioa_cb->ioasa.ioasc);
2945
2946         /* If the abort task is not timed out we will get a Good completion
2947          * as sense_key, otherwise we may get one of the following responses
2948          * due to subsequent bus reset or device reset. In case IOASC is
2949          * NR_SYNC_REQUIRED, set sync_reqd flag for the corresponding resource
2950          */
2951         if (ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET ||
2952             ioasc == PMCRAID_IOASC_NR_SYNC_REQUIRED) {
2953                 if (ioasc == PMCRAID_IOASC_NR_SYNC_REQUIRED)
2954                         res->sync_reqd = 1;
2955                 ioasc = 0;
2956         }
2957
2958         /* complete the command here itself */
2959         pmcraid_return_cmd(cancel_cmd);
2960         return PMCRAID_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
2961 }
2962
2963 /**
2964  * pmcraid_eh_abort_handler - entry point for aborting a single task on errors
2965  *
2966  * @scsi_cmd:   scsi command struct given by mid-layer. When this is called,
2967  *              the mid-layer ensures that no other commands are queued. This
2968  *              is never called in interrupt context, but from a separate eh thread.
2969  *
2970  * Return value:
2971  *       SUCCESS / FAILED
2972  */
2973 static int pmcraid_eh_abort_handler(struct scsi_cmnd *scsi_cmd)
2974 {
2975         struct pmcraid_instance *pinstance;
2976         struct pmcraid_cmd *cmd;
2977         struct pmcraid_resource_entry *res;
2978         unsigned long host_lock_flags;
2979         unsigned long pending_lock_flags;
2980         struct pmcraid_cmd *cancel_cmd = NULL;
2981         int cmd_found = 0;
2982         int rc = FAILED;
2983
2984         pinstance =
2985                 (struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
2986
2987         scmd_printk(KERN_INFO, scsi_cmd,
2988                     "I/O command timed out, aborting it.\n");
2989
2990         res = scsi_cmd->device->hostdata;
2991
2992         if (res == NULL)
2993                 return rc;
2994
2995         /* If we are currently going through reset/reload, return failed.
2996          * This will force the mid-layer to eventually call
2997          * pmcraid_eh_host_reset which will then go to sleep and wait for the
2998          * reset to complete
2999          */
3000         spin_lock_irqsave(pinstance->host->host_lock, host_lock_flags);
3001
3002         if (pinstance->ioa_reset_in_progress ||
3003             pinstance->ioa_state == IOA_STATE_DEAD) {
3004                 spin_unlock_irqrestore(pinstance->host->host_lock,
3005                                        host_lock_flags);
3006                 return rc;
3007         }
3008
3009         /* loop over pending cmd list to find cmd corresponding to this
3010          * scsi_cmd. Note that the command may already have completed, in
3011          * which case it will not be found here. Locking: all pending commands
3012          * are protected with pending_pool_lock.
3013          */
3014         spin_lock_irqsave(&pinstance->pending_pool_lock, pending_lock_flags);
3015         list_for_each_entry(cmd, &pinstance->pending_cmd_pool, free_list) {
3016
3017                 if (cmd->scsi_cmd == scsi_cmd) {
3018                         cmd_found = 1;
3019                         break;
3020                 }
3021         }
3022
3023         spin_unlock_irqrestore(&pinstance->pending_pool_lock,
3024                                 pending_lock_flags);
3025
3026         /* If the command to be aborted was given to IOA and still pending with
3027          * it, send ABORT_TASK to abort this and wait for its completion
3028          */
3029         if (cmd_found)
3030                 cancel_cmd = pmcraid_abort_cmd(cmd);
3031
3032         spin_unlock_irqrestore(pinstance->host->host_lock,
3033                                host_lock_flags);
3034
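             /* wait for the abort outside host_lock; pmcraid_abort_complete() also
              * uses cancel_cmd->res to flag sync_reqd on the resource if needed
              */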
3035         if (cancel_cmd) {
3036                 cancel_cmd->res = cmd->scsi_cmd->device->hostdata;
3037                 rc = pmcraid_abort_complete(cancel_cmd);
3038         }
3039
3040         return cmd_found ? rc : SUCCESS;
3041 }
3042
3043 /**
3044  * pmcraid_eh_xxxx_reset_handler - bus/target/device reset handler callbacks
3045  *
3046  * @scmd: pointer to scsi_cmd that was sent to the resource to be reset.
3047  *
3048  * All these routines invoke pmcraid_reset_device with appropriate parameters.
3049  * Since these are called from mid-layer EH thread, no other IO will be queued
3050  * to the resource being reset. However, control path (IOCTL) may be active so
3051  * it is necessary to synchronize IOARRIN writes, which pmcraid_reset_device
3052  * takes care of by locking/unlocking host_lock.
3053  *
3054  * Return value
3055  *      SUCCESS or FAILED
3056  */
3057 static int pmcraid_eh_device_reset_handler(struct scsi_cmnd *scmd)
3058 {
3059         scmd_printk(KERN_INFO, scmd,
3060                     "resetting device due to an I/O command timeout.\n");
3061         return pmcraid_reset_device(scmd,
3062                                     PMCRAID_INTERNAL_TIMEOUT,
3063                                     RESET_DEVICE_LUN);
3064 }
3065
3066 static int pmcraid_eh_bus_reset_handler(struct scsi_cmnd *scmd)
3067 {
3068         scmd_printk(KERN_INFO, scmd,
3069                     "Doing bus reset due to an I/O command timeout.\n");
3070         return pmcraid_reset_device(scmd,
3071                                     PMCRAID_RESET_BUS_TIMEOUT,
3072                                     RESET_DEVICE_BUS);
3073 }
3074
3075 static int pmcraid_eh_target_reset_handler(struct scsi_cmnd *scmd)
3076 {
3077         scmd_printk(KERN_INFO, scmd,
3078                     "Doing target reset due to an I/O command timeout.\n");
3079         return pmcraid_reset_device(scmd,
3080                                     PMCRAID_INTERNAL_TIMEOUT,
3081                                     RESET_DEVICE_TARGET);
3082 }
3083
3084 /**
3085  * pmcraid_eh_host_reset_handler - adapter reset handler callback
3086  *
3087  * @scmd: pointer to scsi_cmd that was sent to a resource of adapter
3088  *
3089  * Initiates adapter reset to bring it up to operational state
3090  *
3091  * Return value
3092  *      SUCCESS or FAILED
3093  */
3094 static int pmcraid_eh_host_reset_handler(struct scsi_cmnd *scmd)
3095 {
3096         unsigned long interval = 10000; /* 10 seconds interval */
3097         int waits = jiffies_to_msecs(PMCRAID_RESET_HOST_TIMEOUT) / interval;
3098         struct pmcraid_instance *pinstance =
3099                 (struct pmcraid_instance *)(scmd->device->host->hostdata);
3100
3101
3102         /* wait for up to 150 seconds in case the firmware comes up and is
3103          * able to complete all the pending commands, excluding the two HCAMs
3104          * (CCN and LDN).
3105          */
3106         while (waits--) {
3107                 if (atomic_read(&pinstance->outstanding_cmds) <=
3108                     PMCRAID_MAX_HCAM_CMD)
3109                         return SUCCESS;
3110                 msleep(interval);
3111         }
3112
3113         dev_err(&pinstance->pdev->dev,
3114                 "Adapter being reset due to an I/O command timeout.\n");
3115         return pmcraid_reset_bringup(pinstance) == 0 ? SUCCESS : FAILED;
3116 }
3117
3118 /**
3119  * pmcraid_init_ioadls - initializes IOADL related fields in IOARCB
3120  * @cmd: pmcraid command struct
3121  * @sgcount: count of scatter-gather elements
3122  *
3123  * Return value
3124  *   returns a pointer to pmcraid_ioadl_desc, initialized to point to internal
3125  *   or external IOADLs
3126  */
3127 static struct pmcraid_ioadl_desc *
3128 pmcraid_init_ioadls(struct pmcraid_cmd *cmd, int sgcount)
3129 {
3130         struct pmcraid_ioadl_desc *ioadl;
3131         struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
3132         int ioadl_count = 0;
3133
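             /* additional command parameters, when present, occupy the leading
              * IOADL slots (16 bytes each), so data descriptors must be placed
              * after them
              */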
3134         if (ioarcb->add_cmd_param_length)
3135                 ioadl_count = DIV_ROUND_UP(le16_to_cpu(ioarcb->add_cmd_param_length), 16);
3136         ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc) * sgcount);
3137
3138         if ((sgcount + ioadl_count) > (ARRAY_SIZE(ioarcb->add_data.u.ioadl))) {
3139                 /* external ioadls start at offset 0x80 from control_block
3140                  * structure, re-using 24 of the 27 ioadls that are part of the IOARCB.
3141                  * It is necessary to indicate to the firmware that the driver is
3142                  * using ioadls that are to be treated as external to the IOARCB.
3143                  */
3144                 ioarcb->ioarcb_bus_addr &= cpu_to_le64(~(0x1FULL));
3145                 ioarcb->ioadl_bus_addr =
3146                         cpu_to_le64((cmd->ioa_cb_bus_addr) +
3147                                 offsetof(struct pmcraid_ioarcb,
3148                                         add_data.u.ioadl[3]));
3149                 ioadl = &ioarcb->add_data.u.ioadl[3];
3150         } else {
3151                 ioarcb->ioadl_bus_addr =
3152                         cpu_to_le64((cmd->ioa_cb_bus_addr) +
3153                                 offsetof(struct pmcraid_ioarcb,
3154                                         add_data.u.ioadl[ioadl_count]));
3155
3156                 ioadl = &ioarcb->add_data.u.ioadl[ioadl_count];
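                     /* descriptors fit inside the IOARCB itself; fold the rounded
                      * count of 8-descriptor groups into the low-order bits of
                      * ioarcb_bus_addr
                      */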
3157                 ioarcb->ioarcb_bus_addr |=
3158                         cpu_to_le64(DIV_ROUND_CLOSEST(sgcount + ioadl_count, 8));
3159         }
3160
3161         return ioadl;
3162 }
3163
3164 /**
3165  * pmcraid_build_ioadl - Build a scatter/gather list and map the buffer
3166  * @pinstance: pointer to adapter instance structure
3167  * @cmd: pmcraid command struct
3168  *
3169  * This function is invoked by queuecommand entry point while sending a command
3170  * to firmware. This builds ioadl descriptors and sets up ioarcb fields.
3171  *
3172  * Return value:
3173  *      0 on success or -1 on failure
3174  */
3175 static int pmcraid_build_ioadl(
3176         struct pmcraid_instance *pinstance,
3177         struct pmcraid_cmd *cmd
3178 )
3179 {
3180         int i, nseg;
3181         struct scatterlist *sglist;
3182
3183         struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
3184         struct pmcraid_ioarcb *ioarcb = &(cmd->ioa_cb->ioarcb);
3185         struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
3186
3187         u32 length = scsi_bufflen(scsi_cmd);
3188
3189         if (!length)
3190                 return 0;
3191
3192         nseg = scsi_dma_map(scsi_cmd);
3193
3194         if (nseg < 0) {
3195                 scmd_printk(KERN_ERR, scsi_cmd, "scsi_dma_map failed!\n");
3196                 return -1;
3197         } else if (nseg > PMCRAID_MAX_IOADLS) {
3198                 scsi_dma_unmap(scsi_cmd);
3199                 scmd_printk(KERN_ERR, scsi_cmd,
3200                         "sg count is (%d) more than allowed!\n", nseg);
3201                 return -1;
3202         }
3203
3204         /* Initialize IOARCB data transfer length fields */
3205         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE)
3206                 ioarcb->request_flags0 |= TRANSFER_DIR_WRITE;
3207
3208         ioarcb->request_flags0 |= NO_LINK_DESCS;
3209         ioarcb->data_transfer_length = cpu_to_le32(length);
3210         ioadl = pmcraid_init_ioadls(cmd, nseg);
3211
3212         /* Initialize IOADL descriptor addresses */
3213         scsi_for_each_sg(scsi_cmd, sglist, nseg, i) {
3214                 ioadl[i].data_len = cpu_to_le32(sg_dma_len(sglist));
3215                 ioadl[i].address = cpu_to_le64(sg_dma_address(sglist));
3216                 ioadl[i].flags = 0;
3217         }
3218         /* setup last descriptor */
3219         ioadl[i - 1].flags = IOADL_FLAGS_LAST_DESC;
3220
3221         return 0;
3222 }
3223
3224 /**
3225  * pmcraid_free_sglist - Frees an allocated SG buffer list
3226  * @sglist: scatter/gather list pointer
3227  *
3228  * Frees DMA'able memory previously allocated with pmcraid_alloc_sglist
3229  *
3230  * Return value:
3231  *      none
3232  */
3233 static void pmcraid_free_sglist(struct pmcraid_sglist *sglist)
3234 {
3235         int i;
3236
3237         for (i = 0; i < sglist->num_sg; i++)
3238                 __free_pages(sg_page(&(sglist->scatterlist[i])),
3239                              sglist->order);
3240
3241         kfree(sglist);
3242 }
3243
3244 /**
3245  * pmcraid_alloc_sglist - Allocates memory for a SG list
3246  * @buflen: buffer length
3247  *
3248  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3249  * list.
3250  *
3251  * Return value
3252  *      pointer to sglist / NULL on failure
3253  */
3254 static struct pmcraid_sglist *pmcraid_alloc_sglist(int buflen)
3255 {
3256         struct pmcraid_sglist *sglist;
3257         struct scatterlist *scatterlist;
3258         struct page *page;
3259         int num_elem, i, j;
3260         int sg_size;
3261         int order;
3262         int bsize_elem;
3263
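             /* spread the buffer over at most PMCRAID_MAX_IOADLS - 1 elements; each
              * element is a power-of-two number of pages (bsize_elem) so that it
              * can be obtained from alloc_pages() with the computed order
              */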
3264         sg_size = buflen / (PMCRAID_MAX_IOADLS - 1);
3265         order = (sg_size > 0) ? get_order(sg_size) : 0;
3266         bsize_elem = PAGE_SIZE * (1 << order);
3267
3268         /* Determine the actual number of sg entries needed */
3269         if (buflen % bsize_elem)
3270                 num_elem = (buflen / bsize_elem) + 1;
3271         else
3272                 num_elem = buflen / bsize_elem;
3273
3274         /* Allocate a scatter/gather list for the DMA */
3275         sglist = kzalloc(sizeof(struct pmcraid_sglist) +
3276                          (sizeof(struct scatterlist) * (num_elem - 1)),
3277                          GFP_KERNEL);
3278
3279         if (sglist == NULL)
3280                 return NULL;
3281
3282         scatterlist = sglist->scatterlist;
3283         sg_init_table(scatterlist, num_elem);
3284         sglist->order = order;
3285         sglist->num_sg = num_elem;
3286         sg_size = buflen;
3287
3288         for (i = 0; i < num_elem; i++) {
3289                 page = alloc_pages(GFP_KERNEL|GFP_DMA|__GFP_ZERO, order);
3290                 if (!page) {
3291                         for (j = i - 1; j >= 0; j--)
3292                                 __free_pages(sg_page(&scatterlist[j]), order);
3293                         kfree(sglist);
3294                         return NULL;
3295                 }
3296
3297                 sg_set_page(&scatterlist[i], page,
3298                         sg_size < bsize_elem ? sg_size : bsize_elem, 0);
3299                 sg_size -= bsize_elem;
3300         }
3301
3302         return sglist;
3303 }
3304
3305 /**
3306  * pmcraid_copy_sglist - Copy user buffer to kernel buffer's SG list
3307  * @sglist: scatter/gather list pointer
3308  * @buffer: buffer pointer
3309  * @len: buffer length
3310  * @direction: data transfer direction
3311  *
3312  * Copies data between a user buffer and a kernel buffer allocated with pmcraid_alloc_sglist
3313  *
3314  * Return value:
3315  * 0 on success / other on failure
3316  */
3317 static int pmcraid_copy_sglist(
3318         struct pmcraid_sglist *sglist,
3319         void __user *buffer,
3320         u32 len,
3321         int direction
3322 )
3323 {
3324         struct scatterlist *scatterlist;
3325         void *kaddr;
3326         int bsize_elem;
3327         int i;
3328         int rc = 0;
3329
3330         /* Determine the actual number of bytes per element */
3331         bsize_elem = PAGE_SIZE * (1 << sglist->order);
3332
3333         scatterlist = sglist->scatterlist;
3334
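             /* copy full bsize_elem-sized chunks first, one scatterlist element at a time */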
3335         for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3336                 struct page *page = sg_page(&scatterlist[i]);
3337
3338                 kaddr = kmap(page);
3339                 if (direction == DMA_TO_DEVICE)
3340                         rc = copy_from_user(kaddr, buffer, bsize_elem);
3341                 else
3342                         rc = copy_to_user(buffer, kaddr, bsize_elem);
3343
3344                 kunmap(page);
3345
3346                 if (rc) {
3347                         pmcraid_err("failed to copy user data into sg list\n");
3348                         return -EFAULT;
3349                 }
3350
3351                 scatterlist[i].length = bsize_elem;
3352         }
3353
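             /* copy any remaining tail that does not fill a complete element */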
3354         if (len % bsize_elem) {
3355                 struct page *page = sg_page(&scatterlist[i]);
3356
3357                 kaddr = kmap(page);
3358
3359                 if (direction == DMA_TO_DEVICE)
3360                         rc = copy_from_user(kaddr, buffer, len % bsize_elem);
3361                 else
3362                         rc = copy_to_user(buffer, kaddr, len % bsize_elem);
3363
3364                 kunmap(page);
3365
3366                 scatterlist[i].length = len % bsize_elem;
3367         }
3368
3369         if (rc) {
3370                 pmcraid_err("failed to copy user data into sg list\n");
3371                 rc = -EFAULT;
3372         }
3373
3374         return rc;
3375 }
3376
3377 /**
3378  * pmcraid_queuecommand - Queue a mid-layer request
3379  * @scsi_cmd: scsi command struct
3380  * @done: done function
3381  *
3382  * This function queues a request generated by the mid-layer. The mid-layer
3383  * calls this routine with host->lock held. Some of the functions called by
3384  * queuecommand use the cmd block queue locks (free_pool_lock and pending_pool_lock).
3385  *
3386  * Return value:
3387  *        0 on success
3388  *        SCSI_MLQUEUE_DEVICE_BUSY if device is busy
3389  *        SCSI_MLQUEUE_HOST_BUSY if host is busy
3390  */
3391 static int pmcraid_queuecommand_lck(
3392         struct scsi_cmnd *scsi_cmd,
3393         void (*done) (struct scsi_cmnd *)
3394 )
3395 {
3396         struct pmcraid_instance *pinstance;
3397         struct pmcraid_resource_entry *res;
3398         struct pmcraid_ioarcb *ioarcb;
3399         struct pmcraid_cmd *cmd;
3400         u32 fw_version;
3401         int rc = 0;
3402
3403         pinstance =
3404                 (struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
3405         fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
3406         scsi_cmd->scsi_done = done;
3407         res = scsi_cmd->device->hostdata;
3408         scsi_cmd->result = (DID_OK << 16);
3409
3410         /* if adapter is marked as dead, set result to DID_NO_CONNECT complete
3411          * the command
3412          */
3413         if (pinstance->ioa_state == IOA_STATE_DEAD) {
3414                 pmcraid_info("IOA is dead, but queuecommand is scheduled\n");
3415                 scsi_cmd->result = (DID_NO_CONNECT << 16);
3416                 scsi_cmd->scsi_done(scsi_cmd);
3417                 return 0;
3418         }
3419
3420         /* If IOA reset is in progress, can't queue the commands */
3421         if (pinstance->ioa_reset_in_progress)
3422                 return SCSI_MLQUEUE_HOST_BUSY;
3423
3424         /* Firmware doesn't support SYNCHRONIZE_CACHE command (0x35), complete
3425          * the command here itself with success return
3426          */
3427         if (scsi_cmd->cmnd[0] == SYNCHRONIZE_CACHE) {
3428                 pmcraid_info("SYNC_CACHE(0x35), completing in driver itself\n");
3429                 scsi_cmd->scsi_done(scsi_cmd);
3430                 return 0;
3431         }
3432
3433         /* initialize the command and IOARCB to be sent to IOA */
3434         cmd = pmcraid_get_free_cmd(pinstance);
3435
3436         if (cmd == NULL) {
3437                 pmcraid_err("free command block is not available\n");
3438                 return SCSI_MLQUEUE_HOST_BUSY;
3439         }
3440
3441         cmd->scsi_cmd = scsi_cmd;
3442         ioarcb = &(cmd->ioa_cb->ioarcb);
3443         memcpy(ioarcb->cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
3444         ioarcb->resource_handle = res->cfg_entry.resource_handle;
3445         ioarcb->request_type = REQ_TYPE_SCSI;
3446
3447         /* set the hrrq number where the IOA should respond. Note that all cmds
3448          * generated internally use hrrq_id 0; the exception is the cmd block of
3449          * a scsi_cmd that is re-used (e.g. for cancel/abort), which keeps the
3450          * hrrq_id assigned here in queuecommand
3451          */
3452         ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
3453                           pinstance->num_hrrq;
3454         cmd->cmd_done = pmcraid_io_done;
3455
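             /* underflow, sync and tagging related request flags are set only for
              * pass-through SCSI (GSCSI) and volume-set (VSET) resources
              */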
3456         if (RES_IS_GSCSI(res->cfg_entry) || RES_IS_VSET(res->cfg_entry)) {
3457                 if (scsi_cmd->underflow == 0)
3458                         ioarcb->request_flags0 |= INHIBIT_UL_CHECK;
3459
3460                 if (res->sync_reqd) {
3461                         ioarcb->request_flags0 |= SYNC_COMPLETE;
3462                         res->sync_reqd = 0;
3463                 }
3464
3465                 ioarcb->request_flags0 |= NO_LINK_DESCS;
3466
3467                 if (scsi_cmd->flags & SCMD_TAGGED)
3468                         ioarcb->request_flags1 |= TASK_TAG_SIMPLE;
3469
3470                 if (RES_IS_GSCSI(res->cfg_entry))
3471                         ioarcb->request_flags1 |= DELAY_AFTER_RESET;
3472         }
3473
3474         rc = pmcraid_build_ioadl(pinstance, cmd);
3475
3476         pmcraid_info("command (%d) CDB[0] = %x for %x:%x:%x:%x\n",
3477                      le32_to_cpu(ioarcb->response_handle) >> 2,
3478                      scsi_cmd->cmnd[0], pinstance->host->unique_id,
3479                      RES_IS_VSET(res->cfg_entry) ? PMCRAID_VSET_BUS_ID :
3480                         PMCRAID_PHYS_BUS_ID,
3481                      RES_IS_VSET(res->cfg_entry) ?
3482                         (fw_version <= PMCRAID_FW_VERSION_1 ?
3483                                 res->cfg_entry.unique_flags1 :
3484                                 le16_to_cpu(res->cfg_entry.array_id) & 0xFF) :
3485                         RES_TARGET(res->cfg_entry.resource_address),
3486                      RES_LUN(res->cfg_entry.resource_address));
3487
3488         if (likely(rc == 0)) {
3489                 _pmcraid_fire_command(cmd);
3490         } else {
3491                 pmcraid_err("queuecommand could not build ioadl\n");
3492                 pmcraid_return_cmd(cmd);
3493                 rc = SCSI_MLQUEUE_HOST_BUSY;
3494         }
3495
3496         return rc;
3497 }
3498
3499 static DEF_SCSI_QCMD(pmcraid_queuecommand)
3500
3501 /**
3502  * pmcraid_chr_open - char node "open" entry point, allowed only for users with admin access
3503  */
3504 static int pmcraid_chr_open(struct inode *inode, struct file *filep)
3505 {
3506         struct pmcraid_instance *pinstance;
3507
3508         if (!capable(CAP_SYS_ADMIN))
3509                 return -EACCES;
3510
3511         /* Populate adapter instance * pointer for use by ioctl */
3512         pinstance = container_of(inode->i_cdev, struct pmcraid_instance, cdev);
3513         filep->private_data = pinstance;
3514
3515         return 0;
3516 }
3517
3518 /**
3519  * pmcraid_chr_fasync - Async notifier registration from applications
3520  *
3521  * This function adds the calling process to the adapter's aen queue. When an
3522  * event occurs, SIGIO will be sent to all processes in this queue.
3523  */
3524 static int pmcraid_chr_fasync(int fd, struct file *filep, int mode)
3525 {
3526         struct pmcraid_instance *pinstance;
3527         int rc;
3528
3529         pinstance = filep->private_data;
3530         mutex_lock(&pinstance->aen_queue_lock);
3531         rc = fasync_helper(fd, filep, mode, &pinstance->aen_queue);
3532         mutex_unlock(&pinstance->aen_queue_lock);
3533
3534         return rc;
3535 }
3536
3537
3538 /**
3539  * pmcraid_build_passthrough_ioadls - builds SG elements for passthrough
3540  * commands sent over IOCTL interface
3541  *
3542  * @cmd       : pointer to struct pmcraid_cmd
3543  * @buflen    : length of the request buffer
3544  * @direction : data transfer direction
3545  *
3546  * Return value
3547  *  0 on success, non-zero error code on failure
3548  */
3549 static int pmcraid_build_passthrough_ioadls(
3550         struct pmcraid_cmd *cmd,
3551         int buflen,
3552         int direction
3553 )
3554 {
3555         struct pmcraid_sglist *sglist = NULL;
3556         struct scatterlist *sg = NULL;
3557         struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
3558         struct pmcraid_ioadl_desc *ioadl;
3559         int i;
3560
3561         sglist = pmcraid_alloc_sglist(buflen);
3562
3563         if (!sglist) {
3564                 pmcraid_err("can't allocate memory for passthrough SGls\n");
3565                 return -ENOMEM;
3566         }
3567
3568         sglist->num_dma_sg = pci_map_sg(cmd->drv_inst->pdev,
3569                                         sglist->scatterlist,
3570                                         sglist->num_sg, direction);
3571
3572         if (!sglist->num_dma_sg || sglist->num_dma_sg > PMCRAID_MAX_IOADLS) {
3573                 dev_err(&cmd->drv_inst->pdev->dev,
3574                         "Failed to map passthrough buffer!\n");
3575                 pmcraid_free_sglist(sglist);
3576                 return -EIO;
3577         }
3578
3579         cmd->sglist = sglist;
3580         ioarcb->request_flags0 |= NO_LINK_DESCS;
3581
3582         ioadl = pmcraid_init_ioadls(cmd, sglist->num_dma_sg);
3583
3584         /* Initialize IOADL descriptor addresses */
3585         for_each_sg(sglist->scatterlist, sg, sglist->num_dma_sg, i) {
3586                 ioadl[i].data_len = cpu_to_le32(sg_dma_len(sg));
3587                 ioadl[i].address = cpu_to_le64(sg_dma_address(sg));
3588                 ioadl[i].flags = 0;
3589         }
3590
3591         /* setup the last descriptor */
3592         ioadl[i - 1].flags = IOADL_FLAGS_LAST_DESC;
3593
3594         return 0;
3595 }
3596
3597
3598 /**
3599  * pmcraid_release_passthrough_ioadls - release passthrough ioadls
3600  *
3601  * @cmd: pointer to struct pmcraid_cmd for which ioadls were allocated
3602  * @buflen: size of the request buffer
3603  * @direction: data transfer direction
3604  *
3605  * Return value
3606  *  none
3607  */
3608 static void pmcraid_release_passthrough_ioadls(
3609         struct pmcraid_cmd *cmd,
3610         int buflen,
3611         int direction
3612 )
3613 {
3614         struct pmcraid_sglist *sglist = cmd->sglist;
3615
3616         if (buflen > 0) {
3617                 pci_unmap_sg(cmd->drv_inst->pdev,
3618                              sglist->scatterlist,
3619                              sglist->num_sg,
3620                              direction);
3621                 pmcraid_free_sglist(sglist);
3622                 cmd->sglist = NULL;
3623         }
3624 }
3625
3626 /**
3627  * pmcraid_ioctl_passthrough - handling passthrough IOCTL commands
3628  *
3629  * @pinstance: pointer to adapter instance structure
3630  * @cmd: ioctl code
3631  * @arg: pointer to pmcraid_passthrough_buffer user buffer
3632  *
3633  * Return value
3634  *  0 on success, non-zero error code on failure
3635  */
3636 static long pmcraid_ioctl_passthrough(
3637         struct pmcraid_instance *pinstance,
3638         unsigned int ioctl_cmd,
3639         unsigned int buflen,
3640         void __user *arg
3641 )
3642 {
3643         struct pmcraid_passthrough_ioctl_buffer *buffer;
3644         struct pmcraid_ioarcb *ioarcb;
3645         struct pmcraid_cmd *cmd;
3646         struct pmcraid_cmd *cancel_cmd;
3647         void __user *request_buffer;
3648         unsigned long request_offset;
3649         unsigned long lock_flags;
3650         void __user *ioasa;
3651         u32 ioasc;
3652         int request_size;
3653         int buffer_size;
3654         u8 access, direction;
3655         int rc = 0;
3656
3657         /* If IOA reset is in progress, wait 10 secs for reset to complete */
3658         if (pinstance->ioa_reset_in_progress) {
3659                 rc = wait_event_interruptible_timeout(
3660                                 pinstance->reset_wait_q,
3661                                 !pinstance->ioa_reset_in_progress,
3662                                 msecs_to_jiffies(10000));
3663
3664                 if (!rc)
3665                         return -ETIMEDOUT;
3666                 else if (rc < 0)
3667                         return -ERESTARTSYS;
3668         }
3669
3670         /* If adapter is not in operational state, return error */
3671         if (pinstance->ioa_state != IOA_STATE_OPERATIONAL) {
3672                 pmcraid_err("IOA is not operational\n");
3673                 return -ENOTTY;
3674         }
3675
3676         buffer_size = sizeof(struct pmcraid_passthrough_ioctl_buffer);
3677         buffer = kmalloc(buffer_size, GFP_KERNEL);
3678
3679         if (!buffer) {
3680                 pmcraid_err("no memory for passthrough buffer\n");
3681                 return -ENOMEM;
3682         }
3683
3684         request_offset =
3685             offsetof(struct pmcraid_passthrough_ioctl_buffer, request_buffer);
3686
3687         request_buffer = arg + request_offset;
3688
3689         rc = copy_from_user(buffer, arg,
3690                              sizeof(struct pmcraid_passthrough_ioctl_buffer));
3691
3692         ioasa = arg + offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa);
3693
3694         if (rc) {
3695                 pmcraid_err("ioctl: can't copy passthrough buffer\n");
3696                 rc = -EFAULT;
3697                 goto out_free_buffer;
3698         }
3699
3700         request_size = le32_to_cpu(buffer->ioarcb.data_transfer_length);
3701
3702         if (buffer->ioarcb.request_flags0 & TRANSFER_DIR_WRITE) {
3703                 access = VERIFY_READ;
3704                 direction = DMA_TO_DEVICE;
3705         } else {
3706                 access = VERIFY_WRITE;
3707                 direction = DMA_FROM_DEVICE;
3708         }
3709
3710         if (request_size < 0) {
3711                 rc = -EINVAL;
3712                 goto out_free_buffer;
3713         }
3714
3715         /* check if we have any additional command parameters */
3716         if (le16_to_cpu(buffer->ioarcb.add_cmd_param_length)
3717              > PMCRAID_ADD_CMD_PARAM_LEN) {
3718                 rc = -EINVAL;
3719                 goto out_free_buffer;
3720         }
3721
3722         cmd = pmcraid_get_free_cmd(pinstance);
3723
3724         if (!cmd) {
3725                 pmcraid_err("free command block is not available\n");
3726                 rc = -ENOMEM;
3727                 goto out_free_buffer;
3728         }
3729
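             /* internally generated command: no scsi_cmd is attached and only
              * selected IOARCB fields are taken from the user-supplied buffer so
              * that driver-managed fields are not overwritten
              */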
3730         cmd->scsi_cmd = NULL;
3731         ioarcb = &(cmd->ioa_cb->ioarcb);
3732
3733         /* Copy the user-provided IOARCB stuff field by field */
3734         ioarcb->resource_handle = buffer->ioarcb.resource_handle;
3735         ioarcb->data_transfer_length = buffer->ioarcb.data_transfer_length;
3736         ioarcb->cmd_timeout = buffer->ioarcb.cmd_timeout;
3737         ioarcb->request_type = buffer->ioarcb.request_type;
3738         ioarcb->request_flags0 = buffer->ioarcb.request_flags0;
3739         ioarcb->request_flags1 = buffer->ioarcb.request_flags1;
3740         memcpy(ioarcb->cdb, buffer->ioarcb.cdb, PMCRAID_MAX_CDB_LEN);
3741
3742         if (buffer->ioarcb.add_cmd_param_length) {
3743                 ioarcb->add_cmd_param_length =
3744                         buffer->ioarcb.add_cmd_param_length;
3745                 ioarcb->add_cmd_param_offset =
3746                         buffer->ioarcb.add_cmd_param_offset;
3747                 memcpy(ioarcb->add_data.u.add_cmd_params,
3748                         buffer->ioarcb.add_data.u.add_cmd_params,
3749                         le16_to_cpu(buffer->ioarcb.add_cmd_param_length));
3750         }
3751
3752         /* set the hrrq number where the IOA should respond. Note that all cmds
3753          * generated internally use hrrq_id 0; the exception is the cmd block of
3754          * a scsi_cmd that is re-used (e.g. for cancel/abort), which keeps the
3755          * hrrq_id assigned to it in queuecommand
3756          */
3757         ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
3758                           pinstance->num_hrrq;
3759
3760         if (request_size) {
3761                 rc = pmcraid_build_passthrough_ioadls(cmd,
3762                                                       request_size,
3763                                                       direction);
3764                 if (rc) {
3765                         pmcraid_err("couldn't build passthrough ioadls\n");
3766                         goto out_free_cmd;
3767                 }
3768         }
3769
3770         /* If data is being written into the device, copy the data from user
3771          * buffers
3772          */
3773         if (direction == DMA_TO_DEVICE && request_size > 0) {
3774                 rc = pmcraid_copy_sglist(cmd->sglist,
3775                                          request_buffer,
3776                                          request_size,
3777                                          direction);
3778                 if (rc) {
3779                         pmcraid_err("failed to copy user buffer\n");
3780                         goto out_free_sglist;
3781                 }
3782         }
3783
3784         /* passthrough ioctl is a blocking command, so put the caller to sleep
3785          * until timeout. Note that a timeout value of 0 means a blocking wait (no timeout).
3786          */
3787         cmd->cmd_done = pmcraid_internal_done;
3788         init_completion(&cmd->wait_for_completion);
3789         cmd->completion_req = 1;
3790
3791         pmcraid_info("command(%d) (CDB[0] = %x) for %x\n",
3792                      le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
3793                      cmd->ioa_cb->ioarcb.cdb[0],
3794                      le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle));
3795
3796         spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
3797         _pmcraid_fire_command(cmd);
3798         spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
3799
3800         /* NOTE ! Remove the below line once abort_task is implemented
3801          * in firmware. This line disables ioctl command timeout handling logic
3802          * similar to IO command timeout handling, making ioctl commands wait
3803          * until command completion regardless of the timeout value specified in
3804          * the ioarcb
3805          */
3806         buffer->ioarcb.cmd_timeout = 0;
3807
3808         /* If command timeout is specified put caller to wait till that time,
3809          * otherwise it would be blocking wait. If command gets timed out, it
3810          * will be aborted.
3811          */
3812         if (buffer->ioarcb.cmd_timeout == 0) {
3813                 wait_for_completion(&cmd->wait_for_completion);
3814         } else if (!wait_for_completion_timeout(
3815                         &cmd->wait_for_completion,
3816                         msecs_to_jiffies(le16_to_cpu(buffer->ioarcb.cmd_timeout) * 1000))) {
3817
3818                 pmcraid_info("aborting cmd %d (CDB[0] = %x) due to timeout\n",
3819                         le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
3820                         cmd->ioa_cb->ioarcb.cdb[0]);
3821
3822                 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
3823                 cancel_cmd = pmcraid_abort_cmd(cmd);
3824                 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
3825
3826                 if (cancel_cmd) {
3827                         wait_for_completion(&cancel_cmd->wait_for_completion);
3828                         ioasc = le32_to_cpu(cancel_cmd->ioa_cb->ioasa.ioasc);
3829                         pmcraid_return_cmd(cancel_cmd);
3830
3831                         /* if abort task couldn't find the command i.e it got
3832                          * completed prior to aborting, return good completion.
3833                          * if command got aborted successfully or there was IOA
3834                          * reset due to abort task itself getting timedout then
3835                          * return -ETIMEDOUT
3836                          */
3837                         if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET ||
3838                             PMCRAID_IOASC_SENSE_KEY(ioasc) == 0x00) {
3839                                 if (ioasc != PMCRAID_IOASC_GC_IOARCB_NOTFOUND)
3840                                         rc = -ETIMEDOUT;
3841                                 goto out_handle_response;
3842                         }
3843                 }
3844
3845                 /* no command block for abort task or abort task failed to abort
3846                  * the IOARCB, then wait for 150 more seconds and initiate reset
3847                  * sequence after timeout
3848                  */
3849                 if (!wait_for_completion_timeout(
3850                         &cmd->wait_for_completion,
3851                         msecs_to_jiffies(150 * 1000))) {
3852                         pmcraid_reset_bringup(cmd->drv_inst);
3853                         rc = -ETIMEDOUT;
3854                 }
3855         }
3856
3857 out_handle_response:
3858         /* copy entire IOASA buffer and return IOCTL success.
3859          * If copying IOASA to user-buffer fails, return
3860          * EFAULT
3861          */
3862         if (copy_to_user(ioasa, &cmd->ioa_cb->ioasa,
3863                 sizeof(struct pmcraid_ioasa))) {
3864                 pmcraid_err("failed to copy ioasa buffer to user\n");
3865                 rc = -EFAULT;
3866         }
3867
3868         /* If the data transfer was from device, copy the data onto user
3869          * buffers
3870          */
3871         else if (direction == DMA_FROM_DEVICE && request_size > 0) {
3872                 rc = pmcraid_copy_sglist(cmd->sglist,
3873                                          request_buffer,
3874                                          request_size,
3875                                          direction);
3876                 if (rc) {
3877                         pmcraid_err("failed to copy user buffer\n");
3878                         rc = -EFAULT;
3879                 }
3880         }
3881
3882 out_free_sglist:
3883         pmcraid_release_passthrough_ioadls(cmd, request_size, direction);
3884
3885 out_free_cmd:
3886         pmcraid_return_cmd(cmd);
3887
3888 out_free_buffer:
3889         kfree(buffer);
3890
3891         return rc;
3892 }
3893
3894
3895
3896
3897 /**
3898  * pmcraid_ioctl_driver - ioctl handler for commands handled by driver itself
3899  *
3900  * @pinstance: pointer to adapter instance structure
3901  * @cmd: ioctl command passed in
3902  * @buflen: length of user_buffer
3903  * @user_buffer: user buffer pointer
3904  *
3905  * Return Value
3906  *   0 in case of success, otherwise appropriate error code
3907  */
3908 static long pmcraid_ioctl_driver(
3909         struct pmcraid_instance *pinstance,
3910         unsigned int cmd,
3911         unsigned int buflen,
3912         void __user *user_buffer
3913 )
3914 {
3915         int rc = -ENOSYS;
3916
3917         switch (cmd) {
3918         case PMCRAID_IOCTL_RESET_ADAPTER:
3919                 pmcraid_reset_bringup(pinstance);
3920                 rc = 0;
3921                 break;
3922
3923         default:
3924                 break;
3925         }
3926
3927         return rc;
3928 }
3929
3930 /**
3931  * pmcraid_check_ioctl_buffer - check for proper access to user buffer
3932  *
3933  * @cmd: ioctl command
3934  * @arg: user buffer
3935  * @hdr: pointer to kernel memory for pmcraid_ioctl_header
3936  *
3937  * Return Value
3938  *      negative error code if there are access issues, otherwise zero.
3939  *      Upon success, the ioctl header is copied out of the user buffer into *hdr.
3940  */
3941
3942 static int pmcraid_check_ioctl_buffer(
3943         int cmd,
3944         void __user *arg,
3945         struct pmcraid_ioctl_header *hdr
3946 )
3947 {
3948         int rc;
3949
3950         if (copy_from_user(hdr, arg, sizeof(struct pmcraid_ioctl_header))) {
3951                 pmcraid_err("couldn't copy ioctl header from user buffer\n");
3952                 return -EFAULT;
3953         }
3954
3955         /* check for valid driver signature */
3956         rc = memcmp(hdr->signature,
3957                     PMCRAID_IOCTL_SIGNATURE,
3958                     sizeof(hdr->signature));
3959         if (rc) {
3960                 pmcraid_err("signature verification failed\n");
3961                 return -EINVAL;
3962         }
3963
3964         return 0;
3965 }
3966
3967 /**
3968  *  pmcraid_chr_ioctl - char node ioctl entry point
3969  */
3970 static long pmcraid_chr_ioctl(
3971         struct file *filep,
3972         unsigned int cmd,
3973         unsigned long arg
3974 )
3975 {
3976         struct pmcraid_instance *pinstance = NULL;
3977         struct pmcraid_ioctl_header *hdr = NULL;
3978         void __user *argp = (void __user *)arg;
3979         int retval = -ENOTTY;
3980
3981         hdr = kmalloc(sizeof(struct pmcraid_ioctl_header), GFP_KERNEL);
3982
3983         if (!hdr) {
3984                 pmcraid_err("failed to allocate memory for ioctl header\n");
3985                 return -ENOMEM;
3986         }
3987
3988         retval = pmcraid_check_ioctl_buffer(cmd, argp, hdr);
3989
3990         if (retval) {
3991                 pmcraid_info("chr_ioctl: header check failed\n");
3992                 kfree(hdr);
3993                 return retval;
3994         }
3995
3996         pinstance = filep->private_data;
3997
3998         if (!pinstance) {
3999                 pmcraid_info("adapter instance is not found\n");
4000                 kfree(hdr);
4001                 return -ENOTTY;
4002         }
4003
4004         switch (_IOC_TYPE(cmd)) {
4005
4006         case PMCRAID_PASSTHROUGH_IOCTL:
4007                 /* If ioctl code is to download microcode, we need to block
4008                  * mid-layer requests.
4009                  */
4010                 if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE)
4011                         scsi_block_requests(pinstance->host);
4012
4013                 retval = pmcraid_ioctl_passthrough(pinstance, cmd,
4014                                                    hdr->buffer_length, argp);
4015
4016                 if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE)
4017                         scsi_unblock_requests(pinstance->host);
4018                 break;
4019
4020         case PMCRAID_DRIVER_IOCTL:
4021                 arg += sizeof(struct pmcraid_ioctl_header);
4022                 retval = pmcraid_ioctl_driver(pinstance, cmd, hdr->buffer_length,
4023                                               (void __user *)arg);
4024                 break;
4025
4026         default:
4027                 retval = -ENOTTY;
4028                 break;
4029         }
4030
4031         kfree(hdr);
4032
4033         return retval;
4034 }
4035
4036 /**
4037  * File operations structure for management interface
4038  */
4039 static const struct file_operations pmcraid_fops = {
4040         .owner = THIS_MODULE,
4041         .open = pmcraid_chr_open,
4042         .fasync = pmcraid_chr_fasync,
4043         .unlocked_ioctl = pmcraid_chr_ioctl,
4044 #ifdef CONFIG_COMPAT
4045         .compat_ioctl = pmcraid_chr_ioctl,
4046 #endif
4047         .llseek = noop_llseek,
4048 };
4049
4050
4051
4052
4053 /**
4054  * pmcraid_show_log_level - Display adapter's error logging level
4055  * @dev: class device struct
4056  * @buf: buffer
4057  *
4058  * Return value:
4059  *  number of bytes printed to buffer
4060  */
4061 static ssize_t pmcraid_show_log_level(
4062         struct device *dev,
4063         struct device_attribute *attr,
4064         char *buf)
4065 {
4066         struct Scsi_Host *shost = class_to_shost(dev);
4067         struct pmcraid_instance *pinstance =
4068                 (struct pmcraid_instance *)shost->hostdata;
4069         return snprintf(buf, PAGE_SIZE, "%d\n", pinstance->current_log_level);
4070 }
4071
4072 /**
4073  * pmcraid_store_log_level - Change the adapter's error logging level
4074  * @dev: class device struct
4075  * @buf: buffer
4076  * @count: not used
4077  *
4078  * Return value:
4079  *  number of bytes used from the buffer
4080  */
4081 static ssize_t pmcraid_store_log_level(
4082         struct device *dev,
4083         struct device_attribute *attr,
4084         const char *buf,
4085         size_t count
4086 )
4087 {
4088         struct Scsi_Host *shost;
4089         struct pmcraid_instance *pinstance;
4090         u8 val;
4091
4092         if (kstrtou8(buf, 10, &val))
4093                 return -EINVAL;
4094         /* log-level should be from 0 to 2 */
4095         if (val > 2)
4096                 return -EINVAL;
4097
4098         shost = class_to_shost(dev);
4099         pinstance = (struct pmcraid_instance *)shost->hostdata;
4100         pinstance->current_log_level = val;
4101
4102         return strlen(buf);
4103 }
4104
4105 static struct device_attribute pmcraid_log_level_attr = {
4106         .attr = {
4107                  .name = "log_level",
4108                  .mode = S_IRUGO | S_IWUSR,
4109                  },
4110         .show = pmcraid_show_log_level,
4111         .store = pmcraid_store_log_level,
4112 };
4113
4114 /**
4115  * pmcraid_show_drv_version - Display driver version
4116  * @dev: class device struct
4117  * @buf: buffer
4118  *
4119  * Return value:
4120  *  number of bytes printed to buffer
4121  */
4122 static ssize_t pmcraid_show_drv_version(
4123         struct device *dev,
4124         struct device_attribute *attr,
4125         char *buf
4126 )
4127 {
4128         return snprintf(buf, PAGE_SIZE, "version: %s\n",
4129                         PMCRAID_DRIVER_VERSION);
4130 }
4131
4132 static struct device_attribute pmcraid_driver_version_attr = {
4133         .attr = {
4134                  .name = "drv_version",
4135                  .mode = S_IRUGO,
4136                  },
4137         .show = pmcraid_show_drv_version,
4138 };
4139
4140 /**
4141  * pmcraid_show_adapter_id - Display driver assigned adapter id
4142  * @dev: class device struct
4143  * @buf: buffer
4144  *
4145  * Return value:
4146  *  number of bytes printed to buffer
4147  */
4148 static ssize_t pmcraid_show_adapter_id(
4149         struct device *dev,
4150         struct device_attribute *attr,
4151         char *buf
4152 )
4153 {
4154         struct Scsi_Host *shost = class_to_shost(dev);
4155         struct pmcraid_instance *pinstance =
4156                 (struct pmcraid_instance *)shost->hostdata;
4157         u32 adapter_id = (pinstance->pdev->bus->number << 8) |
4158                 pinstance->pdev->devfn;
4159         u32 aen_group = pmcraid_event_family.id;
4160
4161         return snprintf(buf, PAGE_SIZE,
4162                         "adapter id: %d\nminor: %d\naen group: %d\n",
4163                         adapter_id, MINOR(pinstance->cdev.dev), aen_group);
4164 }
4165
4166 static struct device_attribute pmcraid_adapter_id_attr = {
4167         .attr = {
4168                  .name = "adapter_id",
4169                  .mode = S_IRUGO,
4170                  },
4171         .show = pmcraid_show_adapter_id,
4172 };
4173
4174 static struct device_attribute *pmcraid_host_attrs[] = {
4175         &pmcraid_log_level_attr,
4176         &pmcraid_driver_version_attr,
4177         &pmcraid_adapter_id_attr,
4178         NULL,
4179 };
4180
4181
4182 /* host template structure for pmcraid driver */
4183 static struct scsi_host_template pmcraid_host_template = {
4184         .module = THIS_MODULE,
4185         .name = PMCRAID_DRIVER_NAME,
4186         .queuecommand = pmcraid_queuecommand,
4187         .eh_abort_handler = pmcraid_eh_abort_handler,
4188         .eh_bus_reset_handler = pmcraid_eh_bus_reset_handler,
4189         .eh_target_reset_handler = pmcraid_eh_target_reset_handler,
4190         .eh_device_reset_handler = pmcraid_eh_device_reset_handler,
4191         .eh_host_reset_handler = pmcraid_eh_host_reset_handler,
4192
4193         .slave_alloc = pmcraid_slave_alloc,
4194         .slave_configure = pmcraid_slave_configure,
4195         .slave_destroy = pmcraid_slave_destroy,
4196         .change_queue_depth = pmcraid_change_queue_depth,
4197         .can_queue = PMCRAID_MAX_IO_CMD,
4198         .this_id = -1,
4199         .sg_tablesize = PMCRAID_MAX_IOADLS,
4200         .max_sectors = PMCRAID_IOA_MAX_SECTORS,
4201         .no_write_same = 1,
4202         .cmd_per_lun = PMCRAID_MAX_CMD_PER_LUN,
4203         .use_clustering = ENABLE_CLUSTERING,
4204         .shost_attrs = pmcraid_host_attrs,
4205         .proc_name = PMCRAID_DRIVER_NAME,
4206 };
4207
4208 /*
4209  * pmcraid_isr_msix - implements MSI-X interrupt handling routine
4210  * @irq: interrupt vector number
4211  * @dev_id: pointer to the hrrq_vector
4212  *
4213  * Return Value
4214  *       IRQ_HANDLED if interrupt is handled or IRQ_NONE if ignored
4215  */
4216
4217 static irqreturn_t pmcraid_isr_msix(int irq, void *dev_id)
4218 {
4219         struct pmcraid_isr_param *hrrq_vector;
4220         struct pmcraid_instance *pinstance;
4221         unsigned long lock_flags;
4222         u32 intrs_val;
4223         int hrrq_id;
4224
4225         hrrq_vector = (struct pmcraid_isr_param *)dev_id;
4226         hrrq_id = hrrq_vector->hrrq_id;
4227         pinstance = hrrq_vector->drv_inst;
4228
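             /* only the first vector (hrrq_id 0) checks for error and
              * transition-to-operational interrupts; every vector schedules its
              * own tasklet to process HRRQ responses
              */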
4229         if (!hrrq_id) {
4230                 /* Read the interrupt */
4231                 intrs_val = pmcraid_read_interrupts(pinstance);
4232                 if (intrs_val &&
4233                         ((ioread32(pinstance->int_regs.host_ioa_interrupt_reg)
4234                         & DOORBELL_INTR_MSIX_CLR) == 0)) {
4235                         /* Any error interrupts including unit_check,
4236                          * initiate IOA reset. In case of unit check indicate
4237                          * to reset_sequence that IOA unit checked and prepare
4238                          * for a dump during reset sequence
4239                          */
4240                         if (intrs_val & PMCRAID_ERROR_INTERRUPTS) {
4241                                 if (intrs_val & INTRS_IOA_UNIT_CHECK)
4242                                         pinstance->ioa_unit_check = 1;
4243
4244                                 pmcraid_err("ISR: error interrupts: %x initiating reset\n",
4245                                             intrs_val);
4246                                 spin_lock_irqsave(pinstance->host->host_lock,
4247                                         lock_flags);
4248                                 pmcraid_initiate_reset(pinstance);
4249                                 spin_unlock_irqrestore(
4250                                         pinstance->host->host_lock,
4251                                         lock_flags);
4252                         }
4253                         /* If the interrupt was part of the IOA initialization,
4254                          * clear it: delete the timer and wake up the
4255                          * reset engine to proceed with the reset sequence.
4256                          */
4257                         if (intrs_val & INTRS_TRANSITION_TO_OPERATIONAL)
4258                                 pmcraid_clr_trans_op(pinstance);
4259
4260                         /* Clear the interrupt register by writing
4261                          * to host to ioa doorbell. Once done
4262                          * FW will clear the interrupt.
4263                          */
4264                         iowrite32(DOORBELL_INTR_MSIX_CLR,
4265                                 pinstance->int_regs.host_ioa_interrupt_reg);
4266                         ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
4267
4268
4269                 }
4270         }
4271
4272         tasklet_schedule(&(pinstance->isr_tasklet[hrrq_id]));
4273
4274         return IRQ_HANDLED;
4275 }
4276
4277 /**
4278  * pmcraid_isr  - implements legacy interrupt handling routine
4279  *
4280  * @irq: interrupt vector number
4281  * @dev_id: pointer to hrrq_vector
4282  *
4283  * Return Value
4284  *       IRQ_HANDLED if interrupt is handled or IRQ_NONE if ignored
4285  */
4286 static irqreturn_t pmcraid_isr(int irq, void *dev_id)
4287 {
4288         struct pmcraid_isr_param *hrrq_vector;
4289         struct pmcraid_instance *pinstance;
4290         u32 intrs;
4291         unsigned long lock_flags;
4292         int hrrq_id = 0;
4293
4294         /* In legacy interrupt mode the line may be shared across devices,
4295          * so the current interrupt may not have come from this IOA at all.
4296          */
4297         if (!dev_id) {
4298                 printk(KERN_INFO "%s(): NULL host pointer\n", __func__);
4299                 return IRQ_NONE;
4300         }
4301         hrrq_vector = (struct pmcraid_isr_param *)dev_id;
4302         pinstance = hrrq_vector->drv_inst;
4303
4304         intrs = pmcraid_read_interrupts(pinstance);
4305
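        /* None of the interrupt bits this driver cares about are set, so the
         * shared line was most likely raised by another device; return
         * IRQ_NONE and let the kernel try the other handlers on this line.
         */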
4306         if (unlikely((intrs & PMCRAID_PCI_INTERRUPTS) == 0))
4307                 return IRQ_NONE;
4308
4309         /* On any error interrupt, including unit_check, initiate an IOA reset.
4310          * In case of a unit check, indicate to the reset sequence that the IOA
4311          * unit checked and prepare for a dump during the reset sequence.
4312          */
4313         if (intrs & PMCRAID_ERROR_INTERRUPTS) {
4314
4315                 if (intrs & INTRS_IOA_UNIT_CHECK)
4316                         pinstance->ioa_unit_check = 1;
4317
4318                 iowrite32(intrs,
4319                           pinstance->int_regs.ioa_host_interrupt_clr_reg);
4320                 pmcraid_err("ISR: error interrupts: %x initiating reset\n",
4321                             intrs);
4322                 intrs = ioread32(
4323                                 pinstance->int_regs.ioa_host_interrupt_clr_reg);
4324                 spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
4325                 pmcraid_initiate_reset(pinstance);
4326                 spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
4327         } else {
4328                 /* If the interrupt was part of the IOA initialization,
4329                  * clear it: delete the timer and wake up the
4330                  * reset engine to proceed with the reset sequence.
4331                  */
4332                 if (intrs & INTRS_TRANSITION_TO_OPERATIONAL) {
4333                         pmcraid_clr_trans_op(pinstance);
4334                 } else {
4335                         iowrite32(intrs,
4336                                 pinstance->int_regs.ioa_host_interrupt_clr_reg);
4337                         ioread32(
4338                                 pinstance->int_regs.ioa_host_interrupt_clr_reg);
4339
4340                         tasklet_schedule(
4341                                         &(pinstance->isr_tasklet[hrrq_id]));
4342                 }
4343         }
4344
4345         return IRQ_HANDLED;
4346 }
4347
4348
4349 /**
4350  * pmcraid_worker_function -  worker thread function
4351  *
4352  * @workp: pointer to struct work_struct
4353  *
4354  * Return Value
4355  *       None
4356  */
4357
4358 static void pmcraid_worker_function(struct work_struct *workp)
4359 {
4360         struct pmcraid_instance *pinstance;
4361         struct pmcraid_resource_entry *res;
4362         struct pmcraid_resource_entry *temp;
4363         struct scsi_device *sdev;
4364         unsigned long lock_flags;
4365         unsigned long host_lock_flags;
4366         u16 fw_version;
4367         u8 bus, target, lun;
4368
4369         pinstance = container_of(workp, struct pmcraid_instance, worker_q);
4370         /* add resources only after host is added into system */
4371         if (!atomic_read(&pinstance->expose_resources))
4372                 return;
4373
4374         fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
4375
4376         spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
4377         list_for_each_entry_safe(res, temp, &pinstance->used_res_q, queue) {
4378
4379                 if (res->change_detected == RES_CHANGE_DEL && res->scsi_dev) {
4380                         sdev = res->scsi_dev;
4381
4382                         /* host_lock must be held before calling
4383                          * scsi_device_get
4384                          */
4385                         spin_lock_irqsave(pinstance->host->host_lock,
4386                                           host_lock_flags);
4387                         if (!scsi_device_get(sdev)) {
4388                                 spin_unlock_irqrestore(
4389                                                 pinstance->host->host_lock,
4390                                                 host_lock_flags);
4391                                 pmcraid_info("deleting %x from midlayer\n",
4392                                              res->cfg_entry.resource_address);
4393                                 list_move_tail(&res->queue,
4394                                                 &pinstance->free_res_q);
4395                                 spin_unlock_irqrestore(
4396                                         &pinstance->resource_lock,
4397                                         lock_flags);
4398                                 scsi_remove_device(sdev);
4399                                 scsi_device_put(sdev);
4400                                 spin_lock_irqsave(&pinstance->resource_lock,
4401                                                    lock_flags);
4402                                 res->change_detected = 0;
4403                         } else {
4404                                 spin_unlock_irqrestore(
4405                                                 pinstance->host->host_lock,
4406                                                 host_lock_flags);
4407                         }
4408                 }
4409         }
4410
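        /* Second pass: expose newly detected resources to the SCSI mid-layer.
         * resource_lock is dropped around scsi_add_device() because it may
         * sleep; change_detected is cleared before the lock is released.
         */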
4411         list_for_each_entry(res, &pinstance->used_res_q, queue) {
4412
4413                 if (res->change_detected == RES_CHANGE_ADD) {
4414
4415                         if (!pmcraid_expose_resource(fw_version,
4416                                                      &res->cfg_entry))
4417                                 continue;
4418
4419                         if (RES_IS_VSET(res->cfg_entry)) {
4420                                 bus = PMCRAID_VSET_BUS_ID;
4421                                 if (fw_version <= PMCRAID_FW_VERSION_1)
4422                                         target = res->cfg_entry.unique_flags1;
4423                                 else
4424                                         target = le16_to_cpu(res->cfg_entry.array_id) & 0xFF;
4425                                 lun = PMCRAID_VSET_LUN_ID;
4426                         } else {
4427                                 bus = PMCRAID_PHYS_BUS_ID;
4428                                 target =
4429                                      RES_TARGET(
4430                                         res->cfg_entry.resource_address);
4431                                 lun = RES_LUN(res->cfg_entry.resource_address);
4432                         }
4433
4434                         res->change_detected = 0;
4435                         spin_unlock_irqrestore(&pinstance->resource_lock,
4436                                                 lock_flags);
4437                         scsi_add_device(pinstance->host, bus, target, lun);
4438                         spin_lock_irqsave(&pinstance->resource_lock,
4439                                            lock_flags);
4440                 }
4441         }
4442
4443         spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
4444 }
4445
4446 /**
4447  * pmcraid_tasklet_function - Tasklet function
4448  *
4449  * @instance: pointer to the HRRQ's pmcraid_isr_param, cast to unsigned long
4450  *
4451  * Return Value
4452  *      None
4453  */
4454 static void pmcraid_tasklet_function(unsigned long instance)
4455 {
4456         struct pmcraid_isr_param *hrrq_vector;
4457         struct pmcraid_instance *pinstance;
4458         unsigned long hrrq_lock_flags;
4459         unsigned long pending_lock_flags;
4460         unsigned long host_lock_flags;
4461         spinlock_t *lockp; /* hrrq buffer lock */
4462         int id;
4463         u32 resp;
4464
4465         hrrq_vector = (struct pmcraid_isr_param *)instance;
4466         pinstance = hrrq_vector->drv_inst;
4467         id = hrrq_vector->hrrq_id;
4468         lockp = &(pinstance->hrrq_lock[id]);
4469
4470         /* loop through each of the commands responded by IOA. Each HRRQ buf is
4471          * protected by its own lock. Traversals must be done within this lock
4472          * as there may be multiple tasklets running on multiple CPUs. Note
4473          * that the lock is held just for picking up the response handle and
4474          * manipulating hrrq_curr/toggle_bit values.
4475          */
4476         spin_lock_irqsave(lockp, hrrq_lock_flags);
4477
4478         resp = le32_to_cpu(*(pinstance->hrrq_curr[id]));
4479
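        /* Consume entries while their toggle bit matches the value the host
         * expects for the current pass of the ring; host_toggle_bit is
         * flipped on wrap-around so entries left over from the previous pass
         * are not re-processed.
         */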
4480         while ((resp & HRRQ_TOGGLE_BIT) ==
4481                 pinstance->host_toggle_bit[id]) {
4482
4483                 int cmd_index = resp >> 2;
4484                 struct pmcraid_cmd *cmd = NULL;
4485
4486                 if (pinstance->hrrq_curr[id] < pinstance->hrrq_end[id]) {
4487                         pinstance->hrrq_curr[id]++;
4488                 } else {
4489                         pinstance->hrrq_curr[id] = pinstance->hrrq_start[id];
4490                         pinstance->host_toggle_bit[id] ^= 1u;
4491                 }
4492
4493                 if (cmd_index >= PMCRAID_MAX_CMD) {
4494                         /* In case of invalid response handle, log message */
4495                         pmcraid_err("Invalid response handle %d\n", cmd_index);
4496                         resp = le32_to_cpu(*(pinstance->hrrq_curr[id]));
4497                         continue;
4498                 }
4499
4500                 cmd = pinstance->cmd_list[cmd_index];
4501                 spin_unlock_irqrestore(lockp, hrrq_lock_flags);
4502
4503                 spin_lock_irqsave(&pinstance->pending_pool_lock,
4504                                    pending_lock_flags);
4505                 list_del(&cmd->free_list);
4506                 spin_unlock_irqrestore(&pinstance->pending_pool_lock,
4507                                         pending_lock_flags);
4508                 del_timer(&cmd->timer);
4509                 atomic_dec(&pinstance->outstanding_cmds);
4510
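                /* The reset-engine completion (pmcraid_ioa_reset) must run
                 * with the host lock held; all other completion callbacks
                 * are invoked without it.
                 */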
4511                 if (cmd->cmd_done == pmcraid_ioa_reset) {
4512                         spin_lock_irqsave(pinstance->host->host_lock,
4513                                           host_lock_flags);
4514                         cmd->cmd_done(cmd);
4515                         spin_unlock_irqrestore(pinstance->host->host_lock,
4516                                                host_lock_flags);
4517                 } else if (cmd->cmd_done != NULL) {
4518                         cmd->cmd_done(cmd);
4519                 }
4520                 /* loop over until we are done with all responses */
4521                 spin_lock_irqsave(lockp, hrrq_lock_flags);
4522                 resp = le32_to_cpu(*(pinstance->hrrq_curr[id]));
4523         }
4524
4525         spin_unlock_irqrestore(lockp, hrrq_lock_flags);
4526 }
4527
4528 /**
4529  * pmcraid_unregister_interrupt_handler - de-registers interrupt handlers
4530  * @pinstance: pointer to adapter instance structure
4531  *
4532  * This routine un-registers registered interrupt handler and
4533  * also frees irqs/vectors.
4534  *
4535  * Return Value
4536  *      None
4537  */
4538 static
4539 void pmcraid_unregister_interrupt_handler(struct pmcraid_instance *pinstance)
4540 {
4541         struct pci_dev *pdev = pinstance->pdev;
4542         int i;
4543
4544         for (i = 0; i < pinstance->num_hrrq; i++)
4545                 free_irq(pci_irq_vector(pdev, i), &pinstance->hrrq_vector[i]);
4546
4547         pinstance->interrupt_mode = 0;
4548         pci_free_irq_vectors(pdev);
4549 }
4550
4551 /**
4552  * pmcraid_register_interrupt_handler - registers interrupt handler
4553  * @pinstance: pointer to per-adapter instance structure
4554  *
4555  * Return Value
4556  *      0 on success, non-zero error code otherwise.
4557  */
4558 static int
4559 pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance)
4560 {
4561         struct pci_dev *pdev = pinstance->pdev;
4562         unsigned int irq_flag = PCI_IRQ_LEGACY, flag;
4563         int num_hrrq, rc, i;
4564         irq_handler_t isr;
4565
4566         if (pmcraid_enable_msix)
4567                 irq_flag |= PCI_IRQ_MSIX;
4568
4569         num_hrrq = pci_alloc_irq_vectors(pdev, 1, PMCRAID_NUM_MSIX_VECTORS,
4570                         irq_flag);
4571         if (num_hrrq < 0)
4572                 return num_hrrq;
4573
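        /* With MSI-X each HRRQ gets a dedicated vector, so IRQF_SHARED is
         * not needed and the lighter MSI-X handler is used. A legacy INTx
         * line may be shared, hence IRQF_SHARED and pmcraid_isr, which
         * checks that the interrupt actually came from this IOA.
         */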
4574         if (pdev->msix_enabled) {
4575                 flag = 0;
4576                 isr = pmcraid_isr_msix;
4577         } else {
4578                 flag = IRQF_SHARED;
4579                 isr = pmcraid_isr;
4580         }
4581
4582         for (i = 0; i < num_hrrq; i++) {
4583                 struct pmcraid_isr_param *vec = &pinstance->hrrq_vector[i];
4584
4585                 vec->hrrq_id = i;
4586                 vec->drv_inst = pinstance;
4587                 rc = request_irq(pci_irq_vector(pdev, i), isr, flag,
4588                                 PMCRAID_DRIVER_NAME, vec);
4589                 if (rc)
4590                         goto out_unwind;
4591         }
4592
4593         pinstance->num_hrrq = num_hrrq;
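        /* In MSI-X mode, notify the IOA through the host->IOA doorbell that
         * MSI-X delivery is in use; the read-back flushes the posted write.
         */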
4594         if (pdev->msix_enabled) {
4595                 pinstance->interrupt_mode = 1;
4596                 iowrite32(DOORBELL_INTR_MODE_MSIX,
4597                           pinstance->int_regs.host_ioa_interrupt_reg);
4598                 ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
4599         }
4600
4601         return 0;
4602
4603 out_unwind:
4604         while (--i >= 0)
4605                 free_irq(pci_irq_vector(pdev, i), &pinstance->hrrq_vector[i]);
4606         pci_free_irq_vectors(pdev);
4607         return rc;
4608 }
4609
4610 /**
4611  * pmcraid_release_cmd_blocks - release buffers allocated for command blocks
4612  * @pinstance: per adapter instance structure pointer
4613  * @max_index: number of buffer blocks to release
4614  *
4615  * Return Value
4616  *  None
4617  */
4618 static void
4619 pmcraid_release_cmd_blocks(struct pmcraid_instance *pinstance, int max_index)
4620 {
4621         int i;
4622         for (i = 0; i < max_index; i++) {
4623                 kmem_cache_free(pinstance->cmd_cachep, pinstance->cmd_list[i]);
4624                 pinstance->cmd_list[i] = NULL;
4625         }
4626         kmem_cache_destroy(pinstance->cmd_cachep);
4627         pinstance->cmd_cachep = NULL;
4628 }
4629
4630 /**
4631  * pmcraid_release_control_blocks - releases buffers alloced for control blocks
4632  * @pinstance: pointer to per adapter instance structure
4633  * @max_index: number of buffers (from 0 onwards) to release
4634  *
4635  * This function assumes that the command blocks for which control blocks are
4636  * linked are not released.
4637  *
4638  * Return Value
4639  *       None
4640  */
4641 static void
4642 pmcraid_release_control_blocks(
4643         struct pmcraid_instance *pinstance,
4644         int max_index
4645 )
4646 {
4647         int i;
4648
4649         if (pinstance->control_pool == NULL)
4650                 return;
4651
4652         for (i = 0; i < max_index; i++) {
4653                 dma_pool_free(pinstance->control_pool,
4654                               pinstance->cmd_list[i]->ioa_cb,
4655                               pinstance->cmd_list[i]->ioa_cb_bus_addr);
4656                 pinstance->cmd_list[i]->ioa_cb = NULL;
4657                 pinstance->cmd_list[i]->ioa_cb_bus_addr = 0;
4658         }
4659         dma_pool_destroy(pinstance->control_pool);
4660         pinstance->control_pool = NULL;
4661 }
4662
4663 /**
4664  * pmcraid_allocate_cmd_blocks - allocate memory for cmd block structures
4665  * @pinstance: pointer to per adapter instance structure
4666  *
4667  * Allocates memory for command blocks using kernel slab allocator.
4668  *
4669  * Return Value
4670  *      0 in case of success; -ENOMEM in case of failure
4671  */
4672 static int pmcraid_allocate_cmd_blocks(struct pmcraid_instance *pinstance)
4673 {
4674         int i;
4675
4676         sprintf(pinstance->cmd_pool_name, "pmcraid_cmd_pool_%d",
4677                 pinstance->host->unique_id);
4678
4679
4680         pinstance->cmd_cachep = kmem_cache_create(
4681                                         pinstance->cmd_pool_name,
4682                                         sizeof(struct pmcraid_cmd), 0,
4683                                         SLAB_HWCACHE_ALIGN, NULL);
4684         if (!pinstance->cmd_cachep)
4685                 return -ENOMEM;
4686
4687         for (i = 0; i < PMCRAID_MAX_CMD; i++) {
4688                 pinstance->cmd_list[i] =
4689                         kmem_cache_alloc(pinstance->cmd_cachep, GFP_KERNEL);
4690                 if (!pinstance->cmd_list[i]) {
4691                         pmcraid_release_cmd_blocks(pinstance, i);
4692                         return -ENOMEM;
4693                 }
4694         }
4695         return 0;
4696 }
4697
4698 /**
4699  * pmcraid_allocate_control_blocks - allocates memory control blocks
4700  * @pinstance : pointer to per adapter instance structure
4701  *
4702  * This function allocates PCI memory for DMAable buffers like IOARCB, IOADLs
4703  * and IOASAs. This is called after command blocks are already allocated.
4704  *
4705  * Return Value
4706  *  0 in case it can allocate all control blocks, otherwise -ENOMEM
4707  */
4708 static int pmcraid_allocate_control_blocks(struct pmcraid_instance *pinstance)
4709 {
4710         int i;
4711
4712         sprintf(pinstance->ctl_pool_name, "pmcraid_control_pool_%d",
4713                 pinstance->host->unique_id);
4714
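        /* Control blocks come from a DMA pool aligned to
         * PMCRAID_IOARCB_ALIGNMENT; the low bits of ioarcb_bus_addr are
         * masked off when requests are built, which relies on this alignment.
         */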
4715         pinstance->control_pool =
4716                 dma_pool_create(pinstance->ctl_pool_name,
4717                                 &pinstance->pdev->dev,
4718                                 sizeof(struct pmcraid_control_block),
4719                                 PMCRAID_IOARCB_ALIGNMENT, 0);
4720
4721         if (!pinstance->control_pool)
4722                 return -ENOMEM;
4723
4724         for (i = 0; i < PMCRAID_MAX_CMD; i++) {
4725                 pinstance->cmd_list[i]->ioa_cb =
4726                         dma_pool_alloc(
4727                                 pinstance->control_pool,
4728                                 GFP_KERNEL,
4729                                 &(pinstance->cmd_list[i]->ioa_cb_bus_addr));
4730
4731                 if (!pinstance->cmd_list[i]->ioa_cb) {
4732                         pmcraid_release_control_blocks(pinstance, i);
4733                         return -ENOMEM;
4734                 }
4735                 memset(pinstance->cmd_list[i]->ioa_cb, 0,
4736                         sizeof(struct pmcraid_control_block));
4737         }
4738         return 0;
4739 }
4740
4741 /**
4742  * pmcraid_release_host_rrqs - release memory allocated for hrrq buffer(s)
4743  * @pinstance: pointer to per adapter instance structure
4744  * @maxindex: size of hrrq buffer pointer array
4745  *
4746  * Return Value
4747  *      None
4748  */
4749 static void
4750 pmcraid_release_host_rrqs(struct pmcraid_instance *pinstance, int maxindex)
4751 {
4752         int i;
4753         for (i = 0; i < maxindex; i++) {
4754
4755                 pci_free_consistent(pinstance->pdev,
4756                                     HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD,
4757                                     pinstance->hrrq_start[i],
4758                                     pinstance->hrrq_start_bus_addr[i]);
4759
4760                 /* reset pointers and toggle bit to zeros */
4761                 pinstance->hrrq_start[i] = NULL;
4762                 pinstance->hrrq_start_bus_addr[i] = 0;
4763                 pinstance->host_toggle_bit[i] = 0;
4764         }
4765 }
4766
4767 /**
4768  * pmcraid_allocate_host_rrqs - Allocate and initialize host RRQ buffers
4769  * @pinstance: pointer to per adapter instance structure
4770  *
4771  * Return value
4772  *      0 hrrq buffers are allocated, -ENOMEM otherwise.
4773  */
4774 static int pmcraid_allocate_host_rrqs(struct pmcraid_instance *pinstance)
4775 {
4776         int i, buffer_size;
4777
4778         buffer_size = HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD;
4779
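        /* Each HRRQ is a DMA-coherent ring with one HRRQ_ENTRY_SIZE slot per
         * command; hrrq_curr/hrrq_end track the consumer position and
         * host_toggle_bit starts at 1, the value expected for the first pass
         * of responses.
         */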
4780         for (i = 0; i < pinstance->num_hrrq; i++) {
4781                 pinstance->hrrq_start[i] =
4782                         pci_alloc_consistent(
4783                                         pinstance->pdev,
4784                                         buffer_size,
4785                                         &(pinstance->hrrq_start_bus_addr[i]));
4786
4787                 if (!pinstance->hrrq_start[i]) {
4788                         pmcraid_err("pci_alloc failed for hrrq vector : %d\n",
4789                                     i);
4790                         pmcraid_release_host_rrqs(pinstance, i);
4791                         return -ENOMEM;
4792                 }
4793
4794                 memset(pinstance->hrrq_start[i], 0, buffer_size);
4795                 pinstance->hrrq_curr[i] = pinstance->hrrq_start[i];
4796                 pinstance->hrrq_end[i] =
4797                         pinstance->hrrq_start[i] + PMCRAID_MAX_CMD - 1;
4798                 pinstance->host_toggle_bit[i] = 1;
4799                 spin_lock_init(&pinstance->hrrq_lock[i]);
4800         }
4801         return 0;
4802 }
4803
4804 /**
4805  * pmcraid_release_hcams - release HCAM buffers
4806  *
4807  * @pinstance: pointer to per adapter instance structure
4808  *
4809  * Return value
4810  *  none
4811  */
4812 static void pmcraid_release_hcams(struct pmcraid_instance *pinstance)
4813 {
4814         if (pinstance->ccn.msg != NULL) {
4815                 pci_free_consistent(pinstance->pdev,
4816                                     PMCRAID_AEN_HDR_SIZE +
4817                                     sizeof(struct pmcraid_hcam_ccn_ext),
4818                                     pinstance->ccn.msg,
4819                                     pinstance->ccn.baddr);
4820
4821                 pinstance->ccn.msg = NULL;
4822                 pinstance->ccn.hcam = NULL;
4823                 pinstance->ccn.baddr = 0;
4824         }
4825
4826         if (pinstance->ldn.msg != NULL) {
4827                 pci_free_consistent(pinstance->pdev,
4828                                     PMCRAID_AEN_HDR_SIZE +
4829                                     sizeof(struct pmcraid_hcam_ldn),
4830                                     pinstance->ldn.msg,
4831                                     pinstance->ldn.baddr);
4832
4833                 pinstance->ldn.msg = NULL;
4834                 pinstance->ldn.hcam = NULL;
4835                 pinstance->ldn.baddr = 0;
4836         }
4837 }
4838
4839 /**
4840  * pmcraid_allocate_hcams - allocates HCAM buffers
4841  * @pinstance : pointer to per adapter instance structure
4842  *
4843  * Return Value:
4844  *   0 in case of successful allocation, non-zero otherwise
4845  */
4846 static int pmcraid_allocate_hcams(struct pmcraid_instance *pinstance)
4847 {
4848         pinstance->ccn.msg = pci_alloc_consistent(
4849                                         pinstance->pdev,
4850                                         PMCRAID_AEN_HDR_SIZE +
4851                                         sizeof(struct pmcraid_hcam_ccn_ext),
4852                                         &(pinstance->ccn.baddr));
4853
4854         pinstance->ldn.msg = pci_alloc_consistent(
4855                                         pinstance->pdev,
4856                                         PMCRAID_AEN_HDR_SIZE +
4857                                         sizeof(struct pmcraid_hcam_ldn),
4858                                         &(pinstance->ldn.baddr));
4859
4860         if (pinstance->ldn.msg == NULL || pinstance->ccn.msg == NULL) {
4861                 pmcraid_release_hcams(pinstance);
4862         } else {
4863                 pinstance->ccn.hcam =
4864                         (void *)pinstance->ccn.msg + PMCRAID_AEN_HDR_SIZE;
4865                 pinstance->ldn.hcam =
4866                         (void *)pinstance->ldn.msg + PMCRAID_AEN_HDR_SIZE;
4867
4868                 atomic_set(&pinstance->ccn.ignore, 0);
4869                 atomic_set(&pinstance->ldn.ignore, 0);
4870         }
4871
4872         return (pinstance->ldn.msg == NULL) ? -ENOMEM : 0;
4873 }
4874
4875 /**
4876  * pmcraid_release_config_buffers - release configuration table buffers
4877  * @pinstance: pointer to per adapter instance structure
4878  *
4879  * Return Value
4880  *       none
4881  */
4882 static void pmcraid_release_config_buffers(struct pmcraid_instance *pinstance)
4883 {
4884         if (pinstance->cfg_table != NULL &&
4885             pinstance->cfg_table_bus_addr != 0) {
4886                 pci_free_consistent(pinstance->pdev,
4887                                     sizeof(struct pmcraid_config_table),
4888                                     pinstance->cfg_table,
4889                                     pinstance->cfg_table_bus_addr);
4890                 pinstance->cfg_table = NULL;
4891                 pinstance->cfg_table_bus_addr = 0;
4892         }
4893
4894         if (pinstance->res_entries != NULL) {
4895                 int i;
4896
4897                 for (i = 0; i < PMCRAID_MAX_RESOURCES; i++)
4898                         list_del(&pinstance->res_entries[i].queue);
4899                 kfree(pinstance->res_entries);
4900                 pinstance->res_entries = NULL;
4901         }
4902
4903         pmcraid_release_hcams(pinstance);
4904 }
4905
4906 /**
4907  * pmcraid_allocate_config_buffers - allocates DMAable memory for config table
4908  * @pinstance : pointer to per adapter instance structure
4909  *
4910  * Return Value
4911  *      0 for successful allocation, -ENOMEM for any failure
4912  */
4913 static int pmcraid_allocate_config_buffers(struct pmcraid_instance *pinstance)
4914 {
4915         int i;
4916
4917         pinstance->res_entries =
4918                         kzalloc(sizeof(struct pmcraid_resource_entry) *
4919                                 PMCRAID_MAX_RESOURCES, GFP_KERNEL);
4920
4921         if (NULL == pinstance->res_entries) {
4922                 pmcraid_err("failed to allocate memory for resource table\n");
4923                 return -ENOMEM;
4924         }
4925
4926         for (i = 0; i < PMCRAID_MAX_RESOURCES; i++)
4927                 list_add_tail(&pinstance->res_entries[i].queue,
4928                               &pinstance->free_res_q);
4929
4930         pinstance->cfg_table =
4931                 pci_alloc_consistent(pinstance->pdev,
4932                                      sizeof(struct pmcraid_config_table),
4933                                      &pinstance->cfg_table_bus_addr);
4934
4935         if (NULL == pinstance->cfg_table) {
4936                 pmcraid_err("couldn't alloc DMA memory for config table\n");
4937                 pmcraid_release_config_buffers(pinstance);
4938                 return -ENOMEM;
4939         }
4940
4941         if (pmcraid_allocate_hcams(pinstance)) {
4942                 pmcraid_err("could not alloc DMA memory for HCAMS\n");
4943                 pmcraid_release_config_buffers(pinstance);
4944                 return -ENOMEM;
4945         }
4946
4947         return 0;
4948 }
4949
4950 /**
4951  * pmcraid_init_tasklets - registers tasklets for response handling
4952  *
4953  * @pinstance: pointer to adapter instance structure
4954  *
4955  * Return value
4956  *      none
4957  */
4958 static void pmcraid_init_tasklets(struct pmcraid_instance *pinstance)
4959 {
4960         int i;
4961         for (i = 0; i < pinstance->num_hrrq; i++)
4962                 tasklet_init(&pinstance->isr_tasklet[i],
4963                              pmcraid_tasklet_function,
4964                              (unsigned long)&pinstance->hrrq_vector[i]);
4965 }
4966
4967 /**
4968  * pmcraid_kill_tasklets - destroys tasklets registered for response handling
4969  *
4970  * @pinstance: pointer to adapter instance structure
4971  *
4972  * Return value
4973  *      none
4974  */
4975 static void pmcraid_kill_tasklets(struct pmcraid_instance *pinstance)
4976 {
4977         int i;
4978         for (i = 0; i < pinstance->num_hrrq; i++)
4979                 tasklet_kill(&pinstance->isr_tasklet[i]);
4980 }
4981
4982 /**
4983  * pmcraid_release_buffers - release per-adapter buffers allocated
4984  *
4985  * @pinstance: pointer to adapter soft state
4986  *
4987  * Return Value
4988  *      none
4989  */
4990 static void pmcraid_release_buffers(struct pmcraid_instance *pinstance)
4991 {
4992         pmcraid_release_config_buffers(pinstance);
4993         pmcraid_release_control_blocks(pinstance, PMCRAID_MAX_CMD);
4994         pmcraid_release_cmd_blocks(pinstance, PMCRAID_MAX_CMD);
4995         pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
4996
4997         if (pinstance->inq_data != NULL) {
4998                 pci_free_consistent(pinstance->pdev,
4999                                     sizeof(struct pmcraid_inquiry_data),
5000                                     pinstance->inq_data,
5001                                     pinstance->inq_data_baddr);
5002
5003                 pinstance->inq_data = NULL;
5004                 pinstance->inq_data_baddr = 0;
5005         }
5006
5007         if (pinstance->timestamp_data != NULL) {
5008                 pci_free_consistent(pinstance->pdev,
5009                                     sizeof(struct pmcraid_timestamp_data),
5010                                     pinstance->timestamp_data,
5011                                     pinstance->timestamp_data_baddr);
5012
5013                 pinstance->timestamp_data = NULL;
5014                 pinstance->timestamp_data_baddr = 0;
5015         }
5016 }
5017
5018 /**
5019  * pmcraid_init_buffers - allocates memory and initializes various structures
5020  * @pinstance: pointer to per adapter instance structure
5021  *
5022  * This routine pre-allocates memory based on the type of block as below:
5023  * cmdblocks(PMCRAID_MAX_CMD): kernel memory using kernel's slab_allocator,
5024  * IOARCBs(PMCRAID_MAX_CMD)  : DMAable memory, using pci pool allocator
5025  * config-table entries      : DMAable memory using pci_alloc_consistent
5026  * HostRRQs                  : DMAable memory, using pci_alloc_consistent
5027  *
5028  * Return Value
5029  *       0 in case all of the blocks are allocated, -ENOMEM otherwise.
5030  */
5031 static int pmcraid_init_buffers(struct pmcraid_instance *pinstance)
5032 {
5033         int i;
5034
5035         if (pmcraid_allocate_host_rrqs(pinstance)) {
5036                 pmcraid_err("couldn't allocate memory for %d host rrqs\n",
5037                              pinstance->num_hrrq);
5038                 return -ENOMEM;
5039         }
5040
5041         if (pmcraid_allocate_config_buffers(pinstance)) {
5042                 pmcraid_err("couldn't allocate memory for config buffers\n");
5043                 pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
5044                 return -ENOMEM;
5045         }
5046
5047         if (pmcraid_allocate_cmd_blocks(pinstance)) {
5048                 pmcraid_err("couldn't allocate memory for cmd blocks\n");
5049                 pmcraid_release_config_buffers(pinstance);
5050                 pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
5051                 return -ENOMEM;
5052         }
5053
5054         if (pmcraid_allocate_control_blocks(pinstance)) {
5055                 pmcraid_err("couldn't allocate memory control blocks\n");
5056                 pmcraid_release_config_buffers(pinstance);
5057                 pmcraid_release_cmd_blocks(pinstance, PMCRAID_MAX_CMD);
5058                 pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
5059                 return -ENOMEM;
5060         }
5061
5062         /* allocate DMAable memory for page D0 INQUIRY buffer */
5063         pinstance->inq_data = pci_alloc_consistent(
5064                                         pinstance->pdev,
5065                                         sizeof(struct pmcraid_inquiry_data),
5066                                         &pinstance->inq_data_baddr);
5067
5068         if (pinstance->inq_data == NULL) {
5069                 pmcraid_err("couldn't allocate DMA memory for INQUIRY\n");
5070                 pmcraid_release_buffers(pinstance);
5071                 return -ENOMEM;
5072         }
5073
5074         /* allocate DMAable memory for set timestamp data buffer */
5075         pinstance->timestamp_data = pci_alloc_consistent(
5076                                         pinstance->pdev,
5077                                         sizeof(struct pmcraid_timestamp_data),
5078                                         &pinstance->timestamp_data_baddr);
5079
5080         if (pinstance->timestamp_data == NULL) {
5081                 pmcraid_err("couldn't allocate DMA memory for"
5082                             " set timestamp\n");
5083                 pmcraid_release_buffers(pinstance);
5084                 return -ENOMEM;
5085         }
5086
5087
5088         /* Initialize all the command blocks and add them to free pool. No
5089          * need to lock (free_pool_lock) as this is done in initialization
5090          * itself
5091          */
5092         for (i = 0; i < PMCRAID_MAX_CMD; i++) {
5093                 struct pmcraid_cmd *cmdp = pinstance->cmd_list[i];
5094                 pmcraid_init_cmdblk(cmdp, i);
5095                 cmdp->drv_inst = pinstance;
5096                 list_add_tail(&cmdp->free_list, &pinstance->free_cmd_pool);
5097         }
5098
5099         return 0;
5100 }
5101
5102 /**
5103  * pmcraid_reinit_buffers - resets various buffer pointers
5104  * @pinstance: pointer to adapter instance
5105  * Return value
5106  *      none
5107  */
5108 static void pmcraid_reinit_buffers(struct pmcraid_instance *pinstance)
5109 {
5110         int i;
5111         int buffer_size = HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD;
5112
5113         for (i = 0; i < pinstance->num_hrrq; i++) {
5114                 memset(pinstance->hrrq_start[i], 0, buffer_size);
5115                 pinstance->hrrq_curr[i] = pinstance->hrrq_start[i];
5116                 pinstance->hrrq_end[i] =
5117                         pinstance->hrrq_start[i] + PMCRAID_MAX_CMD - 1;
5118                 pinstance->host_toggle_bit[i] = 1;
5119         }
5120 }
5121
5122 /**
5123  * pmcraid_init_instance - initialize per instance data structure
5124  * @pdev: pointer to pci device structure
5125  * @host: pointer to Scsi_Host structure
5126  * @mapped_pci_addr: memory mapped IOA configuration registers
5127  *
5128  * Return Value
5129  *       0 on success, non-zero in case of any failure
5130  */
5131 static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
5132                                  void __iomem *mapped_pci_addr)
5133 {
5134         struct pmcraid_instance *pinstance =
5135                 (struct pmcraid_instance *)host->hostdata;
5136
5137         pinstance->host = host;
5138         pinstance->pdev = pdev;
5139
5140         /* Initialize register addresses */
5141         pinstance->mapped_dma_addr = mapped_pci_addr;
5142
5143         /* Initialize chip-specific details */
5144         {
5145                 struct pmcraid_chip_details *chip_cfg = pinstance->chip_cfg;
5146                 struct pmcraid_interrupts *pint_regs = &pinstance->int_regs;
5147
5148                 pinstance->ioarrin = mapped_pci_addr + chip_cfg->ioarrin;
5149
5150                 pint_regs->ioa_host_interrupt_reg =
5151                         mapped_pci_addr + chip_cfg->ioa_host_intr;
5152                 pint_regs->ioa_host_interrupt_clr_reg =
5153                         mapped_pci_addr + chip_cfg->ioa_host_intr_clr;
5154                 pint_regs->ioa_host_msix_interrupt_reg =
5155                         mapped_pci_addr + chip_cfg->ioa_host_msix_intr;
5156                 pint_regs->host_ioa_interrupt_reg =
5157                         mapped_pci_addr + chip_cfg->host_ioa_intr;
5158                 pint_regs->host_ioa_interrupt_clr_reg =
5159                         mapped_pci_addr + chip_cfg->host_ioa_intr_clr;
5160
5161                 /* Current version of firmware exposes interrupt mask set
5162                  * and mask clr registers through memory mapped bar0.
5163                  */
5164                 pinstance->mailbox = mapped_pci_addr + chip_cfg->mailbox;
5165                 pinstance->ioa_status = mapped_pci_addr + chip_cfg->ioastatus;
5166                 pint_regs->ioa_host_interrupt_mask_reg =
5167                         mapped_pci_addr + chip_cfg->ioa_host_mask;
5168                 pint_regs->ioa_host_interrupt_mask_clr_reg =
5169                         mapped_pci_addr + chip_cfg->ioa_host_mask_clr;
5170                 pint_regs->global_interrupt_mask_reg =
5171                         mapped_pci_addr + chip_cfg->global_intr_mask;
5172         }
5173
5174         pinstance->ioa_reset_attempts = 0;
5175         init_waitqueue_head(&pinstance->reset_wait_q);
5176
5177         atomic_set(&pinstance->outstanding_cmds, 0);
5178         atomic_set(&pinstance->last_message_id, 0);
5179         atomic_set(&pinstance->expose_resources, 0);
5180
5181         INIT_LIST_HEAD(&pinstance->free_res_q);
5182         INIT_LIST_HEAD(&pinstance->used_res_q);
5183         INIT_LIST_HEAD(&pinstance->free_cmd_pool);
5184         INIT_LIST_HEAD(&pinstance->pending_cmd_pool);
5185
5186         spin_lock_init(&pinstance->free_pool_lock);
5187         spin_lock_init(&pinstance->pending_pool_lock);
5188         spin_lock_init(&pinstance->resource_lock);
5189         mutex_init(&pinstance->aen_queue_lock);
5190
5191         /* Work-queue (Shared) for deferred processing error handling */
5192         INIT_WORK(&pinstance->worker_q, pmcraid_worker_function);
5193
5194         /* Initialize the default log_level */
5195         pinstance->current_log_level = pmcraid_log_level;
5196
5197         /* Setup variables required for reset engine */
5198         pinstance->ioa_state = IOA_STATE_UNKNOWN;
5199         pinstance->reset_cmd = NULL;
5200         return 0;
5201 }
5202
5203 /**
5204  * pmcraid_shutdown - shutdown adapter controller.
5205  * @pdev: pci device struct
5206  *
5207  * Issues an adapter shutdown to the card and waits for its completion
5208  *
5209  * Return value
5210  *        none
5211  */
5212 static void pmcraid_shutdown(struct pci_dev *pdev)
5213 {
5214         struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
5215         pmcraid_reset_bringdown(pinstance);
5216 }
5217
5218
5219 /**
5220  * pmcraid_get_minor - returns unused minor number from minor number bitmap
5221  */
5222 static unsigned short pmcraid_get_minor(void)
5223 {
5224         int minor;
5225
5226         minor = find_first_zero_bit(pmcraid_minor, PMCRAID_MAX_ADAPTERS);
5227         __set_bit(minor, pmcraid_minor);
5228         return minor;
5229 }
5230
5231 /**
5232  * pmcraid_release_minor - releases given minor back to minor number bitmap
5233  */
5234 static void pmcraid_release_minor(unsigned short minor)
5235 {
5236         __clear_bit(minor, pmcraid_minor);
5237 }
5238
5239 /**
5240  * pmcraid_setup_chrdev - allocates a minor number and registers a char device
5241  *
5242  * @pinstance: pointer to adapter instance for which to register device
5243  *
5244  * Return value
5245  *      0 in case of success, otherwise non-zero
5246  */
5247 static int pmcraid_setup_chrdev(struct pmcraid_instance *pinstance)
5248 {
5249         int minor;
5250         int error;
5251
5252         minor = pmcraid_get_minor();
5253         cdev_init(&pinstance->cdev, &pmcraid_fops);
5254         pinstance->cdev.owner = THIS_MODULE;
5255
5256         error = cdev_add(&pinstance->cdev, MKDEV(pmcraid_major, minor), 1);
5257
5258         if (error)
5259                 pmcraid_release_minor(minor);
5260         else
5261                 device_create(pmcraid_class, NULL, MKDEV(pmcraid_major, minor),
5262                               NULL, "%s%u", PMCRAID_DEVFILE, minor);
5263         return error;
5264 }
5265
5266 /**
5267  * pmcraid_release_chrdev - unregisters per-adapter management interface
5268  *
5269  * @pinstance: pointer to adapter instance structure
5270  *
5271  * Return value
5272  *  none
5273  */
5274 static void pmcraid_release_chrdev(struct pmcraid_instance *pinstance)
5275 {
5276         pmcraid_release_minor(MINOR(pinstance->cdev.dev));
5277         device_destroy(pmcraid_class,
5278                        MKDEV(pmcraid_major, MINOR(pinstance->cdev.dev)));
5279         cdev_del(&pinstance->cdev);
5280 }
5281
5282 /**
5283  * pmcraid_remove - IOA hot plug remove entry point
5284  * @pdev: pci device struct
5285  *
5286  * Return value
5287  *        none
5288  */
5289 static void pmcraid_remove(struct pci_dev *pdev)
5290 {
5291         struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
5292
5293         /* remove the management interface (/dev file) for this device */
5294         pmcraid_release_chrdev(pinstance);
5295
5296         /* remove host template from scsi midlayer */
5297         scsi_remove_host(pinstance->host);
5298
5299         /* block requests from mid-layer */
5300         scsi_block_requests(pinstance->host);
5301
5302         /* initiate shutdown adapter */
5303         pmcraid_shutdown(pdev);
5304
5305         pmcraid_disable_interrupts(pinstance, ~0);
5306         flush_work(&pinstance->worker_q);
5307
5308         pmcraid_kill_tasklets(pinstance);
5309         pmcraid_unregister_interrupt_handler(pinstance);
5310         pmcraid_release_buffers(pinstance);
5311         iounmap(pinstance->mapped_dma_addr);
5312         pci_release_regions(pdev);
5313         scsi_host_put(pinstance->host);
5314         pci_disable_device(pdev);
5315
5316         return;
5317 }
5318
5319 #ifdef CONFIG_PM
5320 /**
5321  * pmcraid_suspend - driver suspend entry point for power management
5322  * @pdev:   PCI device structure
5323  * @state:  PCI power state to suspend routine
5324  *
5325  * Return Value - 0 always
5326  */
5327 static int pmcraid_suspend(struct pci_dev *pdev, pm_message_t state)
5328 {
5329         struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
5330
5331         pmcraid_shutdown(pdev);
5332         pmcraid_disable_interrupts(pinstance, ~0);
5333         pmcraid_kill_tasklets(pinstance);
5334         pci_set_drvdata(pinstance->pdev, pinstance);
5335         pmcraid_unregister_interrupt_handler(pinstance);
5336         pci_save_state(pdev);
5337         pci_disable_device(pdev);
5338         pci_set_power_state(pdev, pci_choose_state(pdev, state));
5339
5340         return 0;
5341 }
5342
5343 /**
5344  * pmcraid_resume - driver resume entry point PCI power management
5345  * @pdev: PCI device structure
5346  *
5347  * Return Value - 0 in case of success. Error code in case of any failure
5348  */
5349 static int pmcraid_resume(struct pci_dev *pdev)
5350 {
5351         struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
5352         struct Scsi_Host *host = pinstance->host;
5353         int rc;
5354
5355         pci_set_power_state(pdev, PCI_D0);
5356         pci_enable_wake(pdev, PCI_D0, 0);
5357         pci_restore_state(pdev);
5358
5359         rc = pci_enable_device(pdev);
5360
5361         if (rc) {
5362                 dev_err(&pdev->dev, "resume: Enable device failed\n");
5363                 return rc;
5364         }
5365
5366         pci_set_master(pdev);
5367
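        /* Prefer a 64-bit DMA mask when dma_addr_t is wide enough, falling
         * back to 32-bit otherwise; coherent allocations always use a
         * 32-bit mask.
         */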
5368         if ((sizeof(dma_addr_t) == 4) ||
5369              pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
5370                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5371
5372         if (rc == 0)
5373                 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
5374
5375         if (rc != 0) {
5376                 dev_err(&pdev->dev, "resume: Failed to set PCI DMA mask\n");
5377                 goto disable_device;
5378         }
5379
5380         pmcraid_disable_interrupts(pinstance, ~0);
5381         atomic_set(&pinstance->outstanding_cmds, 0);
5382         rc = pmcraid_register_interrupt_handler(pinstance);
5383
5384         if (rc) {
5385                 dev_err(&pdev->dev,
5386                         "resume: couldn't register interrupt handlers\n");
5387                 rc = -ENODEV;
5388                 goto release_host;
5389         }
5390
5391         pmcraid_init_tasklets(pinstance);
5392         pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS);
5393
5394         /* Start with hard reset sequence which brings up IOA to operational
5395          * state as well as completes the reset sequence.
5396          */
5397         pinstance->ioa_hard_reset = 1;
5398
5399         /* Start IOA firmware initialization and bring card to Operational
5400          * state.
5401          */
5402         if (pmcraid_reset_bringup(pinstance)) {
5403                 dev_err(&pdev->dev, "couldn't initialize IOA\n");
5404                 rc = -ENODEV;
5405                 goto release_tasklets;
5406         }
5407
5408         return 0;
5409
5410 release_tasklets:
5411         pmcraid_disable_interrupts(pinstance, ~0);
5412         pmcraid_kill_tasklets(pinstance);
5413         pmcraid_unregister_interrupt_handler(pinstance);
5414
5415 release_host:
5416         scsi_host_put(host);
5417
5418 disable_device:
5419         pci_disable_device(pdev);
5420
5421         return rc;
5422 }
5423
5424 #else
5425
5426 #define pmcraid_suspend NULL
5427 #define pmcraid_resume  NULL
5428
5429 #endif /* CONFIG_PM */
5430
5431 /**
5432  * pmcraid_complete_ioa_reset - Called by either timer or tasklet during
5433  *                              completion of the ioa reset
5434  * @cmd: pointer to reset command block
5435  */
5436 static void pmcraid_complete_ioa_reset(struct pmcraid_cmd *cmd)
5437 {
5438         struct pmcraid_instance *pinstance = cmd->drv_inst;
5439         unsigned long flags;
5440
5441         spin_lock_irqsave(pinstance->host->host_lock, flags);
5442         pmcraid_ioa_reset(cmd);
5443         spin_unlock_irqrestore(pinstance->host->host_lock, flags);
5444         scsi_unblock_requests(pinstance->host);
5445         schedule_work(&pinstance->worker_q);
5446 }
5447
5448 /**
5449  * pmcraid_set_supported_devs - sends SET SUPPORTED DEVICES to IOAFP
5450  *
5451  * @cmd: pointer to pmcraid_cmd structure
5452  *
5453  * Return Value
5454  *  none
5455  */
5456 static void pmcraid_set_supported_devs(struct pmcraid_cmd *cmd)
5457 {
5458         struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
5459         void (*cmd_done) (struct pmcraid_cmd *) = pmcraid_complete_ioa_reset;
5460
5461         pmcraid_reinit_cmdblk(cmd);
5462
5463         ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
5464         ioarcb->request_type = REQ_TYPE_IOACMD;
5465         ioarcb->cdb[0] = PMCRAID_SET_SUPPORTED_DEVICES;
5466         ioarcb->cdb[1] = ALL_DEVICES_SUPPORTED;
5467
5468         /* If this was called as part of resource table reinitialization due to
5469          * lost CCN, it is enough to return the command block back to free pool
5470          * as part of set_supported_devs completion function.
5471          */
5472         if (cmd->drv_inst->reinit_cfg_table) {
5473                 cmd->drv_inst->reinit_cfg_table = 0;
5474                 cmd->release = 1;
5475                 cmd_done = pmcraid_reinit_cfgtable_done;
5476         }
5477
5478         /* we will be done with the reset sequence after set supported devices,
5479          * setup the done function to return the command block back to free
5480          * pool
5481          */
5482         pmcraid_send_cmd(cmd,
5483                          cmd_done,
5484                          PMCRAID_SET_SUP_DEV_TIMEOUT,
5485                          pmcraid_timeout_handler);
5486         return;
5487 }
5488
5489 /**
5490  * pmcraid_set_timestamp - set the timestamp to IOAFP
5491  *
5492  * @cmd: pointer to pmcraid_cmd structure
5493  *
5494  * Return Value
5495  *  none
5496  */
5497 static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd)
5498 {
5499         struct pmcraid_instance *pinstance = cmd->drv_inst;
5500         struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
5501         __be32 time_stamp_len = cpu_to_be32(PMCRAID_TIMESTAMP_LEN);
5502         struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
5503         u64 timestamp;
5504
5505         timestamp = ktime_get_real_seconds() * 1000;
5506
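        /* Pack the low 48 bits of the millisecond timestamp into the buffer,
         * least significant byte first.
         */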
5507         pinstance->timestamp_data->timestamp[0] = (__u8)(timestamp);
5508         pinstance->timestamp_data->timestamp[1] = (__u8)((timestamp) >> 8);
5509         pinstance->timestamp_data->timestamp[2] = (__u8)((timestamp) >> 16);
5510         pinstance->timestamp_data->timestamp[3] = (__u8)((timestamp) >> 24);
5511         pinstance->timestamp_data->timestamp[4] = (__u8)((timestamp) >> 32);
5512         pinstance->timestamp_data->timestamp[5] = (__u8)((timestamp)  >> 40);
5513
5514         pmcraid_reinit_cmdblk(cmd);
5515         ioarcb->request_type = REQ_TYPE_SCSI;
5516         ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
5517         ioarcb->cdb[0] = PMCRAID_SCSI_SET_TIMESTAMP;
5518         ioarcb->cdb[1] = PMCRAID_SCSI_SERVICE_ACTION;
5519         memcpy(&(ioarcb->cdb[6]), &time_stamp_len, sizeof(time_stamp_len));
5520
5521         ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
5522                                         offsetof(struct pmcraid_ioarcb,
5523                                                 add_data.u.ioadl[0]));
5524         ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
5525         ioarcb->ioarcb_bus_addr &= cpu_to_le64(~(0x1FULL));
5526
5527         ioarcb->request_flags0 |= NO_LINK_DESCS;
5528         ioarcb->request_flags0 |= TRANSFER_DIR_WRITE;
5529         ioarcb->data_transfer_length =
5530                 cpu_to_le32(sizeof(struct pmcraid_timestamp_data));
5531         ioadl = &(ioarcb->add_data.u.ioadl[0]);
5532         ioadl->flags = IOADL_FLAGS_LAST_DESC;
5533         ioadl->address = cpu_to_le64(pinstance->timestamp_data_baddr);
5534         ioadl->data_len = cpu_to_le32(sizeof(struct pmcraid_timestamp_data));
5535
5536         if (!pinstance->timestamp_error) {
5537                 pinstance->timestamp_error = 0;
5538                 pmcraid_send_cmd(cmd, pmcraid_set_supported_devs,
5539                          PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
5540         } else {
5541                 pmcraid_send_cmd(cmd, pmcraid_return_cmd,
5542                          PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
5543                 return;
5544         }
5545 }
5546
5547
5548 /**
5549  * pmcraid_init_res_table - Initialize the resource table
5550  * @cmd:  pointer to pmcraid command struct
5551  *
5552  * This function looks through the existing resource table, comparing
5553  * it with the config table. This function will take care of old/new
5554  * devices and schedule adding/removing them from the mid-layer
5555  * as appropriate.
5556  *
5557  * Return value
5558  *       None
5559  */
5560 static void pmcraid_init_res_table(struct pmcraid_cmd *cmd)
5561 {
5562         struct pmcraid_instance *pinstance = cmd->drv_inst;
5563         struct pmcraid_resource_entry *res, *temp;
5564         struct pmcraid_config_table_entry *cfgte;
5565         unsigned long lock_flags;
5566         int found, rc, i;
5567         u16 fw_version;
5568         LIST_HEAD(old_res);
5569
5570         if (pinstance->cfg_table->flags & MICROCODE_UPDATE_REQUIRED)
5571                 pmcraid_err("IOA requires microcode download\n");
5572
5573         fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
5574
5575         /* resource list is protected by pinstance->resource_lock.
5576          * init_res_table can be called from probe (user-thread) or runtime
5577          * reset (timer/tasklet)
5578          */
5579         spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
5580
5581         list_for_each_entry_safe(res, temp, &pinstance->used_res_q, queue)
5582                 list_move_tail(&res->queue, &old_res);
5583
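        /* Firmware at or below PMCRAID_FW_VERSION_1 reports legacy fixed-size
         * config table entries; newer firmware reports extended entries, so
         * pick the matching array for each index.
         */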
5584         for (i = 0; i < le16_to_cpu(pinstance->cfg_table->num_entries); i++) {
5585                 if (be16_to_cpu(pinstance->inq_data->fw_version) <=
5586                                                 PMCRAID_FW_VERSION_1)
5587                         cfgte = &pinstance->cfg_table->entries[i];
5588                 else
5589                         cfgte = (struct pmcraid_config_table_entry *)
5590                                         &pinstance->cfg_table->entries_ext[i];
5591
5592                 if (!pmcraid_expose_resource(fw_version, cfgte))
5593                         continue;
5594
5595                 found = 0;
5596
5597                 /* If this entry was already detected and initialized */
5598                 list_for_each_entry_safe(res, temp, &old_res, queue) {
5599
5600                         rc = memcmp(&res->cfg_entry.resource_address,
5601                                     &cfgte->resource_address,
5602                                     sizeof(cfgte->resource_address));
5603                         if (!rc) {
5604                                 list_move_tail(&res->queue,
5605                                                 &pinstance->used_res_q);
5606                                 found = 1;
5607                                 break;
5608                         }
5609                 }
5610
5611                 /* If this is a new entry, initialize it and add it to the queue */
5612                 if (!found) {
5613
5614                         if (list_empty(&pinstance->free_res_q)) {
5615                                 pmcraid_err("Too many devices attached\n");
5616                                 break;
5617                         }
5618
5619                         found = 1;
5620                         res = list_entry(pinstance->free_res_q.next,
5621                                          struct pmcraid_resource_entry, queue);
5622
5623                         res->scsi_dev = NULL;
5624                         res->change_detected = RES_CHANGE_ADD;
5625                         res->reset_progress = 0;
5626                         list_move_tail(&res->queue, &pinstance->used_res_q);
5627                 }
5628
5629                 /* copy new configuration table entry details into driver
5630                  * maintained resource entry
5631                  */
5632                 if (found) {
5633                         memcpy(&res->cfg_entry, cfgte,
5634                                         pinstance->config_table_entry_size);
5635                         pmcraid_info("New res type:%x, vset:%x, addr:%x:\n",
5636                                  res->cfg_entry.resource_type,
5637                                  (fw_version <= PMCRAID_FW_VERSION_1 ?
5638                                         res->cfg_entry.unique_flags1 :
5639                                         le16_to_cpu(res->cfg_entry.array_id) & 0xFF),
5640                                  le32_to_cpu(res->cfg_entry.resource_address));
5641                 }
5642         }
5643
5644         /* Detect any deleted entries, mark them for deletion from mid-layer */
5645         list_for_each_entry_safe(res, temp, &old_res, queue) {
5646
5647                 if (res->scsi_dev) {
5648                         res->change_detected = RES_CHANGE_DEL;
5649                         res->cfg_entry.resource_handle =
5650                                 PMCRAID_INVALID_RES_HANDLE;
5651                         list_move_tail(&res->queue, &pinstance->used_res_q);
5652                 } else {
5653                         list_move_tail(&res->queue, &pinstance->free_res_q);
5654                 }
5655         }
5656
5657         /* release the resource list lock */
5658         spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
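        /* continue the bring-up sequence by sending the current timestamp
         * to the IOA
         */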
5659         pmcraid_set_timestamp(cmd);
5660 }
5661
5662 /**
5663  * pmcraid_querycfg - Send a Query IOA Config to the adapter.
5664  * @cmd: pointer to pmcraid_cmd struct
5665  *
5666  * This function sends a Query IOA Configuration command to the adapter to
5667  * retrieve the IOA configuration table.
5668  *
5669  * Return value:
5670  *      none
5671  */
5672 static void pmcraid_querycfg(struct pmcraid_cmd *cmd)
5673 {
5674         struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
5675         struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
5676         struct pmcraid_instance *pinstance = cmd->drv_inst;
5677         __be32 cfg_table_size = cpu_to_be32(sizeof(struct pmcraid_config_table));
5678
5679         if (be16_to_cpu(pinstance->inq_data->fw_version) <=
5680                                         PMCRAID_FW_VERSION_1)
5681                 pinstance->config_table_entry_size =
5682                         sizeof(struct pmcraid_config_table_entry);
5683         else
5684                 pinstance->config_table_entry_size =
5685                         sizeof(struct pmcraid_config_table_entry_ext);
5686
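        /* build the IOARCB for the internal Query IOA Config command */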
5687         ioarcb->request_type = REQ_TYPE_IOACMD;
5688         ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
5689
5690         ioarcb->cdb[0] = PMCRAID_QUERY_IOA_CONFIG;
5691
5692         /* firmware requires a 4-byte length field, specified in big-endian format */
5693         memcpy(&(ioarcb->cdb[10]), &cfg_table_size, sizeof(cfg_table_size));
5694
5695         /* Since the entire config table can be described by a single IOADL,
5696          * it can be embedded in the IOARCB itself
5697          */
5698         ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
5699                                         offsetof(struct pmcraid_ioarcb,
5700                                                 add_data.u.ioadl[0]));
5701         ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
5702         ioarcb->ioarcb_bus_addr &= cpu_to_le64(~0x1FULL);
5703
5704         ioarcb->request_flags0 |= NO_LINK_DESCS;
5705         ioarcb->data_transfer_length =
5706                 cpu_to_le32(sizeof(struct pmcraid_config_table));
5707
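        /* a single IOADL descriptor covers the entire config table buffer */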
5708         ioadl = &(ioarcb->add_data.u.ioadl[0]);
5709         ioadl->flags = IOADL_FLAGS_LAST_DESC;
5710         ioadl->address = cpu_to_le64(pinstance->cfg_table_bus_addr);
5711         ioadl->data_len = cpu_to_le32(sizeof(struct pmcraid_config_table));
5712
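        /* issue the command; pmcraid_init_res_table() runs as the completion
         * callback
         */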
5713         pmcraid_send_cmd(cmd, pmcraid_init_res_table,
5714                          PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
5715 }
5716
5717
5718 /**
5719  * pmcraid_probe - PCI probe entry point for PMC MaxRAID controller driver
5720  * @pdev: pointer to pci device structure
5721  * @dev_id: pointer to device ids structure
5722  *
5723  * Return Value
5724  *      returns 0 if the device is claimed and successfully configured.
5725  *      returns non-zero error code in case of any failure
5726  */
5727 static int pmcraid_probe(struct pci_dev *pdev,
5728                          const struct pci_device_id *dev_id)
5729 {
5730         struct pmcraid_instance *pinstance;
5731         struct Scsi_Host *host;
5732         void __iomem *mapped_pci_addr;
5733         int rc = PCIBIOS_SUCCESSFUL;
5734
5735         if (atomic_read(&pmcraid_adapter_count) >= PMCRAID_MAX_ADAPTERS) {
5736                 pmcraid_err
5737                         ("maximum number(%d) of supported adapters reached\n",
5738                          atomic_read(&pmcraid_adapter_count));
5739                 return -ENOMEM;
5740         }
5741
5742         atomic_inc(&pmcraid_adapter_count);
5743         rc = pci_enable_device(pdev);
5744
5745         if (rc) {
5746                 dev_err(&pdev->dev, "Cannot enable adapter\n");
5747                 atomic_dec(&pmcraid_adapter_count);
5748                 return rc;
5749         }
5750
5751         dev_info(&pdev->dev,
5752                 "Found new IOA(%x:%x), Total IOA count: %d\n",
5753                  pdev->vendor, pdev->device,
5754                  atomic_read(&pmcraid_adapter_count));
5755
5756         rc = pci_request_regions(pdev, PMCRAID_DRIVER_NAME);
5757
5758         if (rc < 0) {
5759                 dev_err(&pdev->dev,
5760                         "Couldn't register memory range of registers\n");
5761                 goto out_disable_device;
5762         }
5763
5764         mapped_pci_addr = pci_iomap(pdev, 0, 0);
5765
5766         if (!mapped_pci_addr) {
5767                 dev_err(&pdev->dev, "Couldn't map PCI registers memory\n");
5768                 rc = -ENOMEM;
5769                 goto out_release_regions;
5770         }
5771
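        /* enable bus mastering so the IOA can DMA to and from host memory */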
5772         pci_set_master(pdev);
5773
5774         /* Firmware requires the system bus address of the IOARCB to be within
5775          * the 32-bit addressable range even though it has a 64-bit IOARRIN
5776          * register. However, firmware supports 64-bit streaming DMA buffers,
5777          * whereas coherent buffers must be 32-bit addressable. Since
5778          * pci_alloc_consistent always returns memory within 4GB (if not, change
5779          * this logic), coherent buffers fall within firmware-acceptable ranges.
5780          */
5781         if ((sizeof(dma_addr_t) == 4) ||
5782             pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
5783                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5784
5785         /* firmware expects 32-bit DMA addresses for the IOARRIN register; set a
5786          * 32-bit mask so pci_alloc_consistent returns addresses within 4GB
5787          */
5788         if (rc == 0)
5789                 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
5790
5791         if (rc != 0) {
5792                 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
5793                 goto cleanup_nomem;
5794         }
5795
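        /* the per-adapter instance data is embedded in the Scsi_Host's
         * hostdata area
         */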
5796         host = scsi_host_alloc(&pmcraid_host_template,
5797                                 sizeof(struct pmcraid_instance));
5798
5799         if (!host) {
5800                 dev_err(&pdev->dev, "scsi_host_alloc failed!\n");
5801                 rc = -ENOMEM;
5802                 goto cleanup_nomem;
5803         }
5804
5805         host->max_id = PMCRAID_MAX_NUM_TARGETS_PER_BUS;
5806         host->max_lun = PMCRAID_MAX_NUM_LUNS_PER_TARGET;
5807         host->unique_id = host->host_no;
5808         host->max_channel = PMCRAID_MAX_BUS_TO_SCAN;
5809         host->max_cmd_len = PMCRAID_MAX_CDB_LEN;
5810
5811         /* zero out entire instance structure */
5812         pinstance = (struct pmcraid_instance *)host->hostdata;
5813         memset(pinstance, 0, sizeof(*pinstance));
5814
5815         pinstance->chip_cfg =
5816                 (struct pmcraid_chip_details *)(dev_id->driver_data);
5817
5818         rc = pmcraid_init_instance(pdev, host, mapped_pci_addr);
5819
5820         if (rc < 0) {
5821                 dev_err(&pdev->dev, "failed to initialize adapter instance\n");
5822                 goto out_scsi_host_put;
5823         }
5824
5825         pci_set_drvdata(pdev, pinstance);
5826
5827         /* Save PCI config-space for use following the reset */
5828         rc = pci_save_state(pinstance->pdev);
5829
5830         if (rc != 0) {
5831                 dev_err(&pdev->dev, "Failed to save PCI config space\n");
5832                 goto out_scsi_host_put;
5833         }
5834
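        /* mask all IOA interrupts until the interrupt handler is registered */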
5835         pmcraid_disable_interrupts(pinstance, ~0);
5836
5837         rc = pmcraid_register_interrupt_handler(pinstance);
5838
5839         if (rc) {
5840                 dev_err(&pdev->dev, "couldn't register interrupt handler\n");
5841                 goto out_scsi_host_put;
5842         }
5843
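        /* tasklets perform the deferred (bottom-half) processing of IOA
         * responses
         */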
5844         pmcraid_init_tasklets(pinstance);
5845
5846         /* allocate various buffers used by the LLD */
5847         rc = pmcraid_init_buffers(pinstance);
5848
5849         if (rc) {
5850                 pmcraid_err("couldn't allocate memory blocks\n");
5851                 goto out_unregister_isr;
5852         }
5853
5854         /* check the reset type required */
5855         pmcraid_reset_type(pinstance);
5856
5857         pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS);
5858
5859         /* Start IOA firmware initialization and bring card to Operational
5860          * state.
5861          */
5862         pmcraid_info("starting IOA initialization sequence\n");
5863         if (pmcraid_reset_bringup(pinstance)) {
5864                 dev_err(&pdev->dev, "couldn't initialize IOA\n");
5865                 rc = 1;
5866                 goto out_release_bufs;
5867         }
5868
5869         /* Add adapter instance into mid-layer list */
5870         rc = scsi_add_host(pinstance->host, &pdev->dev);
5871         if (rc != 0) {
5872                 pmcraid_err("couldn't add host into mid-layer: %d\n", rc);
5873                 goto out_release_bufs;
5874         }
5875
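        /* kick off an initial scan so configured devices are reported to the
         * SCSI mid-layer
         */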
5876         scsi_scan_host(pinstance->host);
5877
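        /* create the character device used for the management (IOCTL)
         * interface
         */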
5878         rc = pmcraid_setup_chrdev(pinstance);
5879
5880         if (rc != 0) {
5881                 pmcraid_err("couldn't create mgmt interface, error: %x\n",
5882                              rc);
5883                 goto out_remove_host;
5884         }
5885
5886         /* Schedule the worker thread to handle CCNs and take care of adding
5887          * devices to and removing devices from the OS
5888          */
5889         atomic_set(&pinstance->expose_resources, 1);
5890         schedule_work(&pinstance->worker_q);
5891         return rc;
5892
5893 out_remove_host:
5894         scsi_remove_host(host);
5895
5896 out_release_bufs:
5897         pmcraid_release_buffers(pinstance);
5898
5899 out_unregister_isr:
5900         pmcraid_kill_tasklets(pinstance);
5901         pmcraid_unregister_interrupt_handler(pinstance);
5902
5903 out_scsi_host_put:
5904         scsi_host_put(host);
5905
5906 cleanup_nomem:
5907         iounmap(mapped_pci_addr);
5908
5909 out_release_regions:
5910         pci_release_regions(pdev);
5911
5912 out_disable_device:
5913         atomic_dec(&pmcraid_adapter_count);
5914         pci_disable_device(pdev);
5915         return -ENODEV;
5916 }
5917
5918 /*
5919  * PCI driver structure of the pmcraid driver
5920  */
5921 static struct pci_driver pmcraid_driver = {
5922         .name = PMCRAID_DRIVER_NAME,
5923         .id_table = pmcraid_pci_table,
5924         .probe = pmcraid_probe,
5925         .remove = pmcraid_remove,
5926         .suspend = pmcraid_suspend,
5927         .resume = pmcraid_resume,
5928         .shutdown = pmcraid_shutdown
5929 };
5930
5931 /**
5932  * pmcraid_init - module load entry point
5933  */
5934 static int __init pmcraid_init(void)
5935 {
5936         dev_t dev;
5937         int error;
5938
5939         pmcraid_info("%s Device Driver version: %s\n",
5940                          PMCRAID_DRIVER_NAME, PMCRAID_DRIVER_VERSION);
5941
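        /* reserve a character device region for the per-adapter management
         * nodes
         */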
5942         error = alloc_chrdev_region(&dev, 0,
5943                                     PMCRAID_MAX_ADAPTERS,
5944                                     PMCRAID_DEVFILE);
5945
5946         if (error) {
5947                 pmcraid_err("failed to get a major number for adapters\n");
5948                 goto out_init;
5949         }
5950
5951         pmcraid_major = MAJOR(dev);
5952         pmcraid_class = class_create(THIS_MODULE, PMCRAID_DEVFILE);
5953
5954         if (IS_ERR(pmcraid_class)) {
5955                 error = PTR_ERR(pmcraid_class);
5956                 pmcraid_err("failed to register with sysfs, error = %x\n",
5957                             error);
5958                 goto out_unreg_chrdev;
5959         }
5960
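        /* register the netlink interface used to deliver async event
         * notifications (AENs) to user space
         */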
5961         error = pmcraid_netlink_init();
5962
5963         if (error) {
5964                 class_destroy(pmcraid_class);
5965                 goto out_unreg_chrdev;
5966         }
5967
5968         error = pci_register_driver(&pmcraid_driver);
5969
5970         if (error == 0)
5971                 goto out_init;
5972
5973         pmcraid_err("failed to register pmcraid driver, error = %x\n",
5974                      error);
5975         class_destroy(pmcraid_class);
5976         pmcraid_netlink_release();
5977
5978 out_unreg_chrdev:
5979         unregister_chrdev_region(MKDEV(pmcraid_major, 0), PMCRAID_MAX_ADAPTERS);
5980
5981 out_init:
5982         return error;
5983 }
5984
5985 /**
5986  * pmcraid_exit - module unload entry point
5987  */
5988 static void __exit pmcraid_exit(void)
5989 {
5990         pmcraid_netlink_release();
5991         unregister_chrdev_region(MKDEV(pmcraid_major, 0),
5992                                  PMCRAID_MAX_ADAPTERS);
5993         pci_unregister_driver(&pmcraid_driver);
5994         class_destroy(pmcraid_class);
5995 }
5996
5997 module_init(pmcraid_init);
5998 module_exit(pmcraid_exit);