/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *      - Ultra 320 SCSI controller
 *      - PCI-X host interface
 *      - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *      - Non-Volatile Write Cache
 *      - Supports attachment of non-RAID disks, tape, and optical devices
 *      - RAID Levels 0, 5, 10
 *      - Hot spare
 *      - Background Parity Checking
 *      - Background Data Scrubbing
 *      - Ability to increase the capacity of an existing RAID 5 disk array
 *              by adding disks
 *
 * Driver Features:
 *      - Tagged command queuing
 *      - Adapter microcode download
 *      - PCI hot plug
 *      - SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 16;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
        { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
                .mailbox = 0x0042C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x0022C,
                        .clr_interrupt_mask_reg = 0x00230,
                        .clr_interrupt_mask_reg32 = 0x00230,
                        .sense_interrupt_mask_reg = 0x0022C,
                        .sense_interrupt_mask_reg32 = 0x0022C,
                        .clr_interrupt_reg = 0x00228,
                        .clr_interrupt_reg32 = 0x00228,
                        .sense_interrupt_reg = 0x00224,
                        .sense_interrupt_reg32 = 0x00224,
                        .ioarrin_reg = 0x00404,
                        .sense_uproc_interrupt_reg = 0x00214,
                        .sense_uproc_interrupt_reg32 = 0x00214,
                        .set_uproc_interrupt_reg = 0x00214,
                        .set_uproc_interrupt_reg32 = 0x00214,
                        .clr_uproc_interrupt_reg = 0x00218,
                        .clr_uproc_interrupt_reg32 = 0x00218
                }
        },
        { /* Snipe and Scamp */
                .mailbox = 0x0052C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x00288,
                        .clr_interrupt_mask_reg = 0x0028C,
                        .clr_interrupt_mask_reg32 = 0x0028C,
                        .sense_interrupt_mask_reg = 0x00288,
                        .sense_interrupt_mask_reg32 = 0x00288,
                        .clr_interrupt_reg = 0x00284,
                        .clr_interrupt_reg32 = 0x00284,
                        .sense_interrupt_reg = 0x00280,
                        .sense_interrupt_reg32 = 0x00280,
                        .ioarrin_reg = 0x00504,
                        .sense_uproc_interrupt_reg = 0x00290,
                        .sense_uproc_interrupt_reg32 = 0x00290,
                        .set_uproc_interrupt_reg = 0x00290,
                        .set_uproc_interrupt_reg32 = 0x00290,
                        .clr_uproc_interrupt_reg = 0x00294,
                        .clr_uproc_interrupt_reg32 = 0x00294
                }
        },
        { /* CRoC */
                .mailbox = 0x00044,
                .max_cmds = 1000,
                .cache_line_size = 0x20,
                .clear_isr = 0,
                .iopoll_weight = 64,
                {
                        .set_interrupt_mask_reg = 0x00010,
                        .clr_interrupt_mask_reg = 0x00018,
                        .clr_interrupt_mask_reg32 = 0x0001C,
                        .sense_interrupt_mask_reg = 0x00010,
                        .sense_interrupt_mask_reg32 = 0x00014,
                        .clr_interrupt_reg = 0x00008,
                        .clr_interrupt_reg32 = 0x0000C,
                        .sense_interrupt_reg = 0x00000,
                        .sense_interrupt_reg32 = 0x00004,
                        .ioarrin_reg = 0x00070,
                        .sense_uproc_interrupt_reg = 0x00020,
                        .sense_uproc_interrupt_reg32 = 0x00024,
                        .set_uproc_interrupt_reg = 0x00020,
                        .set_uproc_interrupt_reg32 = 0x00024,
                        .clr_uproc_interrupt_reg = 0x00028,
                        .clr_uproc_interrupt_reg32 = 0x0002C,
                        .init_feedback_reg = 0x0005C,
                        .dump_addr_reg = 0x00064,
                        .dump_data_reg = 0x00068,
                        .endian_swap_reg = 0x00084
                }
        },
};

static const struct ipr_chip_t ipr_chip[] = {
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
        IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debug logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
                 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1-16). (default: 16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
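
/*
 * Example (illustrative only): all of the parameters above are set at
 * module load time, e.g.:
 *
 *   modprobe ipr max_speed=2 number_of_msix=8 log_level=2
 *
 * The values shown are arbitrary; see the MODULE_PARM_DESC text above
 * for the valid ranges and defaults.
 */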

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
        {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8155: An unknown error was received"},
        {0x00330000, 0, 0,
        "Soft underlength error"},
        {0x005A0000, 0, 0,
        "Command to be cancelled not found"},
        {0x00808000, 0, 0,
        "Qualified success"},
        {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Soft device bus error recovered by the IOA"},
        {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4101: Soft device bus fabric error"},
        {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block guard error recovered by the device"},
        {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block reference tag error recovered by the device"},
        {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered scatter list tag / sequence number error"},
        {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
        {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered logical block sequence number error on IOA to Host transfer"},
        {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Recovered logical block reference tag error detected by the IOA"},
        {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Logical block guard error recovered by the IOA"},
        {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Device sector reassign successful"},
        {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by device rewrite procedures"},
        {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
        "7001: IOA sector reassignment successful"},
        {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Soft media error. Sector reassignment recommended"},
        {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by IOA rewrite procedures"},
        {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft PCI bus error recovered by the IOA"},
        {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the IOA"},
        {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the device"},
        {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft IOA error recovered by the IOA"},
        {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFA: Undefined device response recovered by the IOA"},
        {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device bus error, message or command phase"},
        {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Task Management Function failed"},
        {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Failure prediction threshold exceeded"},
        {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8009: Impending cache battery pack failure"},
        {0x02040100, 0, 0,
        "Logical Unit in process of becoming ready"},
        {0x02040200, 0, 0,
        "Initializing command required"},
        {0x02040400, 0, 0,
        "34FF: Disk device format in progress"},
        {0x02040C00, 0, 0,
        "Logical unit not accessible, target port in unavailable state"},
        {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9070: IOA requested reset"},
        {0x023F0000, 0, 0,
        "Synchronization required"},
        {0x02408500, 0, 0,
        "IOA microcode download required"},
        {0x02408600, 0, 0,
        "Device bus connection is prohibited by host"},
        {0x024E0000, 0, 0,
        "Not ready, IOA shutdown"},
        {0x025A0000, 0, 0,
        "Not ready, IOA has been shutdown"},
        {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: Storage subsystem configuration error"},
        {0x03110B00, 0, 0,
        "FFF5: Medium error, data unreadable, recommend reassign"},
        {0x03110C00, 0, 0,
        "7000: Medium error, data unreadable, do not reassign"},
        {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF3: Disk media format bad"},
        {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3002: Addressed device failed to respond to selection"},
        {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3100: Device bus error"},
        {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3109: IOA timed out a device command"},
        {0x04088000, 0, 0,
        "3120: SCSI bus is not operational"},
        {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4100: Hard device bus fabric error"},
        {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block guard error detected by the device"},
        {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block reference tag error detected by the device"},
        {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Scatter list tag / sequence number error"},
        {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Logical block CRC error on IOA to Host transfer"},
        {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Logical block sequence number error on IOA to Host transfer"},
        {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block reference tag error detected by the IOA"},
        {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block guard error detected by the IOA"},
        {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9000: IOA reserved area data check"},
        {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9001: IOA reserved area invalid data pattern"},
        {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9002: IOA reserved area LRC error"},
        {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
        "Hardware Error, IOA metadata access error"},
        {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
        "102E: Out of alternate sectors for disk storage"},
        {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer underlength error"},
        {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer overlength error"},
        {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3400: Logical unit failure"},
        {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Device microcode is corrupt"},
        {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: PCI bus error"},
        {0x04430000, 1, 0,
        "Unsupported device bus message received"},
        {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Disk device problem"},
        {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Permanent IOA failure"},
        {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3010: Disk device returned wrong response to IOA"},
        {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
        "8151: IOA microcode error"},
        {0x04448500, 0, 0,
        "Device bus status error"},
        {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
        "8157: IOA error requiring IOA reset to recover"},
        {0x04448700, 0, 0,
        "ATA device status error"},
        {0x04490000, 0, 0,
        "Message reject received from the device"},
        {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8008: A permanent cache battery pack failure occurred"},
        {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9090: Disk unit has been modified after the last known status"},
        {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9081: IOA detected device error"},
        {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
        "9082: IOA detected device error"},
        {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: Device bus error, message or command phase"},
        {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: SAS Command / Task Management Function failed"},
        {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9091: Incorrect hardware configuration change has been detected"},
        {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9073: Invalid multi-adapter configuration"},
        {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4010: Incorrect connection between cascaded expanders"},
        {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4020: Connections exceed IOA design limits"},
        {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4030: Incorrect multipath connection"},
        {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4110: Unsupported enclosure function"},
        {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
        "4120: SAS cable VPD cannot be read"},
        {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Command to logical unit failed"},
        {0x05240000, 1, 0,
        "Illegal request, invalid request type or request packet"},
        {0x05250000, 0, 0,
        "Illegal request, invalid resource handle"},
        {0x05258000, 0, 0,
        "Illegal request, commands not allowed to this device"},
        {0x05258100, 0, 0,
        "Illegal request, command not allowed to a secondary adapter"},
        {0x05258200, 0, 0,
        "Illegal request, command not allowed to a non-optimized resource"},
        {0x05260000, 0, 0,
        "Illegal request, invalid field in parameter list"},
        {0x05260100, 0, 0,
        "Illegal request, parameter not supported"},
        {0x05260200, 0, 0,
        "Illegal request, parameter value invalid"},
        {0x052C0000, 0, 0,
        "Illegal request, command sequence error"},
        {0x052C8000, 1, 0,
        "Illegal request, dual adapter support not enabled"},
        {0x052C8100, 1, 0,
        "Illegal request, another cable connector was physically disabled"},
        {0x054E8000, 1, 0,
        "Illegal request, inconsistent group id/group count"},
        {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9031: Array protection temporarily suspended, protection resuming"},
        {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9040: Array protection temporarily suspended, protection resuming"},
        {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4080: IOA exceeded maximum operating temperature"},
        {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4085: Service required"},
        {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3140: Device bus not ready to ready transition"},
        {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset"},
        {0x06290500, 0, 0,
        "FFFE: SCSI bus transition to single ended"},
        {0x06290600, 0, 0,
        "FFFE: SCSI bus transition to LVD"},
        {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset by another initiator"},
        {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3029: A device replacement has occurred"},
        {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4102: Device bus fabric performance degradation"},
        {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9051: IOA cache data exists for a missing or failed device"},
        {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
        {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9025: Disk unit is not supported at its physical location"},
        {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: IOA detected a SCSI bus configuration error"},
        {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3150: SCSI bus configuration error"},
        {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9074: Asymmetric advanced function disk configuration"},
        {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4040: Incomplete multipath connection between IOA and enclosure"},
        {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
        "4041: Incomplete multipath connection between enclosure and device"},
        {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9075: Incomplete multipath connection between IOA and remote IOA"},
        {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9076: Configuration error, missing remote IOA"},
        {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4050: Enclosure does not support a required multipath function"},
        {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
        "4121: Configuration error, required cable is missing"},
        {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
        "4122: Cable is not plugged into the correct location on remote IOA"},
        {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "4123: Configuration error, invalid cable vital product data"},
        {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "4124: Configuration error, both cable ends are plugged into the same IOA"},
        {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4070: Logically bad block written on device"},
        {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9041: Array protection temporarily suspended"},
        {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9042: Corrupt array parity detected on specified device"},
        {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9030: Array no longer protected due to missing or failed disk unit"},
        {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9071: Link operational transition"},
        {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9072: Link not operational transition"},
        {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9032: Array exposed but still protected"},
        {0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
        "70DD: Device forced failed by disrupt device command"},
        {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4061: Multipath redundancy level got better"},
        {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4060: Multipath redundancy level got worse"},
        {0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
        "9083: Device raw mode enabled"},
        {0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
        "9084: Device raw mode disabled"},
        {0x07270000, 0, 0,
        "Failure due to other device"},
        {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9008: IOA does not support functions expected by devices"},
        {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9010: Cache data associated with attached devices cannot be found"},
        {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9011: Cache data belongs to devices other than those attached"},
        {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9020: Array missing 2 or more devices with only 1 device present"},
        {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9021: Array missing 2 or more devices with 2 or more devices present"},
        {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9022: Exposed array is missing a required device"},
        {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9023: Array member(s) not at required physical locations"},
        {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9024: Array not functional due to present hardware configuration"},
        {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9026: Array not functional due to present hardware configuration"},
        {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9027: Array is missing a device and parity is out of sync"},
        {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9028: Maximum number of arrays already exist"},
        {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9050: Required cache data cannot be located for a disk unit"},
        {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9052: Cache data exists for a device that has been modified"},
        {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9054: IOA resources not available due to previous problems"},
        {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9092: Disk unit requires initialization before use"},
        {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9029: Incorrect hardware configuration change has been detected"},
        {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9060: One or more disk pairs are missing from an array"},
        {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9061: One or more disks are missing from an array"},
        {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9062: One or more disks are missing from an array"},
        {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9063: Maximum number of functional arrays has been exceeded"},
        {0x07279A00, 0, 0,
        "Data protect, other volume set problem"},
        {0x0B260000, 0, 0,
        "Aborted command, invalid descriptor"},
        {0x0B3F9000, 0, 0,
        "Target operating conditions have changed, dual adapter takeover"},
        {0x0B530200, 0, 0,
        "Aborted command, medium removal prevented"},
        {0x0B5A0000, 0, 0,
        "Command terminated by host"},
        {0x0B5B8000, 0, 0,
        "Aborted command, command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
        { "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
        { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
        { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
        { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
        { "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
                                   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:    ipr command struct
 * @type:               trace type
 * @add_data:   additional data
 *
 * Return value:
 *      none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
                         u8 type, u32 add_data)
{
        struct ipr_trace_entry *trace_entry;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        unsigned int trace_index;

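        /*
         * Atomically claim the next trace slot; masking with
         * IPR_TRACE_INDEX_MASK wraps the index, so the trace behaves as
         * a circular buffer (this assumes the trace array holds a
         * power-of-two number of entries).
         */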
        trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
        trace_entry = &ioa_cfg->trace[trace_index];
        trace_entry->time = jiffies;
        trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
        trace_entry->type = type;
        if (ipr_cmd->ioa_cfg->sis64)
                trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
        else
                trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
        trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
        trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
        trace_entry->u.add_data = add_data;
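        /*
         * Order the trace-entry stores before any subsequent stores so a
         * concurrent reader never observes a partially written entry.
         */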
        wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
        unsigned long lock_flags;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        ipr_cmd->done(ipr_cmd);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
        struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
        struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
        dma_addr_t dma_addr = ipr_cmd->dma_addr;
        int hrrq_id;

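        /* Preserve the HRRQ routing id across the command-packet memset below */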
        hrrq_id = ioarcb->cmd_pkt.hrrq_id;
        memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
        ioarcb->cmd_pkt.hrrq_id = hrrq_id;
        ioarcb->data_transfer_length = 0;
        ioarcb->read_data_transfer_length = 0;
        ioarcb->ioadl_len = 0;
        ioarcb->read_ioadl_len = 0;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioarcb->u.sis64_addr_data.data_ioadl_addr =
                        cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
                ioasa64->u.gata.status = 0;
        } else {
                ioarcb->write_ioadl_addr =
                        cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
                ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
                ioasa->u.gata.status = 0;
        }

        ioasa->hdr.ioasc = 0;
        ioasa->hdr.residual_data_len = 0;
        ipr_cmd->scsi_cmd = NULL;
        ipr_cmd->qc = NULL;
        ipr_cmd->sense_buffer[0] = 0;
        ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:    ipr command struct
 * @fast_done:  fast done function call-back
 *
 * Return value:
 *      none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
                              void (*fast_done) (struct ipr_cmnd *))
{
        ipr_reinit_ipr_cmnd(ipr_cmd);
        ipr_cmd->u.scratch = 0;
        ipr_cmd->sibling = NULL;
        ipr_cmd->eh_comp = NULL;
        ipr_cmd->fast_done = fast_done;
        timer_setup(&ipr_cmd->timer, NULL, 0);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:       hrr queue
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
        struct ipr_cmnd *ipr_cmd = NULL;

        if (likely(!list_empty(&hrrq->hrrq_free_q))) {
                ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
                        struct ipr_cmnd, queue);
                list_del(&ipr_cmd->queue);
        }

        return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd =
                __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
        ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
        return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:    ioa config struct
 * @clr_ints:     interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *      none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
                                          u32 clr_ints)
{
        volatile u32 int_reg;
        int i;

        /* Stop new interrupts */
        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
                spin_lock(&ioa_cfg->hrrq[i]._lock);
                ioa_cfg->hrrq[i].allow_interrupts = 0;
                spin_unlock(&ioa_cfg->hrrq[i]._lock);
        }
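        /*
         * Make the allow_interrupts updates visible before the interrupt
         * mask registers are written below.
         */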
        wmb();

        /* Set interrupt mask to stop all new interrupts */
        if (ioa_cfg->sis64)
                writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
        else
                writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

        /* Clear any pending interrupts */
        if (ioa_cfg->sis64)
                writel(~0, ioa_cfg->regs.clr_interrupt_reg);
        writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

        if (pcix_cmd_reg == 0)
                return 0;

        if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
                dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
                return -EIO;
        }

        ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
        return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

        if (pcix_cmd_reg) {
                if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                          ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
                        dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
                        return -EIO;
                }
        }

        return 0;
}

/**
 * __ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *      none
 **/
static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct ata_queued_cmd *qc = ipr_cmd->qc;
        struct ipr_sata_port *sata_port = qc->ap->private_data;

        qc->err_mask |= AC_ERR_OTHER;
        sata_port->ioasa.status |= ATA_BUSY;
        ata_qc_complete(qc);
        if (ipr_cmd->eh_comp)
                complete(ipr_cmd->eh_comp);
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
        unsigned long hrrq_flags;

        spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
        __ipr_sata_eh_done(ipr_cmd);
        spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *      none
 **/
static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

        scsi_cmd->result |= (DID_ERROR << 16);

        scsi_dma_unmap(ipr_cmd->scsi_cmd);
        scsi_cmd->scsi_done(scsi_cmd);
        if (ipr_cmd->eh_comp)
                complete(ipr_cmd->eh_comp);
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
        unsigned long hrrq_flags;
        struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;

        spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
        __ipr_scsi_eh_done(ipr_cmd);
        spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:    ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *      none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd, *temp;
        struct ipr_hrr_queue *hrrq;

        ENTER;
        for_each_hrrq(hrrq, ioa_cfg) {
                spin_lock(&hrrq->_lock);
                list_for_each_entry_safe(ipr_cmd,
                                        temp, &hrrq->hrrq_pending_q, queue) {
                        list_del(&ipr_cmd->queue);

                        ipr_cmd->s.ioasa.hdr.ioasc =
                                cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
                        ipr_cmd->s.ioasa.hdr.ilid =
                                cpu_to_be32(IPR_DRIVER_ILID);

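                        /*
                         * Route completion through the eh done handlers so
                         * any error-handling thread waiting on eh_comp is
                         * woken when the op is failed back.
                         */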
                        if (ipr_cmd->scsi_cmd)
                                ipr_cmd->done = __ipr_scsi_eh_done;
                        else if (ipr_cmd->qc)
                                ipr_cmd->done = __ipr_sata_eh_done;

                        ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
                                     IPR_IOASC_IOA_WAS_RESET);
                        del_timer(&ipr_cmd->timer);
                        ipr_cmd->done(ipr_cmd);
                }
                spin_unlock(&hrrq->_lock);
        }
        LEAVE;
}

/**
 * ipr_send_command -  Send driver initiated requests.
 * @ipr_cmd:            ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 *      none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

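        /*
         * For SIS64, the IOARCB size is encoded in the low-order bits of
         * the bus address written to the IOARRIN register, as set up below.
         */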
        if (ioa_cfg->sis64) {
                /* The default size is 256 bytes */
                send_dma_addr |= 0x1;

                /* If the number of ioadls * size of ioadl > 128 bytes,
                   then use a 512 byte ioarcb */
                if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
                        send_dma_addr |= 0x4;
                writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
        } else
                writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:            ipr command struct
 * @done:                       done function
 * @timeout_func:       timeout function
 * @timeout:            timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *      none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
                       void (*done) (struct ipr_cmnd *),
                       void (*timeout_func) (struct timer_list *), u32 timeout)
{
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

        ipr_cmd->done = done;

        ipr_cmd->timer.expires = jiffies + timeout;
        ipr_cmd->timer.function = timeout_func;

        add_timer(&ipr_cmd->timer);

        ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

        ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:    ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *      none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
        if (ipr_cmd->sibling)
                ipr_cmd->sibling = NULL;
        else
                complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:    ipr command struct
 * @dma_addr:   dma address
 * @len:        transfer length
 * @flags:      ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 *      nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
                           u32 len, int flags)
{
        struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
        struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

        ipr_cmd->dma_use_sg = 1;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioadl64->flags = cpu_to_be32(flags);
                ioadl64->data_len = cpu_to_be32(len);
                ioadl64->address = cpu_to_be64(dma_addr);

                ipr_cmd->ioarcb.ioadl_len =
                        cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
                ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
        } else {
                ioadl->flags_and_data_len = cpu_to_be32(flags | len);
                ioadl->address = cpu_to_be32(dma_addr);

                if (flags == IPR_IOADL_FLAGS_READ_LAST) {
                        ipr_cmd->ioarcb.read_ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
                } else {
                        ipr_cmd->ioarcb.ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
                }
        }
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:    ipr command struct
 * @timeout_func:       function to invoke if command times out
 * @timeout:    timeout
 *
 * Return value:
 *      none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
                                  void (*timeout_func) (struct timer_list *),
                                  u32 timeout)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        init_completion(&ipr_cmd->completion);
        ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

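        /*
         * Drop the host lock while sleeping; ipr_internal_cmd_done() will
         * signal the completion from the command's done path.
         */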
        spin_unlock_irq(ioa_cfg->host->host_lock);
        wait_for_completion(&ipr_cmd->completion);
        spin_lock_irq(ioa_cfg->host->host_lock);
}

static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
        unsigned int hrrq;

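        /*
         * HRRQ 0 is reserved for internal/initialization commands; spread
         * everything else round-robin across the remaining queues.
         */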
        if (ioa_cfg->hrrq_num == 1)
                hrrq = 0;
        else {
                hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
                hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
        }
        return hrrq;
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:    ioa config struct
 * @type:               HCAM type
 * @hostrcb:    hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *      none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
                          struct ipr_hostrcb *hostrcb)
{
        struct ipr_cmnd *ipr_cmd;
        struct ipr_ioarcb *ioarcb;

        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
                ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
                list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

                ipr_cmd->u.hostrcb = hostrcb;
                ioarcb = &ipr_cmd->ioarcb;

                ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
                ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
                ioarcb->cmd_pkt.cdb[1] = type;
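                /* CDB bytes 7-8 carry the HCAM buffer length, big endian */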
                ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
                ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

                ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
                               sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

                if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
                        ipr_cmd->done = ipr_process_ccn;
                else
                        ipr_cmd->done = ipr_process_error;

                ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

                ipr_send_command(ipr_cmd);
        } else {
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
        }
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:        resource entry struct
 * @proto:      cfgte device bus protocol value
 *
 * Return value:
 *      none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
        switch (proto) {
        case IPR_PROTO_SATA:
        case IPR_PROTO_SAS_STP:
                res->ata_class = ATA_DEV_ATA;
                break;
        case IPR_PROTO_SATA_ATAPI:
        case IPR_PROTO_SAS_STP_ATAPI:
                res->ata_class = ATA_DEV_ATAPI;
                break;
        default:
                res->ata_class = ATA_DEV_UNKNOWN;
                break;
        }
1181 }
1182
1183 /**
1184  * ipr_init_res_entry - Initialize a resource entry struct.
1185  * @res:        resource entry struct
1186  * @cfgtew:     config table entry wrapper struct
1187  *
1188  * Return value:
1189  *      none
1190  **/
1191 static void ipr_init_res_entry(struct ipr_resource_entry *res,
1192                                struct ipr_config_table_entry_wrapper *cfgtew)
1193 {
1194         int found = 0;
1195         unsigned int proto;
1196         struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1197         struct ipr_resource_entry *gscsi_res = NULL;
1198
1199         res->needs_sync_complete = 0;
1200         res->in_erp = 0;
1201         res->add_to_ml = 0;
1202         res->del_from_ml = 0;
1203         res->resetting_device = 0;
1204         res->reset_occurred = 0;
1205         res->sdev = NULL;
1206         res->sata_port = NULL;
1207
1208         if (ioa_cfg->sis64) {
1209                 proto = cfgtew->u.cfgte64->proto;
1210                 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1211                 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1212                 res->qmodel = IPR_QUEUEING_MODEL64(res);
1213                 res->type = cfgtew->u.cfgte64->res_type;
1214
1215                 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1216                         sizeof(res->res_path));
1217
1218                 res->bus = 0;
1219                 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1220                         sizeof(res->dev_lun.scsi_lun));
1221                 res->lun = scsilun_to_int(&res->dev_lun);
1222
1223                 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1224                         list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1225                                 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1226                                         found = 1;
1227                                         res->target = gscsi_res->target;
1228                                         break;
1229                                 }
1230                         }
1231                         if (!found) {
1232                                 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1233                                                                   ioa_cfg->max_devs_supported);
1234                                 set_bit(res->target, ioa_cfg->target_ids);
1235                         }
1236                 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1237                         res->bus = IPR_IOAFP_VIRTUAL_BUS;
1238                         res->target = 0;
1239                 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1240                         res->bus = IPR_ARRAY_VIRTUAL_BUS;
1241                         res->target = find_first_zero_bit(ioa_cfg->array_ids,
1242                                                           ioa_cfg->max_devs_supported);
1243                         set_bit(res->target, ioa_cfg->array_ids);
1244                 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1245                         res->bus = IPR_VSET_VIRTUAL_BUS;
1246                         res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1247                                                           ioa_cfg->max_devs_supported);
1248                         set_bit(res->target, ioa_cfg->vset_ids);
1249                 } else {
1250                         res->target = find_first_zero_bit(ioa_cfg->target_ids,
1251                                                           ioa_cfg->max_devs_supported);
1252                         set_bit(res->target, ioa_cfg->target_ids);
1253                 }
1254         } else {
1255                 proto = cfgtew->u.cfgte->proto;
1256                 res->qmodel = IPR_QUEUEING_MODEL(res);
1257                 res->flags = cfgtew->u.cfgte->flags;
1258                 if (res->flags & IPR_IS_IOA_RESOURCE)
1259                         res->type = IPR_RES_TYPE_IOAFP;
1260                 else
1261                         res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1262
1263                 res->bus = cfgtew->u.cfgte->res_addr.bus;
1264                 res->target = cfgtew->u.cfgte->res_addr.target;
1265                 res->lun = cfgtew->u.cfgte->res_addr.lun;
1266                 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
1267         }
1268
1269         ipr_update_ata_class(res, proto);
1270 }
1271
1272 /**
1273  * ipr_is_same_device - Determine if two devices are the same.
1274  * @res:        resource entry struct
1275  * @cfgtew:     config table entry wrapper struct
1276  *
1277  * Return value:
1278  *      1 if the devices are the same / 0 otherwise
1279  **/
1280 static int ipr_is_same_device(struct ipr_resource_entry *res,
1281                               struct ipr_config_table_entry_wrapper *cfgtew)
1282 {
1283         if (res->ioa_cfg->sis64) {
1284                 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1285                                         sizeof(cfgtew->u.cfgte64->dev_id)) &&
1286                         !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1287                                         sizeof(cfgtew->u.cfgte64->lun))) {
1288                         return 1;
1289                 }
1290         } else {
1291                 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1292                     res->target == cfgtew->u.cfgte->res_addr.target &&
1293                     res->lun == cfgtew->u.cfgte->res_addr.lun)
1294                         return 1;
1295         }
1296
1297         return 0;
1298 }
1299
1300 /**
1301  * __ipr_format_res_path - Format the resource path for printing.
1302  * @res_path:   resource path
1303  * @buffer:     destination buffer
1304  * @len:        length of buffer provided
1305  *
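 * The path is formatted as dash-separated hex bytes (for example,
 * "00-0A-03"), stopping at the first 0xff terminator byte in res_path.
 *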
1306  * Return value:
1307  *      pointer to buffer
1308  **/
1309 static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
1310 {
1311         int i;
1312         char *p = buffer;
1313
1314         *p = '\0';
1315         p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1316         for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1317                 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
1318
1319         return buffer;
1320 }
1321
1322 /**
1323  * ipr_format_res_path - Format the resource path for printing.
1324  * @ioa_cfg:    ioa config struct
1325  * @res_path:   resource path
1326  * @buffer:     destination buffer
1327  * @len:        length of buffer provided
1328  *
1329  * Return value:
1330  *      pointer to buffer
1331  **/
1332 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1333                                  u8 *res_path, char *buffer, int len)
1334 {
1335         char *p = buffer;
1336
1337         *p = '\0';
1338         p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
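        /* pass the space remaining after the "<host_no>/" prefix */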
1339         __ipr_format_res_path(res_path, p, len - (p - buffer));
1340         return buffer;
1341 }
1342
1343 /**
1344  * ipr_update_res_entry - Update the resource entry.
1345  * @res:        resource entry struct
1346  * @cfgtew:     config table entry wrapper struct
1347  *
1348  * Return value:
1349  *      none
1350  **/
1351 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1352                                  struct ipr_config_table_entry_wrapper *cfgtew)
1353 {
1354         char buffer[IPR_MAX_RES_PATH_LENGTH];
1355         unsigned int proto;
1356         int new_path = 0;
1357
1358         if (res->ioa_cfg->sis64) {
1359                 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1360                 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1361                 res->type = cfgtew->u.cfgte64->res_type;
1362
1363                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1364                         sizeof(struct ipr_std_inq_data));
1365
1366                 res->qmodel = IPR_QUEUEING_MODEL64(res);
1367                 proto = cfgtew->u.cfgte64->proto;
1368                 res->res_handle = cfgtew->u.cfgte64->res_handle;
1369                 res->dev_id = cfgtew->u.cfgte64->dev_id;
1370
1371                 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1372                         sizeof(res->dev_lun.scsi_lun));
1373
1374                 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1375                                         sizeof(res->res_path))) {
1376                         memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1377                                 sizeof(res->res_path));
1378                         new_path = 1;
1379                 }
1380
1381                 if (res->sdev && new_path)
1382                         sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1383                                     ipr_format_res_path(res->ioa_cfg,
1384                                         res->res_path, buffer, sizeof(buffer)));
1385         } else {
1386                 res->flags = cfgtew->u.cfgte->flags;
1387                 if (res->flags & IPR_IS_IOA_RESOURCE)
1388                         res->type = IPR_RES_TYPE_IOAFP;
1389                 else
1390                         res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1391
1392                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1393                         sizeof(struct ipr_std_inq_data));
1394
1395                 res->qmodel = IPR_QUEUEING_MODEL(res);
1396                 proto = cfgtew->u.cfgte->proto;
1397                 res->res_handle = cfgtew->u.cfgte->res_handle;
1398         }
1399
1400         ipr_update_ata_class(res, proto);
1401 }
1402
1403 /**
1404  * ipr_clear_res_target - Clear the bit in the bit map representing the target
1405  *                        for the resource.
1406  * @res:        resource entry struct
1408  *
1409  * Return value:
1410  *      none
1411  **/
1412 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1413 {
1414         struct ipr_resource_entry *gscsi_res = NULL;
1415         struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1416
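        /* Only sis64 adapters allocate target/array IDs from the bitmaps. */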
1417         if (!ioa_cfg->sis64)
1418                 return;
1419
1420         if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1421                 clear_bit(res->target, ioa_cfg->array_ids);
1422         else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1423                 clear_bit(res->target, ioa_cfg->vset_ids);
1424         else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
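                /* Keep the target bit while another resource uses this dev_id. */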
1425                 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1426                         if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1427                                 return;
1428                 clear_bit(res->target, ioa_cfg->target_ids);
1429
1430         } else if (res->bus == 0)
1431                 clear_bit(res->target, ioa_cfg->target_ids);
1432 }
1433
1434 /**
1435  * ipr_handle_config_change - Handle a config change from the adapter
1436  * @ioa_cfg:    ioa config struct
1437  * @hostrcb:    hostrcb
1438  *
1439  * Return value:
1440  *      none
1441  **/
1442 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1443                                      struct ipr_hostrcb *hostrcb)
1444 {
1445         struct ipr_resource_entry *res = NULL;
1446         struct ipr_config_table_entry_wrapper cfgtew;
1447         __be32 cc_res_handle;
1449         u32 is_ndn = 1;
1450
1451         if (ioa_cfg->sis64) {
1452                 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1453                 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1454         } else {
1455                 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1456                 cc_res_handle = cfgtew.u.cfgte->res_handle;
1457         }
1458
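        /* A handle that matches no existing resource denotes a new device. */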
1459         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1460                 if (res->res_handle == cc_res_handle) {
1461                         is_ndn = 0;
1462                         break;
1463                 }
1464         }
1465
1466         if (is_ndn) {
1467                 if (list_empty(&ioa_cfg->free_res_q)) {
1468                         ipr_send_hcam(ioa_cfg,
1469                                       IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1470                                       hostrcb);
1471                         return;
1472                 }
1473
1474                 res = list_entry(ioa_cfg->free_res_q.next,
1475                                  struct ipr_resource_entry, queue);
1476
1477                 list_del(&res->queue);
1478                 ipr_init_res_entry(res, &cfgtew);
1479                 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1480         }
1481
1482         ipr_update_res_entry(res, &cfgtew);
1483
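        /*
         * On a removal notification, detach from the SCSI mid-layer if a
         * device is attached; otherwise return the entry to the free list.
         */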
1484         if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1485                 if (res->sdev) {
1486                         res->del_from_ml = 1;
1487                         res->res_handle = IPR_INVALID_RES_HANDLE;
1488                         schedule_work(&ioa_cfg->work_q);
1489                 } else {
1490                         ipr_clear_res_target(res);
1491                         list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1492                 }
1493         } else if (!res->sdev || res->del_from_ml) {
1494                 res->add_to_ml = 1;
1495                 schedule_work(&ioa_cfg->work_q);
1496         }
1497
1498         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1499 }
1500
1501 /**
1502  * ipr_process_ccn - Op done function for a CCN.
1503  * @ipr_cmd:    ipr command struct
1504  *
1505  * This function is the op done function for a configuration
1506  * change notification host controlled async from the adapter.
1507  *
1508  * Return value:
1509  *      none
1510  **/
1511 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1512 {
1513         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1514         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1515         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1516
1517         list_del_init(&hostrcb->queue);
1518         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1519
1520         if (ioasc) {
1521                 if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1522                     ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
1523                         dev_err(&ioa_cfg->pdev->dev,
1524                                 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1525
1526                 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1527         } else {
1528                 ipr_handle_config_change(ioa_cfg, hostrcb);
1529         }
1530 }
1531
1532 /**
1533  * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1534  * @i:          index of the last character of the string in buf
1535  * @buf:                string to modify
1536  *
1537  * This function will strip all trailing whitespace, pad the end
1538  * of the string with a single space, and NULL terminate the string.
1539  *
1540  * Return value:
1541  *      new length of string
1542  **/
1543 static int strip_and_pad_whitespace(int i, char *buf)
1544 {
1545         while (i && buf[i] == ' ')
1546                 i--;
1547         buf[i+1] = ' ';
1548         buf[i+2] = '\0';
1549         return i + 2;
1550 }
1551
1552 /**
1553  * ipr_log_vpd_compact - Log the passed VPD compactly.
1554  * @prefix:             string to print at start of printk
1555  * @hostrcb:    hostrcb pointer
1556  * @vpd:                vendor/product id/sn struct
1557  *
1558  * Return value:
1559  *      none
1560  **/
1561 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1562                                 struct ipr_vpd *vpd)
1563 {
1564         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1565         int i = 0;
1566
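        /* Build "VENDOR PRODUCT SERIAL" with single-space separators. */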
1567         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1568         i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1569
1570         memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1571         i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1572
1573         memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1574         buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1575
1576         ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1577 }
1578
1579 /**
1580  * ipr_log_vpd - Log the passed VPD to the error log.
1581  * @vpd:                vendor/product id/sn struct
1582  *
1583  * Return value:
1584  *      none
1585  **/
1586 static void ipr_log_vpd(struct ipr_vpd *vpd)
1587 {
1588         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1589                     + IPR_SERIAL_NUM_LEN];
1590
1591         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1592         memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1593                IPR_PROD_ID_LEN);
1594         buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1595         ipr_err("Vendor/Product ID: %s\n", buffer);
1596
1597         memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1598         buffer[IPR_SERIAL_NUM_LEN] = '\0';
1599         ipr_err("    Serial Number: %s\n", buffer);
1600 }
1601
1602 /**
1603  * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1604  * @prefix:             string to print at start of printk
1605  * @hostrcb:    hostrcb pointer
1606  * @vpd:                vendor/product id/sn/wwn struct
1607  *
1608  * Return value:
1609  *      none
1610  **/
1611 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1612                                     struct ipr_ext_vpd *vpd)
1613 {
1614         ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1615         ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1616                      be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1617 }
1618
1619 /**
1620  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1621  * @vpd:                vendor/product id/sn/wwn struct
1622  *
1623  * Return value:
1624  *      none
1625  **/
1626 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1627 {
1628         ipr_log_vpd(&vpd->vpd);
1629         ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1630                 be32_to_cpu(vpd->wwid[1]));
1631 }
1632
1633 /**
1634  * ipr_log_enhanced_cache_error - Log a cache error.
1635  * @ioa_cfg:    ioa config struct
1636  * @hostrcb:    hostrcb struct
1637  *
1638  * Return value:
1639  *      none
1640  **/
1641 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1642                                          struct ipr_hostrcb *hostrcb)
1643 {
1644         struct ipr_hostrcb_type_12_error *error;
1645
1646         if (ioa_cfg->sis64)
1647                 error = &hostrcb->hcam.u.error64.u.type_12_error;
1648         else
1649                 error = &hostrcb->hcam.u.error.u.type_12_error;
1650
1651         ipr_err("-----Current Configuration-----\n");
1652         ipr_err("Cache Directory Card Information:\n");
1653         ipr_log_ext_vpd(&error->ioa_vpd);
1654         ipr_err("Adapter Card Information:\n");
1655         ipr_log_ext_vpd(&error->cfc_vpd);
1656
1657         ipr_err("-----Expected Configuration-----\n");
1658         ipr_err("Cache Directory Card Information:\n");
1659         ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1660         ipr_err("Adapter Card Information:\n");
1661         ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1662
1663         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1664                      be32_to_cpu(error->ioa_data[0]),
1665                      be32_to_cpu(error->ioa_data[1]),
1666                      be32_to_cpu(error->ioa_data[2]));
1667 }
1668
1669 /**
1670  * ipr_log_cache_error - Log a cache error.
1671  * @ioa_cfg:    ioa config struct
1672  * @hostrcb:    hostrcb struct
1673  *
1674  * Return value:
1675  *      none
1676  **/
1677 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1678                                 struct ipr_hostrcb *hostrcb)
1679 {
1680         struct ipr_hostrcb_type_02_error *error =
1681                 &hostrcb->hcam.u.error.u.type_02_error;
1682
1683         ipr_err("-----Current Configuration-----\n");
1684         ipr_err("Cache Directory Card Information:\n");
1685         ipr_log_vpd(&error->ioa_vpd);
1686         ipr_err("Adapter Card Information:\n");
1687         ipr_log_vpd(&error->cfc_vpd);
1688
1689         ipr_err("-----Expected Configuration-----\n");
1690         ipr_err("Cache Directory Card Information:\n");
1691         ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1692         ipr_err("Adapter Card Information:\n");
1693         ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1694
1695         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1696                      be32_to_cpu(error->ioa_data[0]),
1697                      be32_to_cpu(error->ioa_data[1]),
1698                      be32_to_cpu(error->ioa_data[2]));
1699 }
1700
1701 /**
1702  * ipr_log_enhanced_config_error - Log a configuration error.
1703  * @ioa_cfg:    ioa config struct
1704  * @hostrcb:    hostrcb struct
1705  *
1706  * Return value:
1707  *      none
1708  **/
1709 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1710                                           struct ipr_hostrcb *hostrcb)
1711 {
1712         int errors_logged, i;
1713         struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1714         struct ipr_hostrcb_type_13_error *error;
1715
1716         error = &hostrcb->hcam.u.error.u.type_13_error;
1717         errors_logged = be32_to_cpu(error->errors_logged);
1718
1719         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1720                 be32_to_cpu(error->errors_detected), errors_logged);
1721
1722         dev_entry = error->dev;
1723
1724         for (i = 0; i < errors_logged; i++, dev_entry++) {
1725                 ipr_err_separator;
1726
1727                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1728                 ipr_log_ext_vpd(&dev_entry->vpd);
1729
1730                 ipr_err("-----New Device Information-----\n");
1731                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1732
1733                 ipr_err("Cache Directory Card Information:\n");
1734                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1735
1736                 ipr_err("Adapter Card Information:\n");
1737                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1738         }
1739 }
1740
1741 /**
1742  * ipr_log_sis64_config_error - Log a sis64 configuration error.
1743  * @ioa_cfg:    ioa config struct
1744  * @hostrcb:    hostrcb struct
1745  *
1746  * Return value:
1747  *      none
1748  **/
1749 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1750                                        struct ipr_hostrcb *hostrcb)
1751 {
1752         int errors_logged, i;
1753         struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1754         struct ipr_hostrcb_type_23_error *error;
1755         char buffer[IPR_MAX_RES_PATH_LENGTH];
1756
1757         error = &hostrcb->hcam.u.error64.u.type_23_error;
1758         errors_logged = be32_to_cpu(error->errors_logged);
1759
1760         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1761                 be32_to_cpu(error->errors_detected), errors_logged);
1762
1763         dev_entry = error->dev;
1764
1765         for (i = 0; i < errors_logged; i++, dev_entry++) {
1766                 ipr_err_separator;
1767
1768                 ipr_err("Device %d : %s", i + 1,
1769                         __ipr_format_res_path(dev_entry->res_path,
1770                                               buffer, sizeof(buffer)));
1771                 ipr_log_ext_vpd(&dev_entry->vpd);
1772
1773                 ipr_err("-----New Device Information-----\n");
1774                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1775
1776                 ipr_err("Cache Directory Card Information:\n");
1777                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1778
1779                 ipr_err("Adapter Card Information:\n");
1780                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1781         }
1782 }
1783
1784 /**
1785  * ipr_log_config_error - Log a configuration error.
1786  * @ioa_cfg:    ioa config struct
1787  * @hostrcb:    hostrcb struct
1788  *
1789  * Return value:
1790  *      none
1791  **/
1792 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1793                                  struct ipr_hostrcb *hostrcb)
1794 {
1795         int errors_logged, i;
1796         struct ipr_hostrcb_device_data_entry *dev_entry;
1797         struct ipr_hostrcb_type_03_error *error;
1798
1799         error = &hostrcb->hcam.u.error.u.type_03_error;
1800         errors_logged = be32_to_cpu(error->errors_logged);
1801
1802         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1803                 be32_to_cpu(error->errors_detected), errors_logged);
1804
1805         dev_entry = error->dev;
1806
1807         for (i = 0; i < errors_logged; i++, dev_entry++) {
1808                 ipr_err_separator;
1809
1810                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1811                 ipr_log_vpd(&dev_entry->vpd);
1812
1813                 ipr_err("-----New Device Information-----\n");
1814                 ipr_log_vpd(&dev_entry->new_vpd);
1815
1816                 ipr_err("Cache Directory Card Information:\n");
1817                 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1818
1819                 ipr_err("Adapter Card Information:\n");
1820                 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1821
1822                 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1823                         be32_to_cpu(dev_entry->ioa_data[0]),
1824                         be32_to_cpu(dev_entry->ioa_data[1]),
1825                         be32_to_cpu(dev_entry->ioa_data[2]),
1826                         be32_to_cpu(dev_entry->ioa_data[3]),
1827                         be32_to_cpu(dev_entry->ioa_data[4]));
1828         }
1829 }
1830
1831 /**
1832  * ipr_log_enhanced_array_error - Log an array configuration error.
1833  * @ioa_cfg:    ioa config struct
1834  * @hostrcb:    hostrcb struct
1835  *
1836  * Return value:
1837  *      none
1838  **/
1839 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1840                                          struct ipr_hostrcb *hostrcb)
1841 {
1842         int i, num_entries;
1843         struct ipr_hostrcb_type_14_error *error;
1844         struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1845         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1846
1847         error = &hostrcb->hcam.u.error.u.type_14_error;
1848
1849         ipr_err_separator;
1850
1851         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1852                 error->protection_level,
1853                 ioa_cfg->host->host_no,
1854                 error->last_func_vset_res_addr.bus,
1855                 error->last_func_vset_res_addr.target,
1856                 error->last_func_vset_res_addr.lun);
1857
1858         ipr_err_separator;
1859
1860         array_entry = error->array_member;
1861         num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1862                             ARRAY_SIZE(error->array_member));
1863
1864         for (i = 0; i < num_entries; i++, array_entry++) {
1865                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1866                         continue;
1867
1868                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1869                         ipr_err("Exposed Array Member %d:\n", i);
1870                 else
1871                         ipr_err("Array Member %d:\n", i);
1872
1873                 ipr_log_ext_vpd(&array_entry->vpd);
1874                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1875                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1876                                  "Expected Location");
1877
1878                 ipr_err_separator;
1879         }
1880 }
1881
1882 /**
1883  * ipr_log_array_error - Log an array configuration error.
1884  * @ioa_cfg:    ioa config struct
1885  * @hostrcb:    hostrcb struct
1886  *
1887  * Return value:
1888  *      none
1889  **/
1890 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1891                                 struct ipr_hostrcb *hostrcb)
1892 {
1893         int i;
1894         struct ipr_hostrcb_type_04_error *error;
1895         struct ipr_hostrcb_array_data_entry *array_entry;
1896         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1897
1898         error = &hostrcb->hcam.u.error.u.type_04_error;
1899
1900         ipr_err_separator;
1901
1902         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1903                 error->protection_level,
1904                 ioa_cfg->host->host_no,
1905                 error->last_func_vset_res_addr.bus,
1906                 error->last_func_vset_res_addr.target,
1907                 error->last_func_vset_res_addr.lun);
1908
1909         ipr_err_separator;
1910
1911         array_entry = error->array_member;
1912
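        /* Walks array_member[] for entries 0-9, then array_member2[] for 10-17. */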
1913         for (i = 0; i < 18; i++) {
1914                 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1915                         continue;
1916
1917                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1918                         ipr_err("Exposed Array Member %d:\n", i);
1919                 else
1920                         ipr_err("Array Member %d:\n", i);
1921
1922                 ipr_log_vpd(&array_entry->vpd);
1923
1924                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1925                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1926                                  "Expected Location");
1927
1928                 ipr_err_separator;
1929
1930                 if (i == 9)
1931                         array_entry = error->array_member2;
1932                 else
1933                         array_entry++;
1934         }
1935 }
1936
1937 /**
1938  * ipr_log_hex_data - Log additional hex IOA error data.
1939  * @ioa_cfg:    ioa config struct
1940  * @data:               IOA error data
1941  * @len:                data length, in bytes
1942  *
1943  * Return value:
1944  *      none
1945  **/
1946 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
1947 {
1948         int i;
1949
1950         if (len == 0)
1951                 return;
1952
1953         if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1954                 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1955
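        /* Dump four 32-bit words per row, prefixed with the starting byte
         * offset (assumes len is a multiple of 16 bytes). */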
1956         for (i = 0; i < len / 4; i += 4) {
1957                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1958                         be32_to_cpu(data[i]),
1959                         be32_to_cpu(data[i+1]),
1960                         be32_to_cpu(data[i+2]),
1961                         be32_to_cpu(data[i+3]));
1962         }
1963 }
1964
1965 /**
1966  * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1967  * @ioa_cfg:    ioa config struct
1968  * @hostrcb:    hostrcb struct
1969  *
1970  * Return value:
1971  *      none
1972  **/
1973 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1974                                             struct ipr_hostrcb *hostrcb)
1975 {
1976         struct ipr_hostrcb_type_17_error *error;
1977
1978         if (ioa_cfg->sis64)
1979                 error = &hostrcb->hcam.u.error64.u.type_17_error;
1980         else
1981                 error = &hostrcb->hcam.u.error.u.type_17_error;
1982
1983         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1984         strim(error->failure_reason);
1985
1986         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1987                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1988         ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1989         ipr_log_hex_data(ioa_cfg, error->data,
1990                          be32_to_cpu(hostrcb->hcam.length) -
1991                          (offsetof(struct ipr_hostrcb_error, u) +
1992                           offsetof(struct ipr_hostrcb_type_17_error, data)));
1993 }
1994
1995 /**
1996  * ipr_log_dual_ioa_error - Log a dual adapter error.
1997  * @ioa_cfg:    ioa config struct
1998  * @hostrcb:    hostrcb struct
1999  *
2000  * Return value:
2001  *      none
2002  **/
2003 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
2004                                    struct ipr_hostrcb *hostrcb)
2005 {
2006         struct ipr_hostrcb_type_07_error *error;
2007
2008         error = &hostrcb->hcam.u.error.u.type_07_error;
2009         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2010         strim(error->failure_reason);
2011
2012         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
2013                      be32_to_cpu(hostrcb->hcam.u.error.prc));
2014         ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
2015         ipr_log_hex_data(ioa_cfg, error->data,
2016                          be32_to_cpu(hostrcb->hcam.length) -
2017                          (offsetof(struct ipr_hostrcb_error, u) +
2018                           offsetof(struct ipr_hostrcb_type_07_error, data)));
2019 }
2020
2021 static const struct {
2022         u8 active;
2023         char *desc;
2024 } path_active_desc[] = {
2025         { IPR_PATH_NO_INFO, "Path" },
2026         { IPR_PATH_ACTIVE, "Active path" },
2027         { IPR_PATH_NOT_ACTIVE, "Inactive path" }
2028 };
2029
2030 static const struct {
2031         u8 state;
2032         char *desc;
2033 } path_state_desc[] = {
2034         { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
2035         { IPR_PATH_HEALTHY, "is healthy" },
2036         { IPR_PATH_DEGRADED, "is degraded" },
2037         { IPR_PATH_FAILED, "is failed" }
2038 };
2039
2040 /**
2041  * ipr_log_fabric_path - Log a fabric path error
2042  * @hostrcb:    hostrcb struct
2043  * @fabric:             fabric descriptor
2044  *
2045  * Return value:
2046  *      none
2047  **/
2048 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2049                                 struct ipr_hostrcb_fabric_desc *fabric)
2050 {
2051         int i, j;
2052         u8 path_state = fabric->path_state;
2053         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2054         u8 state = path_state & IPR_PATH_STATE_MASK;
2055
2056         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2057                 if (path_active_desc[i].active != active)
2058                         continue;
2059
2060                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2061                         if (path_state_desc[j].state != state)
2062                                 continue;
2063
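                        /* Omit cascade/phy fields reported as 0xff (not valid). */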
2064                         if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2065                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2066                                              path_active_desc[i].desc, path_state_desc[j].desc,
2067                                              fabric->ioa_port);
2068                         } else if (fabric->cascaded_expander == 0xff) {
2069                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2070                                              path_active_desc[i].desc, path_state_desc[j].desc,
2071                                              fabric->ioa_port, fabric->phy);
2072                         } else if (fabric->phy == 0xff) {
2073                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2074                                              path_active_desc[i].desc, path_state_desc[j].desc,
2075                                              fabric->ioa_port, fabric->cascaded_expander);
2076                         } else {
2077                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2078                                              path_active_desc[i].desc, path_state_desc[j].desc,
2079                                              fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2080                         }
2081                         return;
2082                 }
2083         }
2084
2085         ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2086                 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2087 }
2088
2089 /**
2090  * ipr_log64_fabric_path - Log a fabric path error
2091  * @hostrcb:    hostrcb struct
2092  * @fabric:             fabric descriptor
2093  *
2094  * Return value:
2095  *      none
2096  **/
2097 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2098                                   struct ipr_hostrcb64_fabric_desc *fabric)
2099 {
2100         int i, j;
2101         u8 path_state = fabric->path_state;
2102         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2103         u8 state = path_state & IPR_PATH_STATE_MASK;
2104         char buffer[IPR_MAX_RES_PATH_LENGTH];
2105
2106         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2107                 if (path_active_desc[i].active != active)
2108                         continue;
2109
2110                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2111                         if (path_state_desc[j].state != state)
2112                                 continue;
2113
2114                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2115                                      path_active_desc[i].desc, path_state_desc[j].desc,
2116                                      ipr_format_res_path(hostrcb->ioa_cfg,
2117                                                 fabric->res_path,
2118                                                 buffer, sizeof(buffer)));
2119                         return;
2120                 }
2121         }
2122
2123         ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2124                 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2125                                     buffer, sizeof(buffer)));
2126 }
2127
2128 static const struct {
2129         u8 type;
2130         char *desc;
2131 } path_type_desc[] = {
2132         { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2133         { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2134         { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2135         { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2136 };
2137
2138 static const struct {
2139         u8 status;
2140         char *desc;
2141 } path_status_desc[] = {
2142         { IPR_PATH_CFG_NO_PROB, "Functional" },
2143         { IPR_PATH_CFG_DEGRADED, "Degraded" },
2144         { IPR_PATH_CFG_FAILED, "Failed" },
2145         { IPR_PATH_CFG_SUSPECT, "Suspect" },
2146         { IPR_PATH_NOT_DETECTED, "Missing" },
2147         { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2148 };
2149
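/* Link rate descriptions, indexed by (link_rate & IPR_PHY_LINK_RATE_MASK). */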
2150 static const char *link_rate[] = {
2151         "unknown",
2152         "disabled",
2153         "phy reset problem",
2154         "spinup hold",
2155         "port selector",
2156         "unknown",
2157         "unknown",
2158         "unknown",
2159         "1.5Gbps",
2160         "3.0Gbps",
2161         "unknown",
2162         "unknown",
2163         "unknown",
2164         "unknown",
2165         "unknown",
2166         "unknown"
2167 };
2168
2169 /**
2170  * ipr_log_path_elem - Log a fabric path element.
2171  * @hostrcb:    hostrcb struct
2172  * @cfg:                fabric path element struct
2173  *
2174  * Return value:
2175  *      none
2176  **/
2177 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2178                               struct ipr_hostrcb_config_element *cfg)
2179 {
2180         int i, j;
2181         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2182         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2183
2184         if (type == IPR_PATH_CFG_NOT_EXIST)
2185                 return;
2186
2187         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2188                 if (path_type_desc[i].type != type)
2189                         continue;
2190
2191                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2192                         if (path_status_desc[j].status != status)
2193                                 continue;
2194
2195                         if (type == IPR_PATH_CFG_IOA_PORT) {
2196                                 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2197                                              path_status_desc[j].desc, path_type_desc[i].desc,
2198                                              cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2199                                              be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2200                         } else {
2201                                 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2202                                         ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2203                                                      path_status_desc[j].desc, path_type_desc[i].desc,
2204                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2205                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2206                                 } else if (cfg->cascaded_expander == 0xff) {
2207                                         ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2208                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2209                                                      path_type_desc[i].desc, cfg->phy,
2210                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2211                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2212                                 } else if (cfg->phy == 0xff) {
2213                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2214                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2215                                                      path_type_desc[i].desc, cfg->cascaded_expander,
2216                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2217                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2218                                 } else {
2219                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2220                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2221                                                      path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2222                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2223                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2224                                 }
2225                         }
2226                         return;
2227                 }
2228         }
2229
2230         ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2231                      "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2232                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2233                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2234 }
2235
2236 /**
2237  * ipr_log64_path_elem - Log a fabric path element.
2238  * @hostrcb:    hostrcb struct
2239  * @cfg:                fabric path element struct
2240  *
2241  * Return value:
2242  *      none
2243  **/
2244 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2245                                 struct ipr_hostrcb64_config_element *cfg)
2246 {
2247         int i, j;
2248         u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2249         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2250         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2251         char buffer[IPR_MAX_RES_PATH_LENGTH];
2252
2253         if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2254                 return;
2255
2256         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2257                 if (path_type_desc[i].type != type)
2258                         continue;
2259
2260                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2261                         if (path_status_desc[j].status != status)
2262                                 continue;
2263
2264                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2265                                      path_status_desc[j].desc, path_type_desc[i].desc,
2266                                      ipr_format_res_path(hostrcb->ioa_cfg,
2267                                         cfg->res_path, buffer, sizeof(buffer)),
2268                                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2269                                         be32_to_cpu(cfg->wwid[0]),
2270                                         be32_to_cpu(cfg->wwid[1]));
2271                         return;
2272                 }
2273         }
2274         ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2275                      "WWN=%08X%08X\n", cfg->type_status,
2276                      ipr_format_res_path(hostrcb->ioa_cfg,
2277                         cfg->res_path, buffer, sizeof(buffer)),
2278                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2279                         be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2280 }
2281
2282 /**
2283  * ipr_log_fabric_error - Log a fabric error.
2284  * @ioa_cfg:    ioa config struct
2285  * @hostrcb:    hostrcb struct
2286  *
2287  * Return value:
2288  *      none
2289  **/
2290 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2291                                  struct ipr_hostrcb *hostrcb)
2292 {
2293         struct ipr_hostrcb_type_20_error *error;
2294         struct ipr_hostrcb_fabric_desc *fabric;
2295         struct ipr_hostrcb_config_element *cfg;
2296         int i, add_len;
2297
2298         error = &hostrcb->hcam.u.error.u.type_20_error;
2299         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2300         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2301
2302         add_len = be32_to_cpu(hostrcb->hcam.length) -
2303                 (offsetof(struct ipr_hostrcb_error, u) +
2304                  offsetof(struct ipr_hostrcb_type_20_error, desc));
2305
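        /*
         * Walk the variable-length fabric descriptors; any bytes remaining
         * after the last descriptor are dumped as raw hex.
         */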
2306         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2307                 ipr_log_fabric_path(hostrcb, fabric);
2308                 for_each_fabric_cfg(fabric, cfg)
2309                         ipr_log_path_elem(hostrcb, cfg);
2310
2311                 add_len -= be16_to_cpu(fabric->length);
2312                 fabric = (struct ipr_hostrcb_fabric_desc *)
2313                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2314         }
2315
2316         ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2317 }
2318
2319 /**
2320  * ipr_log_sis64_array_error - Log a sis64 array error.
2321  * @ioa_cfg:    ioa config struct
2322  * @hostrcb:    hostrcb struct
2323  *
2324  * Return value:
2325  *      none
2326  **/
2327 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2328                                       struct ipr_hostrcb *hostrcb)
2329 {
2330         int i, num_entries;
2331         struct ipr_hostrcb_type_24_error *error;
2332         struct ipr_hostrcb64_array_data_entry *array_entry;
2333         char buffer[IPR_MAX_RES_PATH_LENGTH];
2334         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2335
2336         error = &hostrcb->hcam.u.error64.u.type_24_error;
2337
2338         ipr_err_separator;
2339
2340         ipr_err("RAID %s Array Configuration: %s\n",
2341                 error->protection_level,
2342                 ipr_format_res_path(ioa_cfg, error->last_res_path,
2343                         buffer, sizeof(buffer)));
2344
2345         ipr_err_separator;
2346
2347         array_entry = error->array_member;
2348         num_entries = min_t(u32, error->num_entries,
2349                             ARRAY_SIZE(error->array_member));
2350
2351         for (i = 0; i < num_entries; i++, array_entry++) {
2352
2353                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2354                         continue;
2355
2356                 if (error->exposed_mode_adn == i)
2357                         ipr_err("Exposed Array Member %d:\n", i);
2358                 else
2359                         ipr_err("Array Member %d:\n", i);
2362                 ipr_log_ext_vpd(&array_entry->vpd);
2363                 ipr_err("Current Location: %s\n",
2364                          ipr_format_res_path(ioa_cfg, array_entry->res_path,
2365                                 buffer, sizeof(buffer)));
2366                 ipr_err("Expected Location: %s\n",
2367                          ipr_format_res_path(ioa_cfg,
2368                                 array_entry->expected_res_path,
2369                                 buffer, sizeof(buffer)));
2370
2371                 ipr_err_separator;
2372         }
2373 }
2374
2375 /**
2376  * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2377  * @ioa_cfg:    ioa config struct
2378  * @hostrcb:    hostrcb struct
2379  *
2380  * Return value:
2381  *      none
2382  **/
2383 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2384                                        struct ipr_hostrcb *hostrcb)
2385 {
2386         struct ipr_hostrcb_type_30_error *error;
2387         struct ipr_hostrcb64_fabric_desc *fabric;
2388         struct ipr_hostrcb64_config_element *cfg;
2389         int i, add_len;
2390
2391         error = &hostrcb->hcam.u.error64.u.type_30_error;
2392
2393         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2394         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2395
2396         add_len = be32_to_cpu(hostrcb->hcam.length) -
2397                 (offsetof(struct ipr_hostrcb64_error, u) +
2398                  offsetof(struct ipr_hostrcb_type_30_error, desc));
2399
2400         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2401                 ipr_log64_fabric_path(hostrcb, fabric);
2402                 for_each_fabric_cfg(fabric, cfg)
2403                         ipr_log64_path_elem(hostrcb, cfg);
2404
2405                 add_len -= be16_to_cpu(fabric->length);
2406                 fabric = (struct ipr_hostrcb64_fabric_desc *)
2407                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2408         }
2409
2410         ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2411 }
2412
2413 /**
2414  * ipr_log_generic_error - Log an adapter error.
2415  * @ioa_cfg:    ioa config struct
2416  * @hostrcb:    hostrcb struct
2417  *
2418  * Return value:
2419  *      none
2420  **/
2421 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2422                                   struct ipr_hostrcb *hostrcb)
2423 {
2424         ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2425                          be32_to_cpu(hostrcb->hcam.length));
2426 }
2427
2428 /**
2429  * ipr_log_sis64_device_error - Log a sis64 device error.
2430  * @ioa_cfg:    ioa config struct
2431  * @hostrcb:    hostrcb struct
2432  *
2433  * Return value:
2434  *      none
2435  **/
2436 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2437                                          struct ipr_hostrcb *hostrcb)
2438 {
2439         struct ipr_hostrcb_type_21_error *error;
2440         char buffer[IPR_MAX_RES_PATH_LENGTH];
2441
2442         error = &hostrcb->hcam.u.error64.u.type_21_error;
2443
2444         ipr_err("-----Failing Device Information-----\n");
2445         ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2446                 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2447                 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2448         ipr_err("Device Resource Path: %s\n",
2449                 __ipr_format_res_path(error->res_path,
2450                                       buffer, sizeof(buffer)));
2451         error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2452         error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2453         ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2454         ipr_err("Secondary Problem Description:  %s\n", error->second_problem_desc);
2455         ipr_err("SCSI Sense Data:\n");
2456         ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2457         ipr_err("SCSI Command Descriptor Block: \n");
2458         ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2459
2460         ipr_err("Additional IOA Data:\n");
2461         ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2462 }
2463
2464 /**
2465  * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2466  * @ioasc:      IOASC
2467  *
2468  * This function returns the index into the ipr_error_table
2469  * for the specified IOASC. If the IOASC is not in the table,
2470  * 0 is returned, which points to the entry used for unknown errors.
2471  *
2472  * Return value:
2473  *      index into the ipr_error_table
2474  **/
2475 static u32 ipr_get_error(u32 ioasc)
2476 {
2477         int i;
2478
2479         for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2480                 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2481                         return i;
2482
2483         return 0;
2484 }
2485
2486 /**
2487  * ipr_handle_log_data - Log an adapter error.
2488  * @ioa_cfg:    ioa config struct
2489  * @hostrcb:    hostrcb struct
2490  *
2491  * This function logs an adapter error to the system.
2492  *
2493  * Return value:
2494  *      none
2495  **/
2496 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2497                                 struct ipr_hostrcb *hostrcb)
2498 {
2499         u32 ioasc;
2500         int error_index;
2501         struct ipr_hostrcb_type_21_error *error;
2502
2503         if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2504                 return;
2505
2506         if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2507                 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2508
2509         if (ioa_cfg->sis64)
2510                 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2511         else
2512                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2513
2514         if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2515             ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2516                 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2517                 scsi_report_bus_reset(ioa_cfg->host,
2518                                       hostrcb->hcam.u.error.fd_res_addr.bus);
2519         }
2520
2521         error_index = ipr_get_error(ioasc);
2522
2523         if (!ipr_error_table[error_index].log_hcam)
2524                 return;
2525
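        /*
         * A failed HW command whose sense key (byte 2 of the sense data) is
         * ILLEGAL REQUEST is likely routine; skip it at the default log level.
         */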
2526         if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2527             hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2528                 error = &hostrcb->hcam.u.error64.u.type_21_error;
2529
2530                 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2531                     ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2532                         return;
2533         }
2534
2535         ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2536
2537         /* Set indication we have logged an error */
2538         ioa_cfg->errors_logged++;
2539
2540         if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2541                 return;
2542         if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2543                 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2544
2545         switch (hostrcb->hcam.overlay_id) {
2546         case IPR_HOST_RCB_OVERLAY_ID_2:
2547                 ipr_log_cache_error(ioa_cfg, hostrcb);
2548                 break;
2549         case IPR_HOST_RCB_OVERLAY_ID_3:
2550                 ipr_log_config_error(ioa_cfg, hostrcb);
2551                 break;
2552         case IPR_HOST_RCB_OVERLAY_ID_4:
2553         case IPR_HOST_RCB_OVERLAY_ID_6:
2554                 ipr_log_array_error(ioa_cfg, hostrcb);
2555                 break;
2556         case IPR_HOST_RCB_OVERLAY_ID_7:
2557                 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2558                 break;
2559         case IPR_HOST_RCB_OVERLAY_ID_12:
2560                 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2561                 break;
2562         case IPR_HOST_RCB_OVERLAY_ID_13:
2563                 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2564                 break;
2565         case IPR_HOST_RCB_OVERLAY_ID_14:
2566         case IPR_HOST_RCB_OVERLAY_ID_16:
2567                 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2568                 break;
2569         case IPR_HOST_RCB_OVERLAY_ID_17:
2570                 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2571                 break;
2572         case IPR_HOST_RCB_OVERLAY_ID_20:
2573                 ipr_log_fabric_error(ioa_cfg, hostrcb);
2574                 break;
2575         case IPR_HOST_RCB_OVERLAY_ID_21:
2576                 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2577                 break;
2578         case IPR_HOST_RCB_OVERLAY_ID_23:
2579                 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2580                 break;
2581         case IPR_HOST_RCB_OVERLAY_ID_24:
2582         case IPR_HOST_RCB_OVERLAY_ID_26:
2583                 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2584                 break;
2585         case IPR_HOST_RCB_OVERLAY_ID_30:
2586                 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2587                 break;
2588         case IPR_HOST_RCB_OVERLAY_ID_1:
2589         case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2590         default:
2591                 ipr_log_generic_error(ioa_cfg, hostrcb);
2592                 break;
2593         }
2594 }
2595
2596 static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
2597 {
2598         struct ipr_hostrcb *hostrcb;
2599
2600         hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
2601                                         struct ipr_hostrcb, queue);
2602
2603         if (unlikely(!hostrcb)) {
2604                 dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.\n");
2605                 hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
2606                                                 struct ipr_hostrcb, queue);
2607         }
2608
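        /* Assumes a hostrcb is always queued somewhere; no NULL check here. */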
2609         list_del_init(&hostrcb->queue);
2610         return hostrcb;
2611 }
2612
2613 /**
2614  * ipr_process_error - Op done function for an adapter error log.
2615  * @ipr_cmd:    ipr command struct
2616  *
2617  * This function is the op done function for an error log host
2618  * controlled async from the adapter. It will log the error and
2619  * send the HCAM back to the adapter.
2620  *
2621  * Return value:
2622  *      none
2623  **/
2624 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2625 {
2626         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2627         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2628         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2629         u32 fd_ioasc;
2630
2631         if (ioa_cfg->sis64)
2632                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2633         else
2634                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2635
2636         list_del_init(&hostrcb->queue);
2637         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2638
2639         if (!ioasc) {
2640                 ipr_handle_log_data(ioa_cfg, hostrcb);
2641                 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2642                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2643         } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2644                    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
2645                 dev_err(&ioa_cfg->pdev->dev,
2646                         "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2647         }
2648
2649         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
2650         schedule_work(&ioa_cfg->work_q);
2651         hostrcb = ipr_get_free_hostrcb(ioa_cfg);
2652
2653         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2654 }
2655
2656 /**
2657  * ipr_timeout - An internally generated op has timed out.
2658  * @t:          timer context used to fetch the ipr command struct
2659  *
2660  * This function blocks host requests and initiates an
2661  * adapter reset.
2662  *
2663  * Return value:
2664  *      none
2665  **/
2666 static void ipr_timeout(struct timer_list *t)
2667 {
2668         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
2669         unsigned long lock_flags = 0;
2670         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2671
2672         ENTER;
2673         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2674
2675         ioa_cfg->errors_logged++;
2676         dev_err(&ioa_cfg->pdev->dev,
2677                 "Adapter being reset due to command timeout.\n");
2678
2679         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2680                 ioa_cfg->sdt_state = GET_DUMP;
2681
2682         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2683                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2684
2685         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2686         LEAVE;
2687 }
2688
/**
 * ipr_oper_timeout -  Adapter timed out transitioning to operational
 * @t:	Timer context used to fetch ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 *	none
 **/
2699 static void ipr_oper_timeout(struct timer_list *t)
2700 {
2701         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
2702         unsigned long lock_flags = 0;
2703         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2704
2705         ENTER;
2706         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2707
2708         ioa_cfg->errors_logged++;
2709         dev_err(&ioa_cfg->pdev->dev,
2710                 "Adapter timed out transitioning to operational.\n");
2711
2712         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2713                 ioa_cfg->sdt_state = GET_DUMP;
2714
2715         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2716                 if (ipr_fastfail)
2717                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2718                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2719         }
2720
2721         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2722         LEAVE;
2723 }
2724
2725 /**
2726  * ipr_find_ses_entry - Find matching SES in SES table
2727  * @res:        resource entry struct of SES
2728  *
2729  * Return value:
2730  *      pointer to SES table entry / NULL on failure
2731  **/
2732 static const struct ipr_ses_table_entry *
2733 ipr_find_ses_entry(struct ipr_resource_entry *res)
2734 {
2735         int i, j, matches;
2736         struct ipr_std_inq_vpids *vpids;
2737         const struct ipr_ses_table_entry *ste = ipr_ses_table;
2738
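	/*
	 * An 'X' in compare_product_id_byte means the product ID byte in
	 * that position must match the table entry; any other value is
	 * treated as a wildcard and matches unconditionally.
	 */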
2739         for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2740                 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2741                         if (ste->compare_product_id_byte[j] == 'X') {
2742                                 vpids = &res->std_inq_data.vpids;
2743                                 if (vpids->product_id[j] == ste->product_id[j])
2744                                         matches++;
2745                                 else
2746                                         break;
2747                         } else
2748                                 matches++;
2749                 }
2750
2751                 if (matches == IPR_PROD_ID_LEN)
2752                         return ste;
2753         }
2754
2755         return NULL;
2756 }
2757
/**
 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 * @ioa_cfg:	ioa config struct
 * @bus:	SCSI bus
 * @bus_width:	bus width
 *
 * Return value:
 *	SCSI bus speed in units of 100KHz (e.g. 1600 is 160 MHz).
 *	For a 2-byte wide SCSI bus, the maximum transfer rate in
 *	MB/sec is twice the bus speed (e.g. a wide bus clocked at
 *	160 MHz transfers at most 320 MB/sec).
 **/
2770 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2771 {
2772         struct ipr_resource_entry *res;
2773         const struct ipr_ses_table_entry *ste;
2774         u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2775
2776         /* Loop through each config table entry in the config table buffer */
2777         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2778                 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2779                         continue;
2780
2781                 if (bus != res->bus)
2782                         continue;
2783
2784                 if (!(ste = ipr_find_ses_entry(res)))
2785                         continue;
2786
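		/* Scale the table's MB/sec limit to 100KHz units for this bus width */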
2787                 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2788         }
2789
2790         return max_xfer_rate;
2791 }
2792
2793 /**
2794  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2795  * @ioa_cfg:            ioa config struct
2796  * @max_delay:          max delay in micro-seconds to wait
2797  *
2798  * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2799  *
2800  * Return value:
2801  *      0 on success / other on failure
2802  **/
2803 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2804 {
2805         volatile u32 pcii_reg;
2806         int delay = 1;
2807
2808         /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2809         while (delay < max_delay) {
2810                 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2811
2812                 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2813                         return 0;
2814
2815                 /* udelay cannot be used if delay is more than a few milliseconds */
2816                 if ((delay / 1000) > MAX_UDELAY_MS)
2817                         mdelay(delay / 1000);
2818                 else
2819                         udelay(delay);
2820
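		/* Exponential backoff: double the delay each iteration */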
2821                 delay += delay;
2822         }
2823         return -EIO;
2824 }
2825
2826 /**
2827  * ipr_get_sis64_dump_data_section - Dump IOA memory
2828  * @ioa_cfg:                    ioa config struct
2829  * @start_addr:                 adapter address to dump
2830  * @dest:                       destination kernel buffer
2831  * @length_in_words:            length to dump in 4 byte words
2832  *
2833  * Return value:
2834  *      0 on success
2835  **/
2836 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2837                                            u32 start_addr,
2838                                            __be32 *dest, u32 length_in_words)
2839 {
2840         int i;
2841
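	/* Select each word by writing the dump address register, then read it back */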
2842         for (i = 0; i < length_in_words; i++) {
2843                 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2844                 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2845                 dest++;
2846         }
2847
2848         return 0;
2849 }
2850
/**
 * ipr_get_ldump_data_section - Dump IOA memory
 * @ioa_cfg:			ioa config struct
 * @start_addr:			adapter address to dump
 * @dest:			destination kernel buffer
 * @length_in_words:		length to dump in 4 byte words
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
2861 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2862                                       u32 start_addr,
2863                                       __be32 *dest, u32 length_in_words)
2864 {
2865         volatile u32 temp_pcii_reg;
2866         int i, delay = 0;
2867
2868         if (ioa_cfg->sis64)
2869                 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2870                                                        dest, length_in_words);
2871
2872         /* Write IOA interrupt reg starting LDUMP state  */
2873         writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2874                ioa_cfg->regs.set_uproc_interrupt_reg32);
2875
2876         /* Wait for IO debug acknowledge */
2877         if (ipr_wait_iodbg_ack(ioa_cfg,
2878                                IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2879                 dev_err(&ioa_cfg->pdev->dev,
2880                         "IOA dump long data transfer timeout\n");
2881                 return -EIO;
2882         }
2883
2884         /* Signal LDUMP interlocked - clear IO debug ack */
2885         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2886                ioa_cfg->regs.clr_interrupt_reg);
2887
2888         /* Write Mailbox with starting address */
2889         writel(start_addr, ioa_cfg->ioa_mailbox);
2890
2891         /* Signal address valid - clear IOA Reset alert */
2892         writel(IPR_UPROCI_RESET_ALERT,
2893                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2894
2895         for (i = 0; i < length_in_words; i++) {
2896                 /* Wait for IO debug acknowledge */
2897                 if (ipr_wait_iodbg_ack(ioa_cfg,
2898                                        IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2899                         dev_err(&ioa_cfg->pdev->dev,
2900                                 "IOA dump short data transfer timeout\n");
2901                         return -EIO;
2902                 }
2903
2904                 /* Read data from mailbox and increment destination pointer */
2905                 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2906                 dest++;
2907
2908                 /* For all but the last word of data, signal data received */
2909                 if (i < (length_in_words - 1)) {
2910                         /* Signal dump data received - Clear IO debug Ack */
2911                         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2912                                ioa_cfg->regs.clr_interrupt_reg);
2913                 }
2914         }
2915
2916         /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2917         writel(IPR_UPROCI_RESET_ALERT,
2918                ioa_cfg->regs.set_uproc_interrupt_reg32);
2919
2920         writel(IPR_UPROCI_IO_DEBUG_ALERT,
2921                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2922
2923         /* Signal dump data received - Clear IO debug Ack */
2924         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2925                ioa_cfg->regs.clr_interrupt_reg);
2926
2927         /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2928         while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2929                 temp_pcii_reg =
2930                     readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2931
2932                 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2933                         return 0;
2934
2935                 udelay(10);
2936                 delay += 10;
2937         }
2938
2939         return 0;
2940 }
2941
2942 #ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
 * @ioa_cfg:		ioa config struct
 * @pci_address:	adapter address
 * @length:		length of data to copy
 *
 * Copy data from PCI adapter to kernel buffer.
 * Note: length MUST be a 4 byte multiple
 *
 * Return value:
 *	0 on success / other on failure
 **/
2954 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2955                         unsigned long pci_address, u32 length)
2956 {
2957         int bytes_copied = 0;
2958         int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2959         __be32 *page;
2960         unsigned long lock_flags = 0;
2961         struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2962
2963         if (ioa_cfg->sis64)
2964                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2965         else
2966                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2967
2968         while (bytes_copied < length &&
2969                (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2970                 if (ioa_dump->page_offset >= PAGE_SIZE ||
2971                     ioa_dump->page_offset == 0) {
2972                         page = (__be32 *)__get_free_page(GFP_ATOMIC);
2973
2974                         if (!page) {
2975                                 ipr_trace;
2976                                 return bytes_copied;
2977                         }
2978
2979                         ioa_dump->page_offset = 0;
2980                         ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2981                         ioa_dump->next_page_index++;
2982                 } else
2983                         page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2984
2985                 rem_len = length - bytes_copied;
2986                 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2987                 cur_len = min(rem_len, rem_page_len);
2988
2989                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2990                 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2991                         rc = -EIO;
2992                 } else {
2993                         rc = ipr_get_ldump_data_section(ioa_cfg,
2994                                                         pci_address + bytes_copied,
2995                                                         &page[ioa_dump->page_offset / 4],
2996                                                         (cur_len / sizeof(u32)));
2997                 }
2998                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2999
3000                 if (!rc) {
3001                         ioa_dump->page_offset += cur_len;
3002                         bytes_copied += cur_len;
3003                 } else {
3004                         ipr_trace;
3005                         break;
3006                 }
3007                 schedule();
3008         }
3009
3010         return bytes_copied;
3011 }
3012
3013 /**
3014  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
3015  * @hdr:        dump entry header struct
3016  *
3017  * Return value:
3018  *      nothing
3019  **/
3020 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
3021 {
3022         hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
3023         hdr->num_elems = 1;
3024         hdr->offset = sizeof(*hdr);
3025         hdr->status = IPR_DUMP_STATUS_SUCCESS;
3026 }
3027
3028 /**
3029  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
3030  * @ioa_cfg:    ioa config struct
3031  * @driver_dump:        driver dump struct
3032  *
3033  * Return value:
3034  *      nothing
3035  **/
3036 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
3037                                    struct ipr_driver_dump *driver_dump)
3038 {
3039         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3040
3041         ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
3042         driver_dump->ioa_type_entry.hdr.len =
3043                 sizeof(struct ipr_dump_ioa_type_entry) -
3044                 sizeof(struct ipr_dump_entry_header);
3045         driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3046         driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
3047         driver_dump->ioa_type_entry.type = ioa_cfg->type;
3048         driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
3049                 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
3050                 ucode_vpd->minor_release[1];
3051         driver_dump->hdr.num_entries++;
3052 }
3053
3054 /**
3055  * ipr_dump_version_data - Fill in the driver version in the dump.
3056  * @ioa_cfg:    ioa config struct
3057  * @driver_dump:        driver dump struct
3058  *
3059  * Return value:
3060  *      nothing
3061  **/
3062 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
3063                                   struct ipr_driver_dump *driver_dump)
3064 {
3065         ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
3066         driver_dump->version_entry.hdr.len =
3067                 sizeof(struct ipr_dump_version_entry) -
3068                 sizeof(struct ipr_dump_entry_header);
3069         driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3070         driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
3071         strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3072         driver_dump->hdr.num_entries++;
3073 }
3074
3075 /**
3076  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3077  * @ioa_cfg:    ioa config struct
3078  * @driver_dump:        driver dump struct
3079  *
3080  * Return value:
3081  *      nothing
3082  **/
3083 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3084                                    struct ipr_driver_dump *driver_dump)
3085 {
3086         ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3087         driver_dump->trace_entry.hdr.len =
3088                 sizeof(struct ipr_dump_trace_entry) -
3089                 sizeof(struct ipr_dump_entry_header);
3090         driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3091         driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3092         memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3093         driver_dump->hdr.num_entries++;
3094 }
3095
3096 /**
3097  * ipr_dump_location_data - Fill in the IOA location in the dump.
3098  * @ioa_cfg:    ioa config struct
3099  * @driver_dump:        driver dump struct
3100  *
3101  * Return value:
3102  *      nothing
3103  **/
3104 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3105                                    struct ipr_driver_dump *driver_dump)
3106 {
3107         ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3108         driver_dump->location_entry.hdr.len =
3109                 sizeof(struct ipr_dump_location_entry) -
3110                 sizeof(struct ipr_dump_entry_header);
3111         driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3112         driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3113         strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3114         driver_dump->hdr.num_entries++;
3115 }
3116
3117 /**
3118  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3119  * @ioa_cfg:    ioa config struct
3120  * @dump:               dump struct
3121  *
3122  * Return value:
3123  *      nothing
3124  **/
3125 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3126 {
3127         unsigned long start_addr, sdt_word;
3128         unsigned long lock_flags = 0;
3129         struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3130         struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3131         u32 num_entries, max_num_entries, start_off, end_off;
3132         u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3133         struct ipr_sdt *sdt;
3134         int valid = 1;
3135         int i;
3136
3137         ENTER;
3138
3139         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3140
3141         if (ioa_cfg->sdt_state != READ_DUMP) {
3142                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3143                 return;
3144         }
3145
3146         if (ioa_cfg->sis64) {
3147                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3148                 ssleep(IPR_DUMP_DELAY_SECONDS);
3149                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3150         }
3151
3152         start_addr = readl(ioa_cfg->ioa_mailbox);
3153
3154         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3155                 dev_err(&ioa_cfg->pdev->dev,
3156                         "Invalid dump table format: %lx\n", start_addr);
3157                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3158                 return;
3159         }
3160
3161         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3162
3163         driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3164
3165         /* Initialize the overall dump header */
3166         driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3167         driver_dump->hdr.num_entries = 1;
3168         driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3169         driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3170         driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3171         driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3172
3173         ipr_dump_version_data(ioa_cfg, driver_dump);
3174         ipr_dump_location_data(ioa_cfg, driver_dump);
3175         ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3176         ipr_dump_trace_data(ioa_cfg, driver_dump);
3177
3178         /* Update dump_header */
3179         driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3180
3181         /* IOA Dump entry */
3182         ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3183         ioa_dump->hdr.len = 0;
3184         ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3185         ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3186
	/*
	 * First entries in sdt are actually a list of dump addresses and
	 * lengths to gather the real dump data.  sdt represents the pointer
	 * to the ioa generated dump table.  Dump data will be extracted based
	 * on entries in this table.
	 */
3191         sdt = &ioa_dump->sdt;
3192
3193         if (ioa_cfg->sis64) {
3194                 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3195                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3196         } else {
3197                 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3198                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3199         }
3200
3201         bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3202                         (max_num_entries * sizeof(struct ipr_sdt_entry));
3203         rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3204                                         bytes_to_copy / sizeof(__be32));
3205
3206         /* Smart Dump table is ready to use and the first entry is valid */
3207         if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3208             (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3209                 dev_err(&ioa_cfg->pdev->dev,
3210                         "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3211                         rc, be32_to_cpu(sdt->hdr.state));
3212                 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3213                 ioa_cfg->sdt_state = DUMP_OBTAINED;
3214                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3215                 return;
3216         }
3217
3218         num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3219
3220         if (num_entries > max_num_entries)
3221                 num_entries = max_num_entries;
3222
3223         /* Update dump length to the actual data to be copied */
3224         dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3225         if (ioa_cfg->sis64)
3226                 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3227         else
3228                 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3229
3230         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3231
3232         for (i = 0; i < num_entries; i++) {
3233                 if (ioa_dump->hdr.len > max_dump_size) {
3234                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3235                         break;
3236                 }
3237
3238                 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3239                         sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3240                         if (ioa_cfg->sis64)
3241                                 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3242                         else {
3243                                 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3244                                 end_off = be32_to_cpu(sdt->entry[i].end_token);
3245
3246                                 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3247                                         bytes_to_copy = end_off - start_off;
3248                                 else
3249                                         valid = 0;
3250                         }
3251                         if (valid) {
3252                                 if (bytes_to_copy > max_dump_size) {
3253                                         sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3254                                         continue;
3255                                 }
3256
3257                                 /* Copy data from adapter to driver buffers */
3258                                 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3259                                                             bytes_to_copy);
3260
3261                                 ioa_dump->hdr.len += bytes_copied;
3262
3263                                 if (bytes_copied != bytes_to_copy) {
3264                                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3265                                         break;
3266                                 }
3267                         }
3268                 }
3269         }
3270
3271         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3272
3273         /* Update dump_header */
3274         driver_dump->hdr.len += ioa_dump->hdr.len;
3275         wmb();
3276         ioa_cfg->sdt_state = DUMP_OBTAINED;
3277         LEAVE;
3278 }
3279
3280 #else
3281 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3282 #endif
3283
3284 /**
3285  * ipr_release_dump - Free adapter dump memory
3286  * @kref:       kref struct
3287  *
3288  * Return value:
3289  *      nothing
3290  **/
3291 static void ipr_release_dump(struct kref *kref)
3292 {
3293         struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3294         struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3295         unsigned long lock_flags = 0;
3296         int i;
3297
3298         ENTER;
3299         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3300         ioa_cfg->dump = NULL;
3301         ioa_cfg->sdt_state = INACTIVE;
3302         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3303
3304         for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3305                 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3306
3307         vfree(dump->ioa_dump.ioa_data);
3308         kfree(dump);
3309         LEAVE;
3310 }
3311
/**
 * ipr_worker_thread - Worker thread
 * @work:	ioa config struct
 *
 * Called at task level from a work thread. This function takes care
 * of adding and removing devices from the mid-layer as configuration
 * changes are detected by the adapter.
 *
 * Return value:
 *	nothing
 **/
3323 static void ipr_worker_thread(struct work_struct *work)
3324 {
3325         unsigned long lock_flags;
3326         struct ipr_resource_entry *res;
3327         struct scsi_device *sdev;
3328         struct ipr_dump *dump;
3329         struct ipr_ioa_cfg *ioa_cfg =
3330                 container_of(work, struct ipr_ioa_cfg, work_q);
3331         u8 bus, target, lun;
3332         int did_work;
3333
3334         ENTER;
3335         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3336
3337         if (ioa_cfg->sdt_state == READ_DUMP) {
3338                 dump = ioa_cfg->dump;
3339                 if (!dump) {
3340                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3341                         return;
3342                 }
3343                 kref_get(&dump->kref);
3344                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3345                 ipr_get_ioa_dump(ioa_cfg, dump);
3346                 kref_put(&dump->kref, ipr_release_dump);
3347
3348                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3349                 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3350                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3351                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3352                 return;
3353         }
3354
3355         if (ioa_cfg->scsi_unblock) {
3356                 ioa_cfg->scsi_unblock = 0;
3357                 ioa_cfg->scsi_blocked = 0;
3358                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3359                 scsi_unblock_requests(ioa_cfg->host);
3360                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3361                 if (ioa_cfg->scsi_blocked)
3362                         scsi_block_requests(ioa_cfg->host);
3363         }
3364
3365         if (!ioa_cfg->scan_enabled) {
3366                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3367                 return;
3368         }
3369
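	/*
	 * Adding/removing devices drops the host lock around midlayer
	 * calls, so rescan the resource list from the top after every
	 * change.
	 */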
3370 restart:
3371         do {
3372                 did_work = 0;
3373                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3374                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3375                         return;
3376                 }
3377
3378                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3379                         if (res->del_from_ml && res->sdev) {
3380                                 did_work = 1;
3381                                 sdev = res->sdev;
3382                                 if (!scsi_device_get(sdev)) {
3383                                         if (!res->add_to_ml)
3384                                                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3385                                         else
3386                                                 res->del_from_ml = 0;
3387                                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3388                                         scsi_remove_device(sdev);
3389                                         scsi_device_put(sdev);
3390                                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3391                                 }
3392                                 break;
3393                         }
3394                 }
3395         } while (did_work);
3396
3397         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3398                 if (res->add_to_ml) {
3399                         bus = res->bus;
3400                         target = res->target;
3401                         lun = res->lun;
3402                         res->add_to_ml = 0;
3403                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3404                         scsi_add_device(ioa_cfg->host, bus, target, lun);
3405                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3406                         goto restart;
3407                 }
3408         }
3409
3410         ioa_cfg->scan_done = 1;
3411         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3412         kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3413         LEAVE;
3414 }
3415
3416 #ifdef CONFIG_SCSI_IPR_TRACE
3417 /**
3418  * ipr_read_trace - Dump the adapter trace
3419  * @filp:               open sysfs file
3420  * @kobj:               kobject struct
3421  * @bin_attr:           bin_attribute struct
3422  * @buf:                buffer
3423  * @off:                offset
3424  * @count:              buffer size
3425  *
3426  * Return value:
3427  *      number of bytes printed to buffer
3428  **/
3429 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3430                               struct bin_attribute *bin_attr,
3431                               char *buf, loff_t off, size_t count)
3432 {
3433         struct device *dev = container_of(kobj, struct device, kobj);
3434         struct Scsi_Host *shost = class_to_shost(dev);
3435         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3436         unsigned long lock_flags = 0;
3437         ssize_t ret;
3438
3439         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3440         ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3441                                 IPR_TRACE_SIZE);
3442         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3443
3444         return ret;
3445 }
3446
3447 static struct bin_attribute ipr_trace_attr = {
3448         .attr = {
3449                 .name = "trace",
3450                 .mode = S_IRUGO,
3451         },
3452         .size = 0,
3453         .read = ipr_read_trace,
3454 };
3455 #endif
3456
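/*
 * The attributes below are registered on the SCSI host class device;
 * with the standard sysfs layout they appear as, e.g.,
 * /sys/class/scsi_host/host<N>/fw_version (<N> is the host number).
 */
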
3457 /**
3458  * ipr_show_fw_version - Show the firmware version
3459  * @dev:        class device struct
3460  * @buf:        buffer
3461  *
3462  * Return value:
3463  *      number of bytes printed to buffer
3464  **/
3465 static ssize_t ipr_show_fw_version(struct device *dev,
3466                                    struct device_attribute *attr, char *buf)
3467 {
3468         struct Scsi_Host *shost = class_to_shost(dev);
3469         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3470         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3471         unsigned long lock_flags = 0;
3472         int len;
3473
3474         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3475         len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3476                        ucode_vpd->major_release, ucode_vpd->card_type,
3477                        ucode_vpd->minor_release[0],
3478                        ucode_vpd->minor_release[1]);
3479         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3480         return len;
3481 }
3482
3483 static struct device_attribute ipr_fw_version_attr = {
3484         .attr = {
3485                 .name =         "fw_version",
3486                 .mode =         S_IRUGO,
3487         },
3488         .show = ipr_show_fw_version,
3489 };
3490
3491 /**
3492  * ipr_show_log_level - Show the adapter's error logging level
3493  * @dev:        class device struct
3494  * @buf:        buffer
3495  *
3496  * Return value:
3497  *      number of bytes printed to buffer
3498  **/
3499 static ssize_t ipr_show_log_level(struct device *dev,
3500                                    struct device_attribute *attr, char *buf)
3501 {
3502         struct Scsi_Host *shost = class_to_shost(dev);
3503         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3504         unsigned long lock_flags = 0;
3505         int len;
3506
3507         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3508         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3509         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3510         return len;
3511 }
3512
/**
 * ipr_store_log_level - Change the adapter's error logging level
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes consumed from buffer
 **/
3521 static ssize_t ipr_store_log_level(struct device *dev,
3522                                    struct device_attribute *attr,
3523                                    const char *buf, size_t count)
3524 {
3525         struct Scsi_Host *shost = class_to_shost(dev);
3526         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3527         unsigned long lock_flags = 0;
3528
3529         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3530         ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3531         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3532         return strlen(buf);
3533 }
3534
3535 static struct device_attribute ipr_log_level_attr = {
3536         .attr = {
3537                 .name =         "log_level",
3538                 .mode =         S_IRUGO | S_IWUSR,
3539         },
3540         .show = ipr_show_log_level,
3541         .store = ipr_store_log_level
3542 };
3543
3544 /**
3545  * ipr_store_diagnostics - IOA Diagnostics interface
3546  * @dev:        device struct
3547  * @buf:        buffer
3548  * @count:      buffer size
3549  *
3550  * This function will reset the adapter and wait a reasonable
3551  * amount of time for any errors that the adapter might log.
3552  *
3553  * Return value:
3554  *      count on success / other on failure
3555  **/
3556 static ssize_t ipr_store_diagnostics(struct device *dev,
3557                                      struct device_attribute *attr,
3558                                      const char *buf, size_t count)
3559 {
3560         struct Scsi_Host *shost = class_to_shost(dev);
3561         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3562         unsigned long lock_flags = 0;
3563         int rc = count;
3564
3565         if (!capable(CAP_SYS_ADMIN))
3566                 return -EACCES;
3567
3568         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3569         while (ioa_cfg->in_reset_reload) {
3570                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3571                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3572                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3573         }
3574
3575         ioa_cfg->errors_logged = 0;
3576         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3577
3578         if (ioa_cfg->in_reset_reload) {
3579                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3580                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3581
3582                 /* Wait for a second for any errors to be logged */
3583                 msleep(1000);
3584         } else {
3585                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3586                 return -EIO;
3587         }
3588
3589         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3590         if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3591                 rc = -EIO;
3592         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3593
3594         return rc;
3595 }
3596
3597 static struct device_attribute ipr_diagnostics_attr = {
3598         .attr = {
3599                 .name =         "run_diagnostics",
3600                 .mode =         S_IWUSR,
3601         },
3602         .store = ipr_store_diagnostics
3603 };
3604
/**
 * ipr_show_adapter_state - Show the adapter's state
 * @dev:	device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
3613 static ssize_t ipr_show_adapter_state(struct device *dev,
3614                                       struct device_attribute *attr, char *buf)
3615 {
3616         struct Scsi_Host *shost = class_to_shost(dev);
3617         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3618         unsigned long lock_flags = 0;
3619         int len;
3620
3621         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3622         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3623                 len = snprintf(buf, PAGE_SIZE, "offline\n");
3624         else
3625                 len = snprintf(buf, PAGE_SIZE, "online\n");
3626         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3627         return len;
3628 }
3629
3630 /**
3631  * ipr_store_adapter_state - Change adapter state
3632  * @dev:        device struct
3633  * @buf:        buffer
3634  * @count:      buffer size
3635  *
3636  * This function will change the adapter's state.
3637  *
3638  * Return value:
3639  *      count on success / other on failure
3640  **/
3641 static ssize_t ipr_store_adapter_state(struct device *dev,
3642                                        struct device_attribute *attr,
3643                                        const char *buf, size_t count)
3644 {
3645         struct Scsi_Host *shost = class_to_shost(dev);
3646         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3647         unsigned long lock_flags;
3648         int result = count, i;
3649
3650         if (!capable(CAP_SYS_ADMIN))
3651                 return -EACCES;
3652
3653         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3654         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3655             !strncmp(buf, "online", 6)) {
3656                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3657                         spin_lock(&ioa_cfg->hrrq[i]._lock);
3658                         ioa_cfg->hrrq[i].ioa_is_dead = 0;
3659                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
3660                 }
3661                 wmb();
3662                 ioa_cfg->reset_retries = 0;
3663                 ioa_cfg->in_ioa_bringdown = 0;
3664                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3665         }
3666         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3667         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3668
3669         return result;
3670 }
3671
3672 static struct device_attribute ipr_ioa_state_attr = {
3673         .attr = {
3674                 .name =         "online_state",
3675                 .mode =         S_IRUGO | S_IWUSR,
3676         },
3677         .show = ipr_show_adapter_state,
3678         .store = ipr_store_adapter_state
3679 };
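
/*
 * Usage sketch (host number illustrative): writing "online" brings a
 * dead adapter back by resetting it; other values are accepted but
 * ignored:
 *
 *	echo online > /sys/class/scsi_host/host0/online_state
 */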
3680
3681 /**
3682  * ipr_store_reset_adapter - Reset the adapter
3683  * @dev:        device struct
3684  * @buf:        buffer
3685  * @count:      buffer size
3686  *
3687  * This function will reset the adapter.
3688  *
3689  * Return value:
3690  *      count on success / other on failure
3691  **/
3692 static ssize_t ipr_store_reset_adapter(struct device *dev,
3693                                        struct device_attribute *attr,
3694                                        const char *buf, size_t count)
3695 {
3696         struct Scsi_Host *shost = class_to_shost(dev);
3697         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3698         unsigned long lock_flags;
3699         int result = count;
3700
3701         if (!capable(CAP_SYS_ADMIN))
3702                 return -EACCES;
3703
3704         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3705         if (!ioa_cfg->in_reset_reload)
3706                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3707         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3708         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3709
3710         return result;
3711 }
3712
3713 static struct device_attribute ipr_ioa_reset_attr = {
3714         .attr = {
3715                 .name =         "reset_host",
3716                 .mode =         S_IWUSR,
3717         },
3718         .store = ipr_store_reset_adapter
3719 };
3720
3721 static int ipr_iopoll(struct irq_poll *iop, int budget);
/**
 * ipr_show_iopoll_weight - Show ipr polling mode
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
3730 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3731                                    struct device_attribute *attr, char *buf)
3732 {
3733         struct Scsi_Host *shost = class_to_shost(dev);
3734         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3735         unsigned long lock_flags = 0;
3736         int len;
3737
3738         spin_lock_irqsave(shost->host_lock, lock_flags);
3739         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3740         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3741
3742         return len;
3743 }
3744
/**
 * ipr_store_iopoll_weight - Change the adapter's polling mode
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes consumed on success / -EINVAL on failure
 **/
3753 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3754                                         struct device_attribute *attr,
3755                                         const char *buf, size_t count)
3756 {
3757         struct Scsi_Host *shost = class_to_shost(dev);
3758         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3759         unsigned long user_iopoll_weight;
3760         unsigned long lock_flags = 0;
3761         int i;
3762
3763         if (!ioa_cfg->sis64) {
3764                 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
3765                 return -EINVAL;
3766         }
3767         if (kstrtoul(buf, 10, &user_iopoll_weight))
3768                 return -EINVAL;
3769
	if (user_iopoll_weight > 256) {
		dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must not exceed 256\n");
		return -EINVAL;
	}
3774
	if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
		dev_info(&ioa_cfg->pdev->dev, "irq_poll weight unchanged; it is already set to that value\n");
		return strlen(buf);
	}
3779
3780         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3781                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3782                         irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
3783         }
3784
3785         spin_lock_irqsave(shost->host_lock, lock_flags);
3786         ioa_cfg->iopoll_weight = user_iopoll_weight;
3787         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3788                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3789                         irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
3790                                         ioa_cfg->iopoll_weight, ipr_iopoll);
3791                 }
3792         }
3793         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3794
3795         return strlen(buf);
3796 }
3797
3798 static struct device_attribute ipr_iopoll_weight_attr = {
3799         .attr = {
3800                 .name =         "iopoll_weight",
3801                 .mode =         S_IRUGO | S_IWUSR,
3802         },
3803         .show = ipr_show_iopoll_weight,
3804         .store = ipr_store_iopoll_weight
3805 };
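
/*
 * Usage sketch (host number illustrative): a non-zero weight enables
 * irq_poll on sis64 adapters with multiple HRRQs; 0 disables it:
 *
 *	echo 64 > /sys/class/scsi_host/host0/iopoll_weight
 */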
3806
3807 /**
3808  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3809  * @buf_len:            buffer length
3810  *
3811  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3812  * list to use for microcode download
3813  *
3814  * Return value:
3815  *      pointer to sglist / NULL on failure
3816  **/
3817 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3818 {
3819         int sg_size, order, bsize_elem, num_elem, i, j;
3820         struct ipr_sglist *sglist;
3821         struct scatterlist *scatterlist;
3822         struct page *page;
3823
3824         /* Get the minimum size per scatter/gather element */
3825         sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3826
3827         /* Get the actual size per element */
3828         order = get_order(sg_size);
3829
3830         /* Determine the actual number of bytes per element */
3831         bsize_elem = PAGE_SIZE * (1 << order);
3832
3833         /* Determine the actual number of sg entries needed */
3834         if (buf_len % bsize_elem)
3835                 num_elem = (buf_len / bsize_elem) + 1;
3836         else
3837                 num_elem = buf_len / bsize_elem;
3838
3839         /* Allocate a scatter/gather list for the DMA */
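	/*
	 * struct ipr_sglist declares one scatterlist entry inline, so
	 * only num_elem - 1 additional entries are allocated here.
	 * Illustrative sizing, assuming IPR_MAX_SGLIST is 64 and 4KB
	 * pages: a 256KB image gives sg_size ~4KB, order 1, bsize_elem
	 * 8KB and num_elem 32.
	 */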
3840         sglist = kzalloc(sizeof(struct ipr_sglist) +
3841                          (sizeof(struct scatterlist) * (num_elem - 1)),
3842                          GFP_KERNEL);
3843
3844         if (sglist == NULL) {
3845                 ipr_trace;
3846                 return NULL;
3847         }
3848
3849         scatterlist = sglist->scatterlist;
3850         sg_init_table(scatterlist, num_elem);
3851
3852         sglist->order = order;
3853         sglist->num_sg = num_elem;
3854
3855         /* Allocate a bunch of sg elements */
3856         for (i = 0; i < num_elem; i++) {
3857                 page = alloc_pages(GFP_KERNEL, order);
3858                 if (!page) {
3859                         ipr_trace;
3860
3861                         /* Free up what we already allocated */
3862                         for (j = i - 1; j >= 0; j--)
3863                                 __free_pages(sg_page(&scatterlist[j]), order);
3864                         kfree(sglist);
3865                         return NULL;
3866                 }
3867
3868                 sg_set_page(&scatterlist[i], page, 0, 0);
3869         }
3870
3871         return sglist;
3872 }
3873
/**
 * ipr_free_ucode_buffer - Frees a microcode download buffer
 * @sglist:		scatter/gather list pointer
 *
 * Free a DMA'able ucode download buffer previously allocated with
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 *	nothing
 **/
3884 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3885 {
3886         int i;
3887
3888         for (i = 0; i < sglist->num_sg; i++)
3889                 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3890
3891         kfree(sglist);
3892 }
3893
3894 /**
3895  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3896  * @sglist:             scatter/gather list pointer
3897  * @buffer:             buffer pointer
3898  * @len:                buffer length
3899  *
3900  * Copy a microcode image from a user buffer into a buffer allocated by
3901  * ipr_alloc_ucode_buffer
3902  *
3903  * Return value:
3904  *      0 on success / other on failure
3905  **/
3906 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3907                                  u8 *buffer, u32 len)
3908 {
3909         int bsize_elem, i, result = 0;
3910         struct scatterlist *scatterlist;
3911         void *kaddr;
3912
3913         /* Determine the actual number of bytes per element */
3914         bsize_elem = PAGE_SIZE * (1 << sglist->order);
3915
3916         scatterlist = sglist->scatterlist;
3917
3918         for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3919                 struct page *page = sg_page(&scatterlist[i]);
3920
3921                 kaddr = kmap(page);
3922                 memcpy(kaddr, buffer, bsize_elem);
3923                 kunmap(page);
3924
3925                 scatterlist[i].length = bsize_elem;
3931         }
3932
3933         if (len % bsize_elem) {
3934                 struct page *page = sg_page(&scatterlist[i]);
3935
3936                 kaddr = kmap(page);
3937                 memcpy(kaddr, buffer, len % bsize_elem);
3938                 kunmap(page);
3939
3940                 scatterlist[i].length = len % bsize_elem;
3941         }
3942
3943         sglist->buffer_len = len;
3944         return result;
3945 }
3946
3947 /**
3948  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3949  * @ipr_cmd:            ipr command struct
3950  * @sglist:             scatter/gather list
3951  *
3952  * Builds a microcode download IOA data list (IOADL).
3953  *
3954  **/
3955 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3956                                     struct ipr_sglist *sglist)
3957 {
3958         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3959         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3960         struct scatterlist *scatterlist = sglist->scatterlist;
3961         int i;
3962
3963         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3964         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3965         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3966
3967         ioarcb->ioadl_len =
3968                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3969         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3970                 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3971                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3972                 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3973         }
3974
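	/* Mark the final descriptor so the IOA knows where the list ends */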
3975         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3976 }
3977
3978 /**
3979  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3980  * @ipr_cmd:    ipr command struct
3981  * @sglist:             scatter/gather list
3982  *
3983  * Builds a microcode download IOA data list (IOADL).
3984  *
3985  **/
3986 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3987                                   struct ipr_sglist *sglist)
3988 {
3989         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3990         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3991         struct scatterlist *scatterlist = sglist->scatterlist;
3992         int i;
3993
3994         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3995         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3996         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3997
3998         ioarcb->ioadl_len =
3999                 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4000
4001         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
4002                 ioadl[i].flags_and_data_len =
4003                         cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
4004                 ioadl[i].address =
4005                         cpu_to_be32(sg_dma_address(&scatterlist[i]));
4006         }
4007
4008         ioadl[i-1].flags_and_data_len |=
4009                 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4010 }
4011
4012 /**
4013  * ipr_update_ioa_ucode - Update IOA's microcode
4014  * @ioa_cfg:    ioa config struct
4015  * @sglist:             scatter/gather list
4016  *
4017  * Initiate an adapter reset to update the IOA's microcode
4018  *
4019  * Return value:
4020  *      0 on success / -EIO on failure
4021  **/
4022 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
4023                                 struct ipr_sglist *sglist)
4024 {
4025         unsigned long lock_flags;
4026
4027         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4028         while (ioa_cfg->in_reset_reload) {
4029                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4030                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4031                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4032         }
4033
4034         if (ioa_cfg->ucode_sglist) {
4035                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4036                 dev_err(&ioa_cfg->pdev->dev,
4037                         "Microcode download already in progress\n");
4038                 return -EIO;
4039         }
4040
4041         sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
4042                                         sglist->scatterlist, sglist->num_sg,
4043                                         DMA_TO_DEVICE);
4044
4045         if (!sglist->num_dma_sg) {
4046                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4047                 dev_err(&ioa_cfg->pdev->dev,
4048                         "Failed to map microcode download buffer!\n");
4049                 return -EIO;
4050         }
4051
4052         ioa_cfg->ucode_sglist = sglist;
4053         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
4054         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4055         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4056
4057         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4058         ioa_cfg->ucode_sglist = NULL;
4059         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4060         return 0;
4061 }
4062
/**
 * ipr_store_update_fw - Update the firmware on the adapter
 * @dev:	device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will update the firmware on the adapter.
 *
 * Return value:
 *	count on success / other on failure
 **/
4074 static ssize_t ipr_store_update_fw(struct device *dev,
4075                                    struct device_attribute *attr,
4076                                    const char *buf, size_t count)
4077 {
4078         struct Scsi_Host *shost = class_to_shost(dev);
4079         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4080         struct ipr_ucode_image_header *image_hdr;
4081         const struct firmware *fw_entry;
4082         struct ipr_sglist *sglist;
4083         char fname[100];
4084         u8 *src;
4085         char *endline;
4086         int result, dnld_size;
4087
4088         if (!capable(CAP_SYS_ADMIN))
4089                 return -EACCES;
4090
4091         snprintf(fname, sizeof(fname), "%s", buf);
4092
4093         endline = strchr(fname, '\n');
4094         if (endline)
4095                 *endline = '\0';
4096
4097         if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4098                 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4099                 return -EIO;
4100         }
4101
4102         image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4103
4104         src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4105         dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4106         sglist = ipr_alloc_ucode_buffer(dnld_size);
4107
4108         if (!sglist) {
4109                 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4110                 release_firmware(fw_entry);
4111                 return -ENOMEM;
4112         }
4113
4114         result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4115
4116         if (result) {
4117                 dev_err(&ioa_cfg->pdev->dev,
4118                         "Microcode buffer copy to DMA buffer failed\n");
4119                 goto out;
4120         }
4121
4122         ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
4123
4124         result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4125
4126         if (!result)
4127                 result = count;
4128 out:
4129         ipr_free_ucode_buffer(sglist);
4130         release_firmware(fw_entry);
4131         return result;
4132 }
4133
4134 static struct device_attribute ipr_update_fw_attr = {
4135         .attr = {
4136                 .name =         "update_fw",
4137                 .mode =         S_IWUSR,
4138         },
4139         .store = ipr_store_update_fw
4140 };
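/*
 * Usage sketch (the host number and image name below are hypothetical):
 * the update is driven from user space by writing a firmware image name
 * to this attribute; request_firmware() then resolves that name against
 * the standard firmware search path (e.g. /lib/firmware):
 *
 *	# echo ibm-adapter-ucode.bin > /sys/class/scsi_host/host0/update_fw
 */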
4141
4142 /**
4143  * ipr_show_fw_type - Show the adapter's firmware type.
4144  * @dev:        class device struct
4145  * @buf:        buffer
4146  *
4147  * Return value:
4148  *      number of bytes printed to buffer
4149  **/
4150 static ssize_t ipr_show_fw_type(struct device *dev,
4151                                 struct device_attribute *attr, char *buf)
4152 {
4153         struct Scsi_Host *shost = class_to_shost(dev);
4154         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4155         unsigned long lock_flags = 0;
4156         int len;
4157
4158         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4159         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4160         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4161         return len;
4162 }
4163
4164 static struct device_attribute ipr_ioa_fw_type_attr = {
4165         .attr = {
4166                 .name =         "fw_type",
4167                 .mode =         S_IRUGO,
4168         },
4169         .show = ipr_show_fw_type
4170 };
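/*
 * Reading fw_type simply reports ioa_cfg->sis64, so a SIS-64 adapter
 * returns 1 and a SIS-32 adapter returns 0 (hypothetical host number):
 *
 *	# cat /sys/class/scsi_host/host0/fw_type
 *	1
 */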
4171
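/**
 * ipr_read_async_err_log - Read the oldest queued asynchronous error HCAM
 * @filep:              open sysfs file
 * @kobj:               kobject struct
 * @bin_attr:           bin_attribute struct
 * @buf:                buffer
 * @off:                offset
 * @count:              buffer size
 *
 * Return value:
 *      number of bytes read / 0 if no error log entry is queued
 **/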
4172 static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
4173                                 struct bin_attribute *bin_attr, char *buf,
4174                                 loff_t off, size_t count)
4175 {
4176         struct device *cdev = container_of(kobj, struct device, kobj);
4177         struct Scsi_Host *shost = class_to_shost(cdev);
4178         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4179         struct ipr_hostrcb *hostrcb;
4180         unsigned long lock_flags = 0;
4181         int ret;
4182
4183         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4184         hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4185                                         struct ipr_hostrcb, queue);
4186         if (!hostrcb) {
4187                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4188                 return 0;
4189         }
4190         ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
4191                                 sizeof(hostrcb->hcam));
4192         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4193         return ret;
4194 }
4195
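/**
 * ipr_next_async_err_log - Retire the oldest queued error HCAM
 * @filep:              open sysfs file
 * @kobj:               kobject struct
 * @bin_attr:           bin_attribute struct
 * @buf:                buffer (ignored)
 * @off:                offset
 * @count:              buffer size
 *
 * Moves the oldest entry from the report queue back to the free queue
 * so that the next read returns the following entry.
 *
 * Return value:
 *      count
 **/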
4196 static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
4197                                 struct bin_attribute *bin_attr, char *buf,
4198                                 loff_t off, size_t count)
4199 {
4200         struct device *cdev = container_of(kobj, struct device, kobj);
4201         struct Scsi_Host *shost = class_to_shost(cdev);
4202         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4203         struct ipr_hostrcb *hostrcb;
4204         unsigned long lock_flags = 0;
4205
4206         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4207         hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4208                                         struct ipr_hostrcb, queue);
4209         if (!hostrcb) {
4210                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4211                 return count;
4212         }
4213
4214         /* Reclaim hostrcb before exit */
4215         list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4216         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4217         return count;
4218 }
4219
4220 static struct bin_attribute ipr_ioa_async_err_log = {
4221         .attr = {
4222                 .name =         "async_err_log",
4223                 .mode =         S_IRUGO | S_IWUSR,
4224         },
4225         .size = 0,
4226         .read = ipr_read_async_err_log,
4227         .write = ipr_next_async_err_log
4228 };
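/*
 * Usage sketch (hypothetical host number; sysfs placement assumed to be
 * the scsi_host class device): read async_err_log to fetch the raw HCAM
 * of the oldest unretired error, then write anything to it to advance
 * to the next entry:
 *
 *	# dd if=/sys/class/scsi_host/host0/async_err_log of=hcam.bin
 *	# echo > /sys/class/scsi_host/host0/async_err_log
 */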
4229
4230 static struct device_attribute *ipr_ioa_attrs[] = {
4231         &ipr_fw_version_attr,
4232         &ipr_log_level_attr,
4233         &ipr_diagnostics_attr,
4234         &ipr_ioa_state_attr,
4235         &ipr_ioa_reset_attr,
4236         &ipr_update_fw_attr,
4237         &ipr_ioa_fw_type_attr,
4238         &ipr_iopoll_weight_attr,
4239         NULL,
4240 };
4241
4242 #ifdef CONFIG_SCSI_IPR_DUMP
4243 /**
4244  * ipr_read_dump - Dump the adapter
4245  * @filp:               open sysfs file
4246  * @kobj:               kobject struct
4247  * @bin_attr:           bin_attribute struct
4248  * @buf:                buffer
4249  * @off:                offset
4250  * @count:              buffer size
4251  *
4252  * Return value:
4253  *      number of bytes read from the dump
4254  **/
4255 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4256                              struct bin_attribute *bin_attr,
4257                              char *buf, loff_t off, size_t count)
4258 {
4259         struct device *cdev = container_of(kobj, struct device, kobj);
4260         struct Scsi_Host *shost = class_to_shost(cdev);
4261         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4262         struct ipr_dump *dump;
4263         unsigned long lock_flags = 0;
4264         char *src;
4265         int len, sdt_end;
4266         size_t rc = count;
4267
4268         if (!capable(CAP_SYS_ADMIN))
4269                 return -EACCES;
4270
4271         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4272         dump = ioa_cfg->dump;
4273
4274         if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4275                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4276                 return 0;
4277         }
4278         kref_get(&dump->kref);
4279         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4280
4281         if (off > dump->driver_dump.hdr.len) {
4282                 kref_put(&dump->kref, ipr_release_dump);
4283                 return 0;
4284         }
4285
4286         if (off + count > dump->driver_dump.hdr.len) {
4287                 count = dump->driver_dump.hdr.len - off;
4288                 rc = count;
4289         }
4290
4291         if (count && off < sizeof(dump->driver_dump)) {
4292                 if (off + count > sizeof(dump->driver_dump))
4293                         len = sizeof(dump->driver_dump) - off;
4294                 else
4295                         len = count;
4296                 src = (u8 *)&dump->driver_dump + off;
4297                 memcpy(buf, src, len);
4298                 buf += len;
4299                 off += len;
4300                 count -= len;
4301         }
4302
4303         off -= sizeof(dump->driver_dump);
4304
4305         if (ioa_cfg->sis64)
4306                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4307                           (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4308                            sizeof(struct ipr_sdt_entry));
4309         else
4310                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4311                           (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4312
4313         if (count && off < sdt_end) {
4314                 if (off + count > sdt_end)
4315                         len = sdt_end - off;
4316                 else
4317                         len = count;
4318                 src = (u8 *)&dump->ioa_dump + off;
4319                 memcpy(buf, src, len);
4320                 buf += len;
4321                 off += len;
4322                 count -= len;
4323         }
4324
4325         off -= sdt_end;
4326
4327         while (count) {
4328                 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4329                         len = PAGE_ALIGN(off) - off;
4330                 else
4331                         len = count;
4332                 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4333                 src += off & ~PAGE_MASK;
4334                 memcpy(buf, src, len);
4335                 buf += len;
4336                 off += len;
4337                 count -= len;
4338         }
4339
4340         kref_put(&dump->kref, ipr_release_dump);
4341         return rc;
4342 }
4343
4344 /**
4345  * ipr_alloc_dump - Prepare for adapter dump
4346  * @ioa_cfg:    ioa config struct
4347  *
4348  * Return value:
4349  *      0 on success / other on failure
4350  **/
4351 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4352 {
4353         struct ipr_dump *dump;
4354         __be32 **ioa_data;
4355         unsigned long lock_flags = 0;
4356
4357         dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4358
4359         if (!dump) {
4360                 ipr_err("Dump memory allocation failed\n");
4361                 return -ENOMEM;
4362         }
4363
4364         if (ioa_cfg->sis64)
4365                 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4366         else
4367                 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4368
4369         if (!ioa_data) {
4370                 ipr_err("Dump memory allocation failed\n");
4371                 kfree(dump);
4372                 return -ENOMEM;
4373         }
4374
4375         dump->ioa_dump.ioa_data = ioa_data;
4376
4377         kref_init(&dump->kref);
4378         dump->ioa_cfg = ioa_cfg;
4379
4380         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4381
4382         if (INACTIVE != ioa_cfg->sdt_state) {
4383                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4384                 vfree(dump->ioa_dump.ioa_data);
4385                 kfree(dump);
4386                 return 0;
4387         }
4388
4389         ioa_cfg->dump = dump;
4390         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4391         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4392                 ioa_cfg->dump_taken = 1;
4393                 schedule_work(&ioa_cfg->work_q);
4394         }
4395         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4396
4397         return 0;
4398 }
4399
4400 /**
4401  * ipr_free_dump - Free adapter dump memory
4402  * @ioa_cfg:    ioa config struct
4403  *
4404  * Return value:
4405  *      0 on success / other on failure
4406  **/
4407 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4408 {
4409         struct ipr_dump *dump;
4410         unsigned long lock_flags = 0;
4411
4412         ENTER;
4413
4414         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4415         dump = ioa_cfg->dump;
4416         if (!dump) {
4417                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4418                 return 0;
4419         }
4420
4421         ioa_cfg->dump = NULL;
4422         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4423
4424         kref_put(&dump->kref, ipr_release_dump);
4425
4426         LEAVE;
4427         return 0;
4428 }
4429
4430 /**
4431  * ipr_write_dump - Setup dump state of adapter
4432  * @filp:               open sysfs file
4433  * @kobj:               kobject struct
4434  * @bin_attr:           bin_attribute struct
4435  * @buf:                buffer
4436  * @off:                offset
4437  * @count:              buffer size
4438  *
4439  * Return value:
4440  *      count on success / other on failure
4441  **/
4442 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4443                               struct bin_attribute *bin_attr,
4444                               char *buf, loff_t off, size_t count)
4445 {
4446         struct device *cdev = container_of(kobj, struct device, kobj);
4447         struct Scsi_Host *shost = class_to_shost(cdev);
4448         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4449         int rc;
4450
4451         if (!capable(CAP_SYS_ADMIN))
4452                 return -EACCES;
4453
4454         if (buf[0] == '1')
4455                 rc = ipr_alloc_dump(ioa_cfg);
4456         else if (buf[0] == '0')
4457                 rc = ipr_free_dump(ioa_cfg);
4458         else
4459                 return -EINVAL;
4460
4461         if (rc)
4462                 return rc;
4463         else
4464                 return count;
4465 }
4466
4467 static struct bin_attribute ipr_dump_attr = {
4468         .attr = {
4469                 .name = "dump",
4470                 .mode = S_IRUSR | S_IWUSR,
4471         },
4472         .size = 0,
4473         .read = ipr_read_dump,
4474         .write = ipr_write_dump
4475 };
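/*
 * Usage sketch (hypothetical host number): writing '1' arms a dump and,
 * once the adapter has produced one, reading returns the driver and IOA
 * dump data; writing '0' frees the dump buffers:
 *
 *	# echo 1 > /sys/class/scsi_host/host0/dump
 *	# cat /sys/class/scsi_host/host0/dump > ioa.dump
 *	# echo 0 > /sys/class/scsi_host/host0/dump
 */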
4476 #else
4477 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4478 #endif
4479
4480 /**
4481  * ipr_change_queue_depth - Change the device's queue depth
4482  * @sdev:       scsi device struct
4483  * @qdepth:     depth to set
4485  *
4486  * Return value:
4487  *      actual depth set
4488  **/
4489 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
4490 {
4491         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4492         struct ipr_resource_entry *res;
4493         unsigned long lock_flags = 0;
4494
4495         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4496         res = (struct ipr_resource_entry *)sdev->hostdata;
4497
4498         if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4499                 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4500         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4501
4502         scsi_change_queue_depth(sdev, qdepth);
4503         return sdev->queue_depth;
4504 }
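/*
 * Call-path sketch (the H:C:T:L below is illustrative): the midlayer
 * routes writes to a device's queue_depth attribute into this hook, so
 *
 *	# echo 16 > /sys/bus/scsi/devices/0:0:1:0/queue_depth
 *
 * arrives here with qdepth == 16, clamped to IPR_MAX_CMD_PER_ATA_LUN
 * for GATA resources when it exceeds that limit.
 */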
4505
4506 /**
4507  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4508  * @dev:        device struct
4509  * @attr:       device attribute structure
4510  * @buf:        buffer
4511  *
4512  * Return value:
4513  *      number of bytes printed to buffer
4514  **/
4515 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4516 {
4517         struct scsi_device *sdev = to_scsi_device(dev);
4518         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4519         struct ipr_resource_entry *res;
4520         unsigned long lock_flags = 0;
4521         ssize_t len = -ENXIO;
4522
4523         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4524         res = (struct ipr_resource_entry *)sdev->hostdata;
4525         if (res)
4526                 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4527         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4528         return len;
4529 }
4530
4531 static struct device_attribute ipr_adapter_handle_attr = {
4532         .attr = {
4533                 .name =         "adapter_handle",
4534                 .mode =         S_IRUSR,
4535         },
4536         .show = ipr_show_adapter_handle
4537 };
4538
4539 /**
4540  * ipr_show_resource_path - Show the resource path or the resource address for
4541  *                          this device.
4542  * @dev:        device struct
4543  * @attr:       device attribute structure
4544  * @buf:        buffer
4545  *
4546  * Return value:
4547  *      number of bytes printed to buffer
4548  **/
4549 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4550 {
4551         struct scsi_device *sdev = to_scsi_device(dev);
4552         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4553         struct ipr_resource_entry *res;
4554         unsigned long lock_flags = 0;
4555         ssize_t len = -ENXIO;
4556         char buffer[IPR_MAX_RES_PATH_LENGTH];
4557
4558         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4559         res = (struct ipr_resource_entry *)sdev->hostdata;
4560         if (res && ioa_cfg->sis64)
4561                 len = snprintf(buf, PAGE_SIZE, "%s\n",
4562                                __ipr_format_res_path(res->res_path, buffer,
4563                                                      sizeof(buffer)));
4564         else if (res)
4565                 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4566                                res->bus, res->target, res->lun);
4567
4568         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4569         return len;
4570 }
4571
4572 static struct device_attribute ipr_resource_path_attr = {
4573         .attr = {
4574                 .name =         "resource_path",
4575                 .mode =         S_IRUGO,
4576         },
4577         .show = ipr_show_resource_path
4578 };
4579
4580 /**
4581  * ipr_show_device_id - Show the device_id for this device.
4582  * @dev:        device struct
4583  * @attr:       device attribute structure
4584  * @buf:        buffer
4585  *
4586  * Return value:
4587  *      number of bytes printed to buffer
4588  **/
4589 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4590 {
4591         struct scsi_device *sdev = to_scsi_device(dev);
4592         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4593         struct ipr_resource_entry *res;
4594         unsigned long lock_flags = 0;
4595         ssize_t len = -ENXIO;
4596
4597         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4598         res = (struct ipr_resource_entry *)sdev->hostdata;
4599         if (res && ioa_cfg->sis64)
4600                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
4601         else if (res)
4602                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4603
4604         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4605         return len;
4606 }
4607
4608 static struct device_attribute ipr_device_id_attr = {
4609         .attr = {
4610                 .name =         "device_id",
4611                 .mode =         S_IRUGO,
4612         },
4613         .show = ipr_show_device_id
4614 };
4615
4616 /**
4617  * ipr_show_resource_type - Show the resource type for this device.
4618  * @dev:        device struct
4619  * @attr:       device attribute structure
4620  * @buf:        buffer
4621  *
4622  * Return value:
4623  *      number of bytes printed to buffer
4624  **/
4625 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4626 {
4627         struct scsi_device *sdev = to_scsi_device(dev);
4628         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4629         struct ipr_resource_entry *res;
4630         unsigned long lock_flags = 0;
4631         ssize_t len = -ENXIO;
4632
4633         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4634         res = (struct ipr_resource_entry *)sdev->hostdata;
4635
4636         if (res)
4637                 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4638
4639         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4640         return len;
4641 }
4642
4643 static struct device_attribute ipr_resource_type_attr = {
4644         .attr = {
4645                 .name =         "resource_type",
4646                 .mode =         S_IRUGO,
4647         },
4648         .show = ipr_show_resource_type
4649 };
4650
4651 /**
4652  * ipr_show_raw_mode - Show the device's raw mode setting
4653  * @dev:        device struct
4654  * @buf:        buffer
4655  *
4656  * Return value:
4657  *      number of bytes printed to buffer
4658  **/
4659 static ssize_t ipr_show_raw_mode(struct device *dev,
4660                                  struct device_attribute *attr, char *buf)
4661 {
4662         struct scsi_device *sdev = to_scsi_device(dev);
4663         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4664         struct ipr_resource_entry *res;
4665         unsigned long lock_flags = 0;
4666         ssize_t len;
4667
4668         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4669         res = (struct ipr_resource_entry *)sdev->hostdata;
4670         if (res)
4671                 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4672         else
4673                 len = -ENXIO;
4674         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4675         return len;
4676 }
4677
4678 /**
4679  * ipr_store_raw_mode - Change the device's raw mode setting
4680  * @dev:        device struct
4681  * @buf:        buffer
4682  *
4683  * Return value:
4684  *      number of bytes consumed on success / error code on failure
4685  **/
4686 static ssize_t ipr_store_raw_mode(struct device *dev,
4687                                   struct device_attribute *attr,
4688                                   const char *buf, size_t count)
4689 {
4690         struct scsi_device *sdev = to_scsi_device(dev);
4691         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4692         struct ipr_resource_entry *res;
4693         unsigned long lock_flags = 0;
4694         ssize_t len;
4695
4696         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4697         res = (struct ipr_resource_entry *)sdev->hostdata;
4698         if (res) {
4699                 if (ipr_is_af_dasd_device(res)) {
4700                         res->raw_mode = simple_strtoul(buf, NULL, 10);
4701                         len = strlen(buf);
4702                         if (res->sdev)
4703                                 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4704                                         res->raw_mode ? "enabled" : "disabled");
4705                 } else
4706                         len = -EINVAL;
4707         } else
4708                 len = -ENXIO;
4709         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4710         return len;
4711 }
4712
4713 static struct device_attribute ipr_raw_mode_attr = {
4714         .attr = {
4715                 .name =         "raw_mode",
4716                 .mode =         S_IRUGO | S_IWUSR,
4717         },
4718         .show = ipr_show_raw_mode,
4719         .store = ipr_store_raw_mode
4720 };
4721
4722 static struct device_attribute *ipr_dev_attrs[] = {
4723         &ipr_adapter_handle_attr,
4724         &ipr_resource_path_attr,
4725         &ipr_device_id_attr,
4726         &ipr_resource_type_attr,
4727         &ipr_raw_mode_attr,
4728         NULL,
4729 };
4730
4731 /**
4732  * ipr_biosparam - Return the HSC mapping
4733  * @sdev:                       scsi device struct
4734  * @block_device:       block device pointer
4735  * @capacity:           capacity of the device
4736  * @parm:                       Array containing returned HSC values.
4737  *
4738  * This function generates the HSC parms that fdisk uses.
4739  * We want to make sure we return something that places partitions
4740  * on 4k boundaries for best performance with the IOA.
4741  *
4742  * Return value:
4743  *      0 on success
4744  **/
4745 static int ipr_biosparam(struct scsi_device *sdev,
4746                          struct block_device *block_device,
4747                          sector_t capacity, int *parm)
4748 {
4749         int heads, sectors;
4750         sector_t cylinders;
4751
4752         heads = 128;
4753         sectors = 32;
4754
4755         cylinders = capacity;
4756         sector_div(cylinders, (128 * 32));
4757
4758         /* return result */
4759         parm[0] = heads;
4760         parm[1] = sectors;
4761         parm[2] = cylinders;
4762
4763         return 0;
4764 }
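/*
 * Worked example: the fixed 128 heads x 32 sectors/track geometry makes
 * one cylinder 128 * 32 = 4096 sectors (2 MiB with 512-byte sectors), so
 * cylinder-aligned partitions always begin on 4k boundaries. A 16 GiB
 * disk of 33554432 sectors therefore reports 33554432 / 4096 = 8192
 * cylinders.
 */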
4765
4766 /**
4767  * ipr_find_starget - Find target based on bus/target.
4768  * @starget:    scsi target struct
4769  *
4770  * Return value:
4771  *      resource entry pointer if found / NULL if not found
4772  **/
4773 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4774 {
4775         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4776         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4777         struct ipr_resource_entry *res;
4778
4779         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4780                 if ((res->bus == starget->channel) &&
4781                     (res->target == starget->id)) {
4782                         return res;
4783                 }
4784         }
4785
4786         return NULL;
4787 }
4788
4789 static struct ata_port_info sata_port_info;
4790
4791 /**
4792  * ipr_target_alloc - Prepare for commands to a SCSI target
4793  * @starget:    scsi target struct
4794  *
4795  * If the device is a SATA device, this function allocates an
4796  * ATA port with libata, else it does nothing.
4797  *
4798  * Return value:
4799  *      0 on success / non-0 on failure
4800  **/
4801 static int ipr_target_alloc(struct scsi_target *starget)
4802 {
4803         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4804         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4805         struct ipr_sata_port *sata_port;
4806         struct ata_port *ap;
4807         struct ipr_resource_entry *res;
4808         unsigned long lock_flags;
4809
4810         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4811         res = ipr_find_starget(starget);
4812         starget->hostdata = NULL;
4813
4814         if (res && ipr_is_gata(res)) {
4815                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4816                 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4817                 if (!sata_port)
4818                         return -ENOMEM;
4819
4820                 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4821                 if (ap) {
4822                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4823                         sata_port->ioa_cfg = ioa_cfg;
4824                         sata_port->ap = ap;
4825                         sata_port->res = res;
4826
4827                         res->sata_port = sata_port;
4828                         ap->private_data = sata_port;
4829                         starget->hostdata = sata_port;
4830                 } else {
4831                         kfree(sata_port);
4832                         return -ENOMEM;
4833                 }
4834         }
4835         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4836
4837         return 0;
4838 }
4839
4840 /**
4841  * ipr_target_destroy - Destroy a SCSI target
4842  * @starget:    scsi target struct
4843  *
4844  * If the device was a SATA device, this function frees the libata
4845  * ATA port, else it does nothing.
4846  *
4847  **/
4848 static void ipr_target_destroy(struct scsi_target *starget)
4849 {
4850         struct ipr_sata_port *sata_port = starget->hostdata;
4851         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4852         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4853
4854         if (ioa_cfg->sis64) {
4855                 if (!ipr_find_starget(starget)) {
4856                         if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4857                                 clear_bit(starget->id, ioa_cfg->array_ids);
4858                         else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4859                                 clear_bit(starget->id, ioa_cfg->vset_ids);
4860                         else if (starget->channel == 0)
4861                                 clear_bit(starget->id, ioa_cfg->target_ids);
4862                 }
4863         }
4864
4865         if (sata_port) {
4866                 starget->hostdata = NULL;
4867                 ata_sas_port_destroy(sata_port->ap);
4868                 kfree(sata_port);
4869         }
4870 }
4871
4872 /**
4873  * ipr_find_sdev - Find device based on bus/target/lun.
4874  * @sdev:       scsi device struct
4875  *
4876  * Return value:
4877  *      resource entry pointer if found / NULL if not found
4878  **/
4879 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4880 {
4881         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4882         struct ipr_resource_entry *res;
4883
4884         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4885                 if ((res->bus == sdev->channel) &&
4886                     (res->target == sdev->id) &&
4887                     (res->lun == sdev->lun))
4888                         return res;
4889         }
4890
4891         return NULL;
4892 }
4893
4894 /**
4895  * ipr_slave_destroy - Unconfigure a SCSI device
4896  * @sdev:       scsi device struct
4897  *
4898  * Return value:
4899  *      nothing
4900  **/
4901 static void ipr_slave_destroy(struct scsi_device *sdev)
4902 {
4903         struct ipr_resource_entry *res;
4904         struct ipr_ioa_cfg *ioa_cfg;
4905         unsigned long lock_flags = 0;
4906
4907         ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4908
4909         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4910         res = (struct ipr_resource_entry *) sdev->hostdata;
4911         if (res) {
4912                 if (res->sata_port)
4913                         res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4914                 sdev->hostdata = NULL;
4915                 res->sdev = NULL;
4916                 res->sata_port = NULL;
4917         }
4918         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4919 }
4920
4921 /**
4922  * ipr_slave_configure - Configure a SCSI device
4923  * @sdev:       scsi device struct
4924  *
4925  * This function configures the specified scsi device.
4926  *
4927  * Return value:
4928  *      0 on success
4929  **/
4930 static int ipr_slave_configure(struct scsi_device *sdev)
4931 {
4932         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4933         struct ipr_resource_entry *res;
4934         struct ata_port *ap = NULL;
4935         unsigned long lock_flags = 0;
4936         char buffer[IPR_MAX_RES_PATH_LENGTH];
4937
4938         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4939         res = sdev->hostdata;
4940         if (res) {
4941                 if (ipr_is_af_dasd_device(res))
4942                         sdev->type = TYPE_RAID;
4943                 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4944                         sdev->scsi_level = 4;
4945                         sdev->no_uld_attach = 1;
4946                 }
4947                 if (ipr_is_vset_device(res)) {
4948                         sdev->scsi_level = SCSI_SPC_3;
4949                         sdev->no_report_opcodes = 1;
4950                         blk_queue_rq_timeout(sdev->request_queue,
4951                                              IPR_VSET_RW_TIMEOUT);
4952                         blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4953                 }
4954                 if (ipr_is_gata(res) && res->sata_port)
4955                         ap = res->sata_port->ap;
4956                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4957
4958                 if (ap) {
4959                         scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
4960                         ata_sas_slave_configure(sdev, ap);
4961                 }
4962
4963                 if (ioa_cfg->sis64)
4964                         sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4965                                     ipr_format_res_path(ioa_cfg,
4966                                 res->res_path, buffer, sizeof(buffer)));
4967                 return 0;
4968         }
4969         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4970         return 0;
4971 }
4972
4973 /**
4974  * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4975  * @sdev:       scsi device struct
4976  *
4977  * This function initializes an ATA port so that future commands
4978  * sent through queuecommand will work.
4979  *
4980  * Return value:
4981  *      0 on success
4982  **/
4983 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4984 {
4985         struct ipr_sata_port *sata_port = NULL;
4986         int rc = -ENXIO;
4987
4988         ENTER;
4989         if (sdev->sdev_target)
4990                 sata_port = sdev->sdev_target->hostdata;
4991         if (sata_port) {
4992                 rc = ata_sas_port_init(sata_port->ap);
4993                 if (rc == 0)
4994                         rc = ata_sas_sync_probe(sata_port->ap);
4995         }
4996
4997         if (rc)
4998                 ipr_slave_destroy(sdev);
4999
5000         LEAVE;
5001         return rc;
5002 }
5003
5004 /**
5005  * ipr_slave_alloc - Prepare for commands to a device.
5006  * @sdev:       scsi device struct
5007  *
5008  * This function saves a pointer to the resource entry
5009  * in the scsi device struct if the device exists. We
5010  * can then use this pointer in ipr_queuecommand when
5011  * handling new commands.
5012  *
5013  * Return value:
5014  *      0 on success / -ENXIO if device does not exist
5015  **/
5016 static int ipr_slave_alloc(struct scsi_device *sdev)
5017 {
5018         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
5019         struct ipr_resource_entry *res;
5020         unsigned long lock_flags;
5021         int rc = -ENXIO;
5022
5023         sdev->hostdata = NULL;
5024
5025         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5026
5027         res = ipr_find_sdev(sdev);
5028         if (res) {
5029                 res->sdev = sdev;
5030                 res->add_to_ml = 0;
5031                 res->in_erp = 0;
5032                 sdev->hostdata = res;
5033                 if (!ipr_is_naca_model(res))
5034                         res->needs_sync_complete = 1;
5035                 rc = 0;
5036                 if (ipr_is_gata(res)) {
5037                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5038                         return ipr_ata_slave_alloc(sdev);
5039                 }
5040         }
5041
5042         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5043
5044         return rc;
5045 }
5046
5047 /**
5048  * ipr_match_lun - Match function for specified LUN
5049  * @ipr_cmd:    ipr command struct
5050  * @device:             device to match (sdev)
5051  *
5052  * Returns:
5053  *      1 if command matches sdev / 0 if command does not match sdev
5054  **/
5055 static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
5056 {
5057         if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
5058                 return 1;
5059         return 0;
5060 }
5061
5062 /**
5063  * ipr_cmnd_is_free - Check if a command is free or not
5064  * @ipr_cmd:    ipr command struct
5065  *
5066  * Returns:
5067  *      true / false
5068  **/
5069 static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
5070 {
5071         struct ipr_cmnd *loop_cmd;
5072
5073         list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
5074                 if (loop_cmd == ipr_cmd)
5075                         return true;
5076         }
5077
5078         return false;
5079 }
5080
5081 /**
5082  * ipr_match_res - Match function for specified resource entry
5083  * @ipr_cmd:    ipr command struct
5084  * @resource:   resource entry to match
5085  *
5086  * Returns:
5087  *      1 if command matches the resource entry / 0 if it does not
5088  **/
5089 static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
5090 {
5091         struct ipr_resource_entry *res = resource;
5092
5093         if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
5094                 return 1;
5095         return 0;
5096 }
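/*
 * The two match predicates above (ipr_match_lun and ipr_match_res) serve
 * as the match callback for ipr_wait_for_ops(), e.g. from the error
 * handlers later in this file:
 *
 *	rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
 *	rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
 */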
5097
5098 /**
5099  * ipr_wait_for_ops - Wait for matching commands to complete
5100  * @ioa_cfg:    ioa config struct
5101  * @device:             device to match (sdev)
5102  * @match:              match function to use
5103  *
5104  * Returns:
5105  *      SUCCESS / FAILED
5106  **/
5107 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
5108                             int (*match)(struct ipr_cmnd *, void *))
5109 {
5110         struct ipr_cmnd *ipr_cmd;
5111         int wait, i;
5112         unsigned long flags;
5113         struct ipr_hrr_queue *hrrq;
5114         signed long timeout = IPR_ABORT_TASK_TIMEOUT;
5115         DECLARE_COMPLETION_ONSTACK(comp);
5116
5117         ENTER;
5118         do {
5119                 wait = 0;
5120
5121                 for_each_hrrq(hrrq, ioa_cfg) {
5122                         spin_lock_irqsave(hrrq->lock, flags);
5123                         for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5124                                 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5125                                 if (!ipr_cmnd_is_free(ipr_cmd)) {
5126                                         if (match(ipr_cmd, device)) {
5127                                                 ipr_cmd->eh_comp = &comp;
5128                                                 wait++;
5129                                         }
5130                                 }
5131                         }
5132                         spin_unlock_irqrestore(hrrq->lock, flags);
5133                 }
5134
5135                 if (wait) {
5136                         timeout = wait_for_completion_timeout(&comp, timeout);
5137
5138                         if (!timeout) {
5139                                 wait = 0;
5140
5141                                 for_each_hrrq(hrrq, ioa_cfg) {
5142                                         spin_lock_irqsave(hrrq->lock, flags);
5143                                         for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5144                                                 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5145                                                 if (!ipr_cmnd_is_free(ipr_cmd)) {
5146                                                         if (match(ipr_cmd, device)) {
5147                                                                 ipr_cmd->eh_comp = NULL;
5148                                                                 wait++;
5149                                                         }
5150                                                 }
5151                                         }
5152                                         spin_unlock_irqrestore(hrrq->lock, flags);
5153                                 }
5154
5155                                 if (wait)
5156                                         dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
5157                                 LEAVE;
5158                                 return wait ? FAILED : SUCCESS;
5159                         }
5160                 }
5161         } while (wait);
5162
5163         LEAVE;
5164         return SUCCESS;
5165 }
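/*
 * Handshake sketch: under each hrrq lock, every outstanding command that
 * matches has its ->eh_comp pointed at the on-stack completion, which the
 * command's done path then completes. If the wait times out, the loop runs
 * once more solely to clear any remaining ->eh_comp pointers so nothing
 * completes a completion whose stack frame is about to go away.
 */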
5166
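/**
 * ipr_eh_host_reset - Reset the host adapter
 * @cmd:        scsi command struct
 *
 * Return value:
 *      SUCCESS / FAILED
 **/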
5167 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
5168 {
5169         struct ipr_ioa_cfg *ioa_cfg;
5170         unsigned long lock_flags = 0;
5171         int rc = SUCCESS;
5172
5173         ENTER;
5174         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5175         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5176
5177         if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5178                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5179                 dev_err(&ioa_cfg->pdev->dev,
5180                         "Adapter being reset as a result of error recovery.\n");
5181
5182                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5183                         ioa_cfg->sdt_state = GET_DUMP;
5184         }
5185
5186         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5187         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5188         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5189
5190         /* If we got hit with a host reset while we were already resetting
5191          * the adapter for some reason and that reset failed, fail this one too. */
5192         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5193                 ipr_trace;
5194                 rc = FAILED;
5195         }
5196
5197         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5198         LEAVE;
5199         return rc;
5200 }
5201
5202 /**
5203  * ipr_device_reset - Reset the device
5204  * @ioa_cfg:    ioa config struct
5205  * @res:                resource entry struct
5206  *
5207  * This function issues a device reset to the affected device.
5208  * If the device is a SCSI device, a LUN reset will be sent
5209  * to the device first. If that does not work, a target reset
5210  * will be sent. If the device is a SATA device, a PHY reset will
5211  * be sent.
5212  *
5213  * Return value:
5214  *      0 on success / non-zero on failure
5215  **/
5216 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5217                             struct ipr_resource_entry *res)
5218 {
5219         struct ipr_cmnd *ipr_cmd;
5220         struct ipr_ioarcb *ioarcb;
5221         struct ipr_cmd_pkt *cmd_pkt;
5222         struct ipr_ioarcb_ata_regs *regs;
5223         u32 ioasc;
5224
5225         ENTER;
5226         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5227         ioarcb = &ipr_cmd->ioarcb;
5228         cmd_pkt = &ioarcb->cmd_pkt;
5229
5230         if (ipr_cmd->ioa_cfg->sis64) {
5231                 regs = &ipr_cmd->i.ata_ioadl.regs;
5232                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5233         } else
5234                 regs = &ioarcb->u.add_data.u.regs;
5235
5236         ioarcb->res_handle = res->res_handle;
5237         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5238         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5239         if (ipr_is_gata(res)) {
5240                 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
5241                 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
5242                 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5243         }
5244
5245         ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5246         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5247         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5248         if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5249                 if (ipr_cmd->ioa_cfg->sis64)
5250                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5251                                sizeof(struct ipr_ioasa_gata));
5252                 else
5253                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5254                                sizeof(struct ipr_ioasa_gata));
5255         }
5256
5257         LEAVE;
5258         return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
5259 }
5260
5261 /**
5262  * ipr_sata_reset - Reset the SATA port
5263  * @link:       SATA link to reset
5264  * @classes:    class of the attached device
5265  *
5266  * This function issues a SATA phy reset to the affected ATA link.
5267  *
5268  * Return value:
5269  *      0 on success / non-zero on failure
5270  **/
5271 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
5272                                 unsigned long deadline)
5273 {
5274         struct ipr_sata_port *sata_port = link->ap->private_data;
5275         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5276         struct ipr_resource_entry *res;
5277         unsigned long lock_flags = 0;
5278         int rc = -ENXIO, ret;
5279
5280         ENTER;
5281         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5282         while (ioa_cfg->in_reset_reload) {
5283                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5284                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5285                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5286         }
5287
5288         res = sata_port->res;
5289         if (res) {
5290                 rc = ipr_device_reset(ioa_cfg, res);
5291                 *classes = res->ata_class;
5292                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5293
5294                 ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5295                 if (ret != SUCCESS) {
5296                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5297                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5298                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5299
5300                         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5301                 }
5302         } else
5303                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5304
5305         LEAVE;
5306         return rc;
5307 }
5308
5309 /**
5310  * __ipr_eh_dev_reset - Reset the device
5311  * @scsi_cmd:   scsi command struct
5312  *
5313  * This function issues a device reset to the affected device.
5314  * A LUN reset will be sent to the device first. If that does
5315  * not work, a target reset will be sent.
5316  *
5317  * Return value:
5318  *      SUCCESS / FAILED
5319  **/
5320 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5321 {
5322         struct ipr_cmnd *ipr_cmd;
5323         struct ipr_ioa_cfg *ioa_cfg;
5324         struct ipr_resource_entry *res;
5325         struct ata_port *ap;
5326         int rc = 0, i;
5327         struct ipr_hrr_queue *hrrq;
5328
5329         ENTER;
5330         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5331         res = scsi_cmd->device->hostdata;
5332
5333         /*
5334          * If we are currently going through reset/reload, return failed. This will force the
5335          * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5336          * reset to complete
5337          */
5338         if (ioa_cfg->in_reset_reload)
5339                 return FAILED;
5340         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5341                 return FAILED;
5342
5343         for_each_hrrq(hrrq, ioa_cfg) {
5344                 spin_lock(&hrrq->_lock);
5345                 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5346                         ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5347
5348                         if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5349                                 if (!ipr_cmd->qc)
5350                                         continue;
5351                                 if (ipr_cmnd_is_free(ipr_cmd))
5352                                         continue;
5353
5354                                 ipr_cmd->done = ipr_sata_eh_done;
5355                                 if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5356                                         ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5357                                         ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5358                                 }
5359                         }
5360                 }
5361                 spin_unlock(&hrrq->_lock);
5362         }
5363         res->resetting_device = 1;
5364         scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5365
5366         if (ipr_is_gata(res) && res->sata_port) {
5367                 ap = res->sata_port->ap;
5368                 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5369                 ata_std_error_handler(ap);
5370                 spin_lock_irq(scsi_cmd->device->host->host_lock);
5371         } else
5372                 rc = ipr_device_reset(ioa_cfg, res);
5373         res->resetting_device = 0;
5374         res->reset_occurred = 1;
5375
5376         LEAVE;
5377         return rc ? FAILED : SUCCESS;
5378 }
5379
5380 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5381 {
5382         int rc;
5383         struct ipr_ioa_cfg *ioa_cfg;
5384         struct ipr_resource_entry *res;
5385
5386         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5387         res = cmd->device->hostdata;
5388
5389         if (!res)
5390                 return FAILED;
5391
5392         spin_lock_irq(cmd->device->host->host_lock);
5393         rc = __ipr_eh_dev_reset(cmd);
5394         spin_unlock_irq(cmd->device->host->host_lock);
5395
5396         if (rc == SUCCESS) {
5397                 if (ipr_is_gata(res) && res->sata_port)
5398                         rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5399                 else
5400                         rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5401         }
5402
5403         return rc;
5404 }
5405
5406 /**
5407  * ipr_bus_reset_done - Op done function for bus reset.
5408  * @ipr_cmd:    ipr command struct
5409  *
5410  * This function is the op done function for a bus reset
5411  *
5412  * Return value:
5413  *      none
5414  **/
5415 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5416 {
5417         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5418         struct ipr_resource_entry *res;
5419
5420         ENTER;
5421         if (!ioa_cfg->sis64)
5422                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5423                         if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5424                                 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5425                                 break;
5426                         }
5427                 }
5428
5429         /*
5430          * If abort has not completed, indicate the reset has, else call the
5431          * abort's done function to wake the sleeping eh thread
5432          */
5433         if (ipr_cmd->sibling->sibling)
5434                 ipr_cmd->sibling->sibling = NULL;
5435         else
5436                 ipr_cmd->sibling->done(ipr_cmd->sibling);
5437
5438         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5439         LEAVE;
5440 }
5441
5442 /**
5443  * ipr_abort_timeout - An abort task has timed out
5444  * @t:          timer context used to fetch the ipr command struct
5445  *
5446  * This function handles when an abort task times out. If this
5447  * happens we issue a bus reset since we have resources tied
5448  * up that must be freed before returning to the midlayer.
5449  *
5450  * Return value:
5451  *      none
5452  **/
5453 static void ipr_abort_timeout(struct timer_list *t)
5454 {
5455         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
5456         struct ipr_cmnd *reset_cmd;
5457         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5458         struct ipr_cmd_pkt *cmd_pkt;
5459         unsigned long lock_flags = 0;
5460
5461         ENTER;
5462         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5463         if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5464                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5465                 return;
5466         }
5467
5468         sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5469         reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5470         ipr_cmd->sibling = reset_cmd;
5471         reset_cmd->sibling = ipr_cmd;
5472         reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5473         cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5474         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5475         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5476         cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5477
5478         ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5479         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5480         LEAVE;
5481 }
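/*
 * Pairing sketch: ipr_abort_timeout() links the stalled abort and the bus
 * reset it issues through their ->sibling pointers, and ipr_bus_reset_done()
 * uses that link to produce exactly one wakeup: if the abort is still
 * outstanding it merely clears the link, otherwise it calls the abort's
 * done routine to wake the sleeping eh thread.
 */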
5482
5483 /**
5484  * ipr_cancel_op - Cancel specified op
5485  * @scsi_cmd:   scsi command struct
5486  *
5487  * This function cancels specified op.
5488  *
5489  * Return value:
5490  *      SUCCESS / FAILED
5491  **/
5492 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5493 {
5494         struct ipr_cmnd *ipr_cmd;
5495         struct ipr_ioa_cfg *ioa_cfg;
5496         struct ipr_resource_entry *res;
5497         struct ipr_cmd_pkt *cmd_pkt;
5498         u32 ioasc, int_reg;
5499         int i, op_found = 0;
5500         struct ipr_hrr_queue *hrrq;
5501
5502         ENTER;
5503         ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5504         res = scsi_cmd->device->hostdata;
5505
5506         /* If we are currently going through reset/reload, return failed.
5507          * This will force the mid-layer to call ipr_eh_host_reset,
5508          * which will then go to sleep and wait for the reset to complete
5509          */
5510         if (ioa_cfg->in_reset_reload ||
5511             ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5512                 return FAILED;
5513         if (!res)
5514                 return FAILED;
5515
5516         /*
5517          * If we are aborting a timed out op, chances are that the timeout was caused
5518          * by a still not detected EEH error. In such cases, reading a register will
5519          * by an EEH error that has not yet been detected. In such cases, reading
5520          * a register will trigger the EEH recovery infrastructure.
5521         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5522
5523         if (!ipr_is_gscsi(res))
5524                 return FAILED;
5525
5526         for_each_hrrq(hrrq, ioa_cfg) {
5527                 spin_lock(&hrrq->_lock);
5528                 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5529                         if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
5530                                 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
5531                                         op_found = 1;
5532                                         break;
5533                                 }
5534                         }
5535                 }
5536                 spin_unlock(&hrrq->_lock);
5537         }
5538
5539         if (!op_found)
5540                 return SUCCESS;
5541
5542         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5543         ipr_cmd->ioarcb.res_handle = res->res_handle;
5544         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5545         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5546         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5547         ipr_cmd->u.sdev = scsi_cmd->device;
5548
5549         scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5550                     scsi_cmd->cmnd[0]);
5551         ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5552         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5553
5554         /*
5555          * If the abort task timed out and we sent a bus reset, we will get
5556          * one of the following responses to the abort
5557          */
5558         if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5559                 ioasc = 0;
5560                 ipr_trace;
5561         }
5562
5563         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5564         if (!ipr_is_naca_model(res))
5565                 res->needs_sync_complete = 1;
5566
5567         LEAVE;
5568         return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5569 }
5570
5571 /**
5572  * ipr_scan_finished - Report whether the device scan is complete
5573  * @shost:              scsi host struct
5574  * @elapsed_time:       elapsed time of the scan in jiffies
5575  * Return value:
5576  *      0 if scan in progress / 1 if scan is complete
5577  **/
5578 static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5579 {
5580         unsigned long lock_flags;
5581         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5582         int rc = 0;
5583
5584         spin_lock_irqsave(shost->host_lock, lock_flags);
5585         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5586                 rc = 1;
5587         if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5588                 rc = 1;
5589         spin_unlock_irqrestore(shost->host_lock, lock_flags);
5590         return rc;
5591 }
5592
5593 /**
5594  * ipr_eh_abort - Abort a single op
5595  * @scsi_cmd:   scsi command struct
5596  *
5597  * Return value:
5598  *      SUCCESS / FAILED
5599  **/
5600 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5601 {
5602         unsigned long flags;
5603         int rc;
5604         struct ipr_ioa_cfg *ioa_cfg;
5605
5606         ENTER;
5607
5608         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5609
5610         spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5611         rc = ipr_cancel_op(scsi_cmd);
5612         spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5613
5614         if (rc == SUCCESS)
5615                 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5616         LEAVE;
5617         return rc;
5618 }
5619
5620 /**
5621  * ipr_handle_other_interrupt - Handle "other" interrupts
5622  * @ioa_cfg:    ioa config struct
5623  * @int_reg:    interrupt register
5624  *
5625  * Return value:
5626  *      IRQ_NONE / IRQ_HANDLED
5627  **/
5628 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5629                                               u32 int_reg)
5630 {
5631         irqreturn_t rc = IRQ_HANDLED;
5632         u32 int_mask_reg;
5633
5634         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5635         int_reg &= ~int_mask_reg;
5636
5637         /* If no operational interrupt is pending on the adapter, ignore it.
5638          * On SIS 64, however, first check for a stage change interrupt.
5639          */
5640         if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5641                 if (ioa_cfg->sis64) {
5642                         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5643                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5644                         if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5645
5646                                 /* clear stage change */
5647                                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5648                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5649                                 list_del(&ioa_cfg->reset_cmd->queue);
5650                                 del_timer(&ioa_cfg->reset_cmd->timer);
5651                                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5652                                 return IRQ_HANDLED;
5653                         }
5654                 }
5655
5656                 return IRQ_NONE;
5657         }
5658
5659         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5660                 /* Mask the interrupt */
5661                 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5662                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5663
5664                 list_del(&ioa_cfg->reset_cmd->queue);
5665                 del_timer(&ioa_cfg->reset_cmd->timer);
5666                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5667         } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5668                 if (ioa_cfg->clear_isr) {
5669                         if (ipr_debug && printk_ratelimit())
5670                                 dev_err(&ioa_cfg->pdev->dev,
5671                                         "Spurious interrupt detected. 0x%08X\n", int_reg);
5672                         writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5673                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5674                         return IRQ_NONE;
5675                 }
5676         } else {
5677                 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5678                         ioa_cfg->ioa_unit_checked = 1;
5679                 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5680                         dev_err(&ioa_cfg->pdev->dev,
5681                                 "No Host RRQ. 0x%08X\n", int_reg);
5682                 else
5683                         dev_err(&ioa_cfg->pdev->dev,
5684                                 "Permanent IOA failure. 0x%08X\n", int_reg);
5685
5686                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5687                         ioa_cfg->sdt_state = GET_DUMP;
5688
5689                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5690                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5691         }
5692
5693         return rc;
5694 }
5695
5696 /**
5697  * ipr_isr_eh - Interrupt service routine error handler
5698  * @ioa_cfg:    ioa config struct
5699  * @msg:        message to log
5700  * @number:     number to append to the logged message
5701  * Return value:
5702  *      none
5703  **/
5704 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5705 {
5706         ioa_cfg->errors_logged++;
5707         dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5708
5709         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5710                 ioa_cfg->sdt_state = GET_DUMP;
5711
5712         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5713 }
5714
5715 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5716                                                 struct list_head *doneq)
5717 {
5718         u32 ioasc;
5719         u16 cmd_index;
5720         struct ipr_cmnd *ipr_cmd;
5721         struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5722         int num_hrrq = 0;
5723
5724         /* If interrupts are disabled, ignore the interrupt */
5725         if (!hrr_queue->allow_interrupts)
5726                 return 0;
5727
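	/* The IOA posts response handles into the host RRQ ring. A posted
	 * word decodes as:
	 *   toggle = word & IPR_HRRQ_TOGGLE_BIT;
	 *   index  = (word & IPR_HRRQ_REQ_RESP_HANDLE_MASK)
	 *            >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
	 * The toggle bit flips on each wrap of the ring, so an entry is
	 * valid only while its toggle bit matches the one we track. */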
5728         while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5729                hrr_queue->toggle_bit) {
5730
5731                 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5732                              IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5733                              IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5734
5735                 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5736                              cmd_index < hrr_queue->min_cmd_id)) {
5737                         ipr_isr_eh(ioa_cfg,
5738                                 "Invalid response handle from IOA: ",
5739                                 cmd_index);
5740                         break;
5741                 }
5742
5743                 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5744                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5745
5746                 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5747
5748                 list_move_tail(&ipr_cmd->queue, doneq);
5749
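		/* Advance the consumer pointer, flipping our toggle bit
		 * when the ring wraps back to the start. */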
5750                 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5751                         hrr_queue->hrrq_curr++;
5752                 } else {
5753                         hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5754                         hrr_queue->toggle_bit ^= 1u;
5755                 }
5756                 num_hrrq++;
5757                 if (budget > 0 && num_hrrq >= budget)
5758                         break;
5759         }
5760
5761         return num_hrrq;
5762 }
5763
5764 static int ipr_iopoll(struct irq_poll *iop, int budget)
5765 {
5766         struct ipr_ioa_cfg *ioa_cfg;
5767         struct ipr_hrr_queue *hrrq;
5768         struct ipr_cmnd *ipr_cmd, *temp;
5769         unsigned long hrrq_flags;
5770         int completed_ops;
5771         LIST_HEAD(doneq);
5772
5773         hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5774         ioa_cfg = hrrq->ioa_cfg;
5775
5776         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5777         completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5778
5779         if (completed_ops < budget)
5780                 irq_poll_complete(iop);
5781         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5782
5783         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5784                 list_del(&ipr_cmd->queue);
5785                 del_timer(&ipr_cmd->timer);
5786                 ipr_cmd->fast_done(ipr_cmd);
5787         }
5788
5789         return completed_ops;
5790 }
5791
5792 /**
5793  * ipr_isr - Interrupt service routine
5794  * @irq:        irq number
5795  * @devp:       pointer to the hrr queue struct
5796  *
5797  * Return value:
5798  *      IRQ_NONE / IRQ_HANDLED
5799  **/
5800 static irqreturn_t ipr_isr(int irq, void *devp)
5801 {
5802         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5803         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5804         unsigned long hrrq_flags = 0;
5805         u32 int_reg = 0;
5806         int num_hrrq = 0;
5807         int irq_none = 0;
5808         struct ipr_cmnd *ipr_cmd, *temp;
5809         irqreturn_t rc = IRQ_NONE;
5810         LIST_HEAD(doneq);
5811
5812         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5813         /* If interrupts are disabled, ignore the interrupt */
5814         if (!hrrq->allow_interrupts) {
5815                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5816                 return IRQ_NONE;
5817         }
5818
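	/* Drain completions, then clear the PCI interrupt and re-read the
	 * sense register to close the race with new postings. Give up after
	 * IPR_MAX_HRRQ_RETRIES attempts to clear the interrupt. */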
5819         while (1) {
5820                 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5821                         rc =  IRQ_HANDLED;
5822
5823                         if (!ioa_cfg->clear_isr)
5824                                 break;
5825
5826                         /* Clear the PCI interrupt */
5827                         num_hrrq = 0;
5828                         do {
5829                                 writel(IPR_PCII_HRRQ_UPDATED,
5830                                      ioa_cfg->regs.clr_interrupt_reg32);
5831                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5832                         } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5833                                 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5834
5835                 } else if (rc == IRQ_NONE && irq_none == 0) {
5836                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5837                         irq_none++;
5838                 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5839                            int_reg & IPR_PCII_HRRQ_UPDATED) {
5840                         ipr_isr_eh(ioa_cfg,
5841                                 "Error clearing HRRQ: ", num_hrrq);
5842                         rc = IRQ_HANDLED;
5843                         break;
5844                 } else
5845                         break;
5846         }
5847
5848         if (unlikely(rc == IRQ_NONE))
5849                 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5850
5851         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5852         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5853                 list_del(&ipr_cmd->queue);
5854                 del_timer(&ipr_cmd->timer);
5855                 ipr_cmd->fast_done(ipr_cmd);
5856         }
5857         return rc;
5858 }
5859
5860 /**
5861  * ipr_isr_mhrrq - Interrupt service routine
5862  * @irq:        irq number
5863  * @devp:       pointer to the hrr queue struct
5864  *
5865  * Return value:
5866  *      IRQ_NONE / IRQ_HANDLED
5867  **/
5868 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5869 {
5870         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5871         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5872         unsigned long hrrq_flags = 0;
5873         struct ipr_cmnd *ipr_cmd, *temp;
5874         irqreturn_t rc = IRQ_NONE;
5875         LIST_HEAD(doneq);
5876
5877         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5878
5879         /* If interrupts are disabled, ignore the interrupt */
5880         if (!hrrq->allow_interrupts) {
5881                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5882                 return IRQ_NONE;
5883         }
5884
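	/* With iopoll weighting enabled on SIS-64 and multiple MSI-X
	 * vectors, defer completion processing to softirq context via
	 * irq_poll_sched() rather than draining the queue here. */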
5885         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5886                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5887                        hrrq->toggle_bit) {
5888                         irq_poll_sched(&hrrq->iopoll);
5889                         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5890                         return IRQ_HANDLED;
5891                 }
5892         } else {
5893                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5894                         hrrq->toggle_bit)
5895
5896                         if (ipr_process_hrrq(hrrq, -1, &doneq))
5897                                 rc =  IRQ_HANDLED;
5898         }
5899
5900         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5901
5902         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5903                 list_del(&ipr_cmd->queue);
5904                 del_timer(&ipr_cmd->timer);
5905                 ipr_cmd->fast_done(ipr_cmd);
5906         }
5907         return rc;
5908 }
5909
5910 /**
5911  * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5912  * @ioa_cfg:    ioa config struct
5913  * @ipr_cmd:    ipr command struct
5914  *
5915  * Return value:
5916  *      0 on success / -1 on failure
5917  **/
5918 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5919                              struct ipr_cmnd *ipr_cmd)
5920 {
5921         int i, nseg;
5922         struct scatterlist *sg;
5923         u32 length;
5924         u32 ioadl_flags = 0;
5925         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5926         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5927         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5928
5929         length = scsi_bufflen(scsi_cmd);
5930         if (!length)
5931                 return 0;
5932
5933         nseg = scsi_dma_map(scsi_cmd);
5934         if (nseg < 0) {
5935                 if (printk_ratelimit())
5936                         dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5937                 return -1;
5938         }
5939
5940         ipr_cmd->dma_use_sg = nseg;
5941
5942         ioarcb->data_transfer_length = cpu_to_be32(length);
5943         ioarcb->ioadl_len =
5944                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5945
5946         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5947                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5948                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5949         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5950                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5951
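	/* Build one 64-bit IOADL descriptor per mapped S/G segment; the
	 * final descriptor is flagged as the last below. */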
5952         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5953                 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5954                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5955                 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5956         }
5957
5958         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5959         return 0;
5960 }
5961
5962 /**
5963  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5964  * @ioa_cfg:    ioa config struct
5965  * @ipr_cmd:    ipr command struct
5966  *
5967  * Return value:
5968  *      0 on success / -1 on failure
5969  **/
5970 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5971                            struct ipr_cmnd *ipr_cmd)
5972 {
5973         int i, nseg;
5974         struct scatterlist *sg;
5975         u32 length;
5976         u32 ioadl_flags = 0;
5977         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5978         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5979         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5980
5981         length = scsi_bufflen(scsi_cmd);
5982         if (!length)
5983                 return 0;
5984
5985         nseg = scsi_dma_map(scsi_cmd);
5986         if (nseg < 0) {
5987                 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5988                 return -1;
5989         }
5990
5991         ipr_cmd->dma_use_sg = nseg;
5992
5993         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5994                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5995                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5996                 ioarcb->data_transfer_length = cpu_to_be32(length);
5997                 ioarcb->ioadl_len =
5998                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5999         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
6000                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6001                 ioarcb->read_data_transfer_length = cpu_to_be32(length);
6002                 ioarcb->read_ioadl_len =
6003                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6004         }
6005
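	/* If the S/G list is small enough, place it inline in the IOARCB's
	 * add_data area so the adapter need not fetch the list separately. */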
6006         if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
6007                 ioadl = ioarcb->u.add_data.u.ioadl;
6008                 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
6009                                     offsetof(struct ipr_ioarcb, u.add_data));
6010                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6011         }
6012
6013         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
6014                 ioadl[i].flags_and_data_len =
6015                         cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6016                 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
6017         }
6018
6019         ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6020         return 0;
6021 }
6022
6023 /**
6024  * __ipr_erp_done - Process completion of ERP for a device
6025  * @ipr_cmd:            ipr command struct
6026  *
6027  * This function copies the sense buffer into the scsi_cmd
6028  * struct and invokes the scsi_done function.
6029  *
6030  * Return value:
6031  *      nothing
6032  **/
6033 static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6034 {
6035         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6036         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6037         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6038
6039         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6040                 scsi_cmd->result |= (DID_ERROR << 16);
6041                 scmd_printk(KERN_ERR, scsi_cmd,
6042                             "Request Sense failed with IOASC: 0x%08X\n", ioasc);
6043         } else {
6044                 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
6045                        SCSI_SENSE_BUFFERSIZE);
6046         }
6047
6048         if (res) {
6049                 if (!ipr_is_naca_model(res))
6050                         res->needs_sync_complete = 1;
6051                 res->in_erp = 0;
6052         }
6053         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6054         scsi_cmd->scsi_done(scsi_cmd);
6055         if (ipr_cmd->eh_comp)
6056                 complete(ipr_cmd->eh_comp);
6057         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6058 }
6059
6060 /**
6061  * ipr_erp_done - Process completion of ERP for a device
6062  * @ipr_cmd:            ipr command struct
6063  *
6064  * This function copies the sense buffer into the scsi_cmd
6065  * struct and invokes the scsi_done function.
6066  *
6067  * Return value:
6068  *      nothing
6069  **/
6070 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6071 {
6072         struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6073         unsigned long hrrq_flags;
6074
6075         spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6076         __ipr_erp_done(ipr_cmd);
6077         spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6078 }
6079
6080 /**
6081  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
6082  * @ipr_cmd:    ipr command struct
6083  *
6084  * Return value:
6085  *      none
6086  **/
6087 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
6088 {
6089         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6090         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6091         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6092
6093         memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
6094         ioarcb->data_transfer_length = 0;
6095         ioarcb->read_data_transfer_length = 0;
6096         ioarcb->ioadl_len = 0;
6097         ioarcb->read_ioadl_len = 0;
6098         ioasa->hdr.ioasc = 0;
6099         ioasa->hdr.residual_data_len = 0;
6100
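	/* Point the IOADL address back at the command block's own DMA
	 * area; SIS-64 and SIS-32 keep it at different offsets. */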
6101         if (ipr_cmd->ioa_cfg->sis64)
6102                 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6103                         cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
6104         else {
6105                 ioarcb->write_ioadl_addr =
6106                         cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
6107                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6108         }
6109 }
6110
6111 /**
6112  * __ipr_erp_request_sense - Send request sense to a device
6113  * @ipr_cmd:    ipr command struct
6114  *
6115  * This function sends a request sense to a device as a result
6116  * of a check condition.
6117  *
6118  * Return value:
6119  *      nothing
6120  **/
6121 static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6122 {
6123         struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6124         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6125
6126         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6127                 __ipr_erp_done(ipr_cmd);
6128                 return;
6129         }
6130
6131         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6132
6133         cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
6134         cmd_pkt->cdb[0] = REQUEST_SENSE;
6135         cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
6136         cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
6137         cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6138         cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
6139
6140         ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
6141                        SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
6142
6143         ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
6144                    IPR_REQUEST_SENSE_TIMEOUT * 2);
6145 }
6146
6147 /**
6148  * ipr_erp_request_sense - Send request sense to a device
6149  * @ipr_cmd:    ipr command struct
6150  *
6151  * This function sends a request sense to a device as a result
6152  * of a check condition.
6153  *
6154  * Return value:
6155  *      nothing
6156  **/
6157 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6158 {
6159         struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6160         unsigned long hrrq_flags;
6161
6162         spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6163         __ipr_erp_request_sense(ipr_cmd);
6164         spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6165 }
6166
6167 /**
6168  * ipr_erp_cancel_all - Send cancel all to a device
6169  * @ipr_cmd:    ipr command struct
6170  *
6171  * This function sends a cancel all to a device to clear the
6172  * queue. If we are running TCQ on the device, QERR is set to 1,
6173  * which means all outstanding ops have been dropped on the floor.
6174  * Cancel all will return them to us.
6175  *
6176  * Return value:
6177  *      nothing
6178  **/
6179 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
6180 {
6181         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6182         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6183         struct ipr_cmd_pkt *cmd_pkt;
6184
6185         res->in_erp = 1;
6186
6187         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6188
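	/* Without tagged queuing nothing can be queued at the device, so
	 * skip the cancel all and go straight to request sense. */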
6189         if (!scsi_cmd->device->simple_tags) {
6190                 __ipr_erp_request_sense(ipr_cmd);
6191                 return;
6192         }
6193
6194         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6195         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
6196         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
6197
6198         ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
6199                    IPR_CANCEL_ALL_TIMEOUT);
6200 }
6201
6202 /**
6203  * ipr_dump_ioasa - Dump contents of IOASA
6204  * @ioa_cfg:    ioa config struct
6205  * @ipr_cmd:    ipr command struct
6206  * @res:                resource entry struct
6207  *
6208  * This function is invoked by the interrupt handler when ops
6209  * fail. It will log the IOASA if appropriate. Only called
6210  * for GPDD ops.
6211  *
6212  * Return value:
6213  *      none
6214  **/
6215 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
6216                            struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
6217 {
6218         int i;
6219         u16 data_len;
6220         u32 ioasc, fd_ioasc;
6221         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6222         __be32 *ioasa_data = (__be32 *)ioasa;
6223         int error_index;
6224
6225         ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
6226         fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
6227
6228         if (0 == ioasc)
6229                 return;
6230
6231         if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
6232                 return;
6233
6234         if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
6235                 error_index = ipr_get_error(fd_ioasc);
6236         else
6237                 error_index = ipr_get_error(ioasc);
6238
6239         if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6240                 /* Don't log an error if the IOA already logged one */
6241                 if (ioasa->hdr.ilid != 0)
6242                         return;
6243
6244                 if (!ipr_is_gscsi(res))
6245                         return;
6246
6247                 if (ipr_error_table[error_index].log_ioasa == 0)
6248                         return;
6249         }
6250
6251         ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
6252
6253         data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6254         if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6255                 data_len = sizeof(struct ipr_ioasa64);
6256         else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
6257                 data_len = sizeof(struct ipr_ioasa);
6258
6259         ipr_err("IOASA Dump:\n");
6260
6261         for (i = 0; i < data_len / 4; i += 4) {
6262                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6263                         be32_to_cpu(ioasa_data[i]),
6264                         be32_to_cpu(ioasa_data[i+1]),
6265                         be32_to_cpu(ioasa_data[i+2]),
6266                         be32_to_cpu(ioasa_data[i+3]));
6267         }
6268 }
6269
6270 /**
6271  * ipr_gen_sense - Generate SCSI sense data from an IOASA
6272  * @ipr_cmd:    ipr command struct
6274  *
6275  * Return value:
6276  *      none
6277  **/
6278 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6279 {
6280         u32 failing_lba;
6281         u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6282         struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
6283         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6284         u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
6285
6286         memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
6287
6288         if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6289                 return;
6290
6291         ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
6292
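	/* A failing LBA above 32 bits cannot be represented in fixed-format
	 * sense, so build descriptor-format (0x72) sense with an
	 * information descriptor instead. */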
6293         if (ipr_is_vset_device(res) &&
6294             ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6295             ioasa->u.vset.failing_lba_hi != 0) {
6296                 sense_buf[0] = 0x72;
6297                 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6298                 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6299                 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6300
6301                 sense_buf[7] = 12;
6302                 sense_buf[8] = 0;
6303                 sense_buf[9] = 0x0A;
6304                 sense_buf[10] = 0x80;
6305
6306                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6307
6308                 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6309                 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6310                 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6311                 sense_buf[15] = failing_lba & 0x000000ff;
6312
6313                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6314
6315                 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6316                 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6317                 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6318                 sense_buf[19] = failing_lba & 0x000000ff;
6319         } else {
6320                 sense_buf[0] = 0x70;
6321                 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6322                 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6323                 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6324
6325                 /* Illegal request */
6326                 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
6327                     (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
6328                         sense_buf[7] = 10;      /* additional length */
6329
6330                         /* IOARCB was in error */
6331                         if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6332                                 sense_buf[15] = 0xC0;
6333                         else    /* Parameter data was invalid */
6334                                 sense_buf[15] = 0x80;
6335
6336                         sense_buf[16] =
6337                             ((IPR_FIELD_POINTER_MASK &
6338                               be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
6339                         sense_buf[17] =
6340                             (IPR_FIELD_POINTER_MASK &
6341                              be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
6342                 } else {
6343                         if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6344                                 if (ipr_is_vset_device(res))
6345                                         failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6346                                 else
6347                                         failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6348
6349                                 sense_buf[0] |= 0x80;   /* Or in the Valid bit */
6350                                 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6351                                 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6352                                 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6353                                 sense_buf[6] = failing_lba & 0x000000ff;
6354                         }
6355
6356                         sense_buf[7] = 6;       /* additional length */
6357                 }
6358         }
6359 }
6360
6361 /**
6362  * ipr_get_autosense - Copy autosense data to sense buffer
6363  * @ipr_cmd:    ipr command struct
6364  *
6365  * This function copies the autosense buffer to the buffer
6366  * in the scsi_cmd, if there is autosense available.
6367  *
6368  * Return value:
6369  *      1 if autosense was available / 0 if not
6370  **/
6371 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6372 {
6373         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6374         struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6375
6376         if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6377                 return 0;
6378
6379         if (ipr_cmd->ioa_cfg->sis64)
6380                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6381                        min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6382                            SCSI_SENSE_BUFFERSIZE));
6383         else
6384                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6385                        min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6386                            SCSI_SENSE_BUFFERSIZE));
6387         return 1;
6388 }
6389
6390 /**
6391  * ipr_erp_start - Process an error response for a SCSI op
6392  * @ioa_cfg:    ioa config struct
6393  * @ipr_cmd:    ipr command struct
6394  *
6395  * This function determines whether or not to initiate ERP
6396  * on the affected device.
6397  *
6398  * Return value:
6399  *      nothing
6400  **/
6401 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6402                               struct ipr_cmnd *ipr_cmd)
6403 {
6404         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6405         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6406         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6407         u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6408
6409         if (!res) {
6410                 __ipr_scsi_eh_done(ipr_cmd);
6411                 return;
6412         }
6413
6414         if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6415                 ipr_gen_sense(ipr_cmd);
6416
6417         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6418
6419         switch (masked_ioasc) {
6420         case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6421                 if (ipr_is_naca_model(res))
6422                         scsi_cmd->result |= (DID_ABORT << 16);
6423                 else
6424                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6425                 break;
6426         case IPR_IOASC_IR_RESOURCE_HANDLE:
6427         case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6428                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6429                 break;
6430         case IPR_IOASC_HW_SEL_TIMEOUT:
6431                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6432                 if (!ipr_is_naca_model(res))
6433                         res->needs_sync_complete = 1;
6434                 break;
6435         case IPR_IOASC_SYNC_REQUIRED:
6436                 if (!res->in_erp)
6437                         res->needs_sync_complete = 1;
6438                 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6439                 break;
6440         case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6441         case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6442                 /*
6443                  * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
6444                  * so SCSI mid-layer and upper layers handle it accordingly.
6445                  */
6446                 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
6447                         scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6448                 break;
6449         case IPR_IOASC_BUS_WAS_RESET:
6450         case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6451                 /*
6452                  * Report the bus reset and ask for a retry. The device
6453                  * will return check condition/unit attention on the next command.
6454                  */
6455                 if (!res->resetting_device)
6456                         scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6457                 scsi_cmd->result |= (DID_ERROR << 16);
6458                 if (!ipr_is_naca_model(res))
6459                         res->needs_sync_complete = 1;
6460                 break;
6461         case IPR_IOASC_HW_DEV_BUS_STATUS:
6462                 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6463                 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6464                         if (!ipr_get_autosense(ipr_cmd)) {
6465                                 if (!ipr_is_naca_model(res)) {
6466                                         ipr_erp_cancel_all(ipr_cmd);
6467                                         return;
6468                                 }
6469                         }
6470                 }
6471                 if (!ipr_is_naca_model(res))
6472                         res->needs_sync_complete = 1;
6473                 break;
6474         case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6475                 break;
6476         case IPR_IOASC_IR_NON_OPTIMIZED:
6477                 if (res->raw_mode) {
6478                         res->raw_mode = 0;
6479                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6480                 } else
6481                         scsi_cmd->result |= (DID_ERROR << 16);
6482                 break;
6483         default:
6484                 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6485                         scsi_cmd->result |= (DID_ERROR << 16);
6486                 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6487                         res->needs_sync_complete = 1;
6488                 break;
6489         }
6490
6491         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6492         scsi_cmd->scsi_done(scsi_cmd);
6493         if (ipr_cmd->eh_comp)
6494                 complete(ipr_cmd->eh_comp);
6495         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6496 }
6497
6498 /**
6499  * ipr_scsi_done - mid-layer done function
6500  * @ipr_cmd:    ipr command struct
6501  *
6502  * This function is invoked by the interrupt handler for
6503  * ops generated by the SCSI mid-layer
6504  *
6505  * Return value:
6506  *      none
6507  **/
6508 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6509 {
6510         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6511         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6512         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6513         unsigned long lock_flags;
6514
6515         scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6516
6517         if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6518                 scsi_dma_unmap(scsi_cmd);
6519
6520                 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6521                 scsi_cmd->scsi_done(scsi_cmd);
6522                 if (ipr_cmd->eh_comp)
6523                         complete(ipr_cmd->eh_comp);
6524                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6525                 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6526         } else {
6527                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6528                 spin_lock(&ipr_cmd->hrrq->_lock);
6529                 ipr_erp_start(ioa_cfg, ipr_cmd);
6530                 spin_unlock(&ipr_cmd->hrrq->_lock);
6531                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6532         }
6533 }
6534
6535 /**
6536  * ipr_queuecommand - Queue a mid-layer request
6537  * @shost:              scsi host struct
6538  * @scsi_cmd:   scsi command struct
6539  *
6540  * This function queues a request generated by the mid-layer.
6541  *
6542  * Return value:
6543  *      0 on success
6544  *      SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6545  *      SCSI_MLQUEUE_HOST_BUSY if host is busy
6546  **/
6547 static int ipr_queuecommand(struct Scsi_Host *shost,
6548                             struct scsi_cmnd *scsi_cmd)
6549 {
6550         struct ipr_ioa_cfg *ioa_cfg;
6551         struct ipr_resource_entry *res;
6552         struct ipr_ioarcb *ioarcb;
6553         struct ipr_cmnd *ipr_cmd;
6554         unsigned long hrrq_flags, lock_flags;
6555         int rc;
6556         struct ipr_hrr_queue *hrrq;
6557         int hrrq_id;
6558
6559         ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6560
6561         scsi_cmd->result = (DID_OK << 16);
6562         res = scsi_cmd->device->hostdata;
6563
6564         if (ipr_is_gata(res) && res->sata_port) {
6565                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6566                 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6567                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6568                 return rc;
6569         }
6570
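	/* Select an HRRQ for this command; spreading submissions across
	 * queues spreads completion work across the interrupt vectors. */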
6571         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6572         hrrq = &ioa_cfg->hrrq[hrrq_id];
6573
6574         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6575         /*
6576          * We are currently blocking all devices due to a host reset.
6577          * We have told the host to stop giving us new requests, but
6578          * ERP ops don't count. FIXME
6579          */
6580         if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6581                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6582                 return SCSI_MLQUEUE_HOST_BUSY;
6583         }
6584
6585         /*
6586          * FIXME - Create scsi_set_host_offline interface
6587          *  and the ioa_is_dead check can be removed
6588          */
6589         if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6590                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6591                 goto err_nodev;
6592         }
6593
6594         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6595         if (ipr_cmd == NULL) {
6596                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6597                 return SCSI_MLQUEUE_HOST_BUSY;
6598         }
6599         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6600
6601         ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6602         ioarcb = &ipr_cmd->ioarcb;
6603
6604         memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6605         ipr_cmd->scsi_cmd = scsi_cmd;
6606         ipr_cmd->done = ipr_scsi_eh_done;
6607
6608         if (ipr_is_gscsi(res)) {
6609                 if (scsi_cmd->underflow == 0)
6610                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6611
6612                 if (res->reset_occurred) {
6613                         res->reset_occurred = 0;
6614                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6615                 }
6616         }
6617
6618         if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6619                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6620
6621                 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6622                 if (scsi_cmd->flags & SCMD_TAGGED)
6623                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6624                 else
6625                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
6626         }
6627
6628         if (scsi_cmd->cmnd[0] >= 0xC0 &&
6629             (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6630                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6631         }
6632         if (res->raw_mode && ipr_is_af_dasd_device(res)) {
6633                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
6634
6635                 if (scsi_cmd->underflow == 0)
6636                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6637         }
6638
6639         if (ioa_cfg->sis64)
6640                 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6641         else
6642                 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6643
6644         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6645         if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6646                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6647                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6648                 if (!rc)
6649                         scsi_dma_unmap(scsi_cmd);
6650                 return SCSI_MLQUEUE_HOST_BUSY;
6651         }
6652
6653         if (unlikely(hrrq->ioa_is_dead)) {
6654                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6655                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6656                 scsi_dma_unmap(scsi_cmd);
6657                 goto err_nodev;
6658         }
6659
6660         ioarcb->res_handle = res->res_handle;
6661         if (res->needs_sync_complete) {
6662                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6663                 res->needs_sync_complete = 0;
6664         }
6665         list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6666         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6667         ipr_send_command(ipr_cmd);
6668         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6669         return 0;
6670
6671 err_nodev:
6672         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6673         memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6674         scsi_cmd->result = (DID_NO_CONNECT << 16);
6675         scsi_cmd->scsi_done(scsi_cmd);
6676         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6677         return 0;
6678 }
6679
6680 /**
6681  * ipr_ioctl - IOCTL handler
6682  * @sdev:       scsi device struct
6683  * @cmd:        IOCTL cmd
6684  * @arg:        IOCTL arg
6685  *
6686  * Return value:
6687  *      0 on success / other on failure
6688  **/
6689 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6690 {
6691         struct ipr_resource_entry *res;
6692
6693         res = (struct ipr_resource_entry *)sdev->hostdata;
6694         if (res && ipr_is_gata(res)) {
6695                 if (cmd == HDIO_GET_IDENTITY)
6696                         return -ENOTTY;
6697                 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6698         }
6699
6700         return -EINVAL;
6701 }
6702
6703 /**
6704  * ipr_ioa_info - Get information about the card/driver
6705  * @host:       scsi host struct
6706  *
6707  * Return value:
6708  *      pointer to buffer with description string
6709  **/
6710 static const char *ipr_ioa_info(struct Scsi_Host *host)
6711 {
6712         static char buffer[512];
6713         struct ipr_ioa_cfg *ioa_cfg;
6714         unsigned long lock_flags = 0;
6715
6716         ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6717
6718         spin_lock_irqsave(host->host_lock, lock_flags);
6719         sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6720         spin_unlock_irqrestore(host->host_lock, lock_flags);
6721
6722         return buffer;
6723 }
6724
6725 static struct scsi_host_template driver_template = {
6726         .module = THIS_MODULE,
6727         .name = "IPR",
6728         .info = ipr_ioa_info,
6729         .ioctl = ipr_ioctl,
6730         .queuecommand = ipr_queuecommand,
6731         .eh_abort_handler = ipr_eh_abort,
6732         .eh_device_reset_handler = ipr_eh_dev_reset,
6733         .eh_host_reset_handler = ipr_eh_host_reset,
6734         .slave_alloc = ipr_slave_alloc,
6735         .slave_configure = ipr_slave_configure,
6736         .slave_destroy = ipr_slave_destroy,
6737         .scan_finished = ipr_scan_finished,
6738         .target_alloc = ipr_target_alloc,
6739         .target_destroy = ipr_target_destroy,
6740         .change_queue_depth = ipr_change_queue_depth,
6741         .bios_param = ipr_biosparam,
6742         .can_queue = IPR_MAX_COMMANDS,
6743         .this_id = -1,
6744         .sg_tablesize = IPR_MAX_SGLIST,
6745         .max_sectors = IPR_IOA_MAX_SECTORS,
6746         .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6747         .use_clustering = ENABLE_CLUSTERING,
6748         .shost_attrs = ipr_ioa_attrs,
6749         .sdev_attrs = ipr_dev_attrs,
6750         .proc_name = IPR_NAME,
6751 };
6752
6753 /**
6754  * ipr_ata_phy_reset - libata phy_reset handler
6755  * @ap:         ata port to reset
6756  *
6757  **/
6758 static void ipr_ata_phy_reset(struct ata_port *ap)
6759 {
6760         unsigned long flags;
6761         struct ipr_sata_port *sata_port = ap->private_data;
6762         struct ipr_resource_entry *res = sata_port->res;
6763         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6764         int rc;
6765
6766         ENTER;
6767         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6768         while (ioa_cfg->in_reset_reload) {
6769                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6770                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6771                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6772         }
6773
6774         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6775                 goto out_unlock;
6776
6777         rc = ipr_device_reset(ioa_cfg, res);
6778
6779         if (rc) {
6780                 ap->link.device[0].class = ATA_DEV_NONE;
6781                 goto out_unlock;
6782         }
6783
6784         ap->link.device[0].class = res->ata_class;
6785         if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6786                 ap->link.device[0].class = ATA_DEV_NONE;
6787
6788 out_unlock:
6789         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6790         LEAVE;
6791 }
6792
6793 /**
6794  * ipr_ata_post_internal - Cleanup after an internal command
6795  * @qc: ATA queued command
6796  *
6797  * Return value:
6798  *      none
6799  **/
6800 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6801 {
6802         struct ipr_sata_port *sata_port = qc->ap->private_data;
6803         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6804         struct ipr_cmnd *ipr_cmd;
6805         struct ipr_hrr_queue *hrrq;
6806         unsigned long flags;
6807
6808         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6809         while (ioa_cfg->in_reset_reload) {
6810                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6811                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6812                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6813         }
6814
6815         for_each_hrrq(hrrq, ioa_cfg) {
6816                 spin_lock(&hrrq->_lock);
6817                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6818                         if (ipr_cmd->qc == qc) {
6819                                 ipr_device_reset(ioa_cfg, sata_port->res);
6820                                 break;
6821                         }
6822                 }
6823                 spin_unlock(&hrrq->_lock);
6824         }
6825         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6826 }
6827
6828 /**
6829  * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6830  * @regs:       destination
6831  * @tf: source ATA taskfile
6832  *
6833  * Return value:
6834  *      none
6835  **/
6836 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6837                              struct ata_taskfile *tf)
6838 {
6839         regs->feature = tf->feature;
6840         regs->nsect = tf->nsect;
6841         regs->lbal = tf->lbal;
6842         regs->lbam = tf->lbam;
6843         regs->lbah = tf->lbah;
6844         regs->device = tf->device;
6845         regs->command = tf->command;
6846         regs->hob_feature = tf->hob_feature;
6847         regs->hob_nsect = tf->hob_nsect;
6848         regs->hob_lbal = tf->hob_lbal;
6849         regs->hob_lbam = tf->hob_lbam;
6850         regs->hob_lbah = tf->hob_lbah;
6851         regs->ctl = tf->ctl;
6852 }
6853
6854 /**
6855  * ipr_sata_done - done function for SATA commands
6856  * @ipr_cmd:    ipr command struct
6857  *
6858  * This function is invoked by the interrupt handler for
6859  * ops generated by the SCSI mid-layer to SATA devices
6860  *
6861  * Return value:
6862  *      none
6863  **/
6864 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6865 {
6866         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6867         struct ata_queued_cmd *qc = ipr_cmd->qc;
6868         struct ipr_sata_port *sata_port = qc->ap->private_data;
6869         struct ipr_resource_entry *res = sata_port->res;
6870         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6871
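	/* Copy the ATA status block returned in the IOASA into the
	 * sata_port for the error-mask checks below. */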
6872         spin_lock(&ipr_cmd->hrrq->_lock);
6873         if (ipr_cmd->ioa_cfg->sis64)
6874                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6875                        sizeof(struct ipr_ioasa_gata));
6876         else
6877                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6878                        sizeof(struct ipr_ioasa_gata));
6879         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6880
6881         if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6882                 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6883
6884         if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6885                 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6886         else
6887                 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6888         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6889         spin_unlock(&ipr_cmd->hrrq->_lock);
6890         ata_qc_complete(qc);
6891 }
6892
6893 /**
6894  * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6895  * @ipr_cmd:    ipr command struct
6896  * @qc:         ATA queued command
6897  *
6898  **/
6899 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6900                                   struct ata_queued_cmd *qc)
6901 {
6902         u32 ioadl_flags = 0;
6903         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6904         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6905         struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6906         int len = qc->nbytes;
6907         struct scatterlist *sg;
6908         unsigned int si;
6909         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6910
6911         if (len == 0)
6912                 return;
6913
6914         if (qc->dma_dir == DMA_TO_DEVICE) {
6915                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6916                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6917         } else if (qc->dma_dir == DMA_FROM_DEVICE)
6918                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6919
6920         ioarcb->data_transfer_length = cpu_to_be32(len);
6921         ioarcb->ioadl_len =
6922                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6923         ioarcb->u.sis64_addr_data.data_ioadl_addr =
6924                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6925
6926         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6927                 ioadl64->flags = cpu_to_be32(ioadl_flags);
6928                 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6929                 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6930
6931                 last_ioadl64 = ioadl64;
6932                 ioadl64++;
6933         }
6934
6935         if (likely(last_ioadl64))
6936                 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6937 }
6938
6939 /**
6940  * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6941  * @ipr_cmd:    ipr command struct
6942  * @qc:         ATA queued command
6943  *
6944  **/
6945 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6946                                 struct ata_queued_cmd *qc)
6947 {
6948         u32 ioadl_flags = 0;
6949         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6950         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6951         struct ipr_ioadl_desc *last_ioadl = NULL;
6952         int len = qc->nbytes;
6953         struct scatterlist *sg;
6954         unsigned int si;
6955
6956         if (len == 0)
6957                 return;
6958
6959         if (qc->dma_dir == DMA_TO_DEVICE) {
6960                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6961                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6962                 ioarcb->data_transfer_length = cpu_to_be32(len);
6963                 ioarcb->ioadl_len =
6964                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6965         } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6966                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6967                 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6968                 ioarcb->read_ioadl_len =
6969                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6970         }
6971
6972         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6973                 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6974                 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6975
6976                 last_ioadl = ioadl;
6977                 ioadl++;
6978         }
6979
6980         if (likely(last_ioadl))
6981                 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6982 }
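/*
 * For reference, the two IOADL descriptor formats built above encode a
 * scatter/gather element as follows (sketch derived from the structures
 * used here, not additional driver code):
 *
 *   sis32 (ipr_ioadl_desc):   flags_and_data_len  be32 (flags | length)
 *                             address             be32 DMA address
 *   sis64 (ipr_ioadl64_desc): flags               be32
 *                             data_len            be32
 *                             address             be64 DMA address
 *
 * Only the final descriptor carries IPR_IOADL_FLAGS_LAST, which is why
 * both builders track last_ioadl/last_ioadl64.
 */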
6983
6984 /**
6985  * ipr_qc_defer - Get a free ipr_cmd
6986  * @qc: queued command
6987  *
6988  * Return value:
6989  *      0 if success / ATA_DEFER_LINK if the command must be deferred
6990  **/
6991 static int ipr_qc_defer(struct ata_queued_cmd *qc)
6992 {
6993         struct ata_port *ap = qc->ap;
6994         struct ipr_sata_port *sata_port = ap->private_data;
6995         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6996         struct ipr_cmnd *ipr_cmd;
6997         struct ipr_hrr_queue *hrrq;
6998         int hrrq_id;
6999
7000         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
7001         hrrq = &ioa_cfg->hrrq[hrrq_id];
7002
7003         qc->lldd_task = NULL;
7004         spin_lock(&hrrq->_lock);
7005         if (unlikely(hrrq->ioa_is_dead)) {
7006                 spin_unlock(&hrrq->_lock);
7007                 return 0;
7008         }
7009
7010         if (unlikely(!hrrq->allow_cmds)) {
7011                 spin_unlock(&hrrq->_lock);
7012                 return ATA_DEFER_LINK;
7013         }
7014
7015         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
7016         if (ipr_cmd == NULL) {
7017                 spin_unlock(&hrrq->_lock);
7018                 return ATA_DEFER_LINK;
7019         }
7020
7021         qc->lldd_task = ipr_cmd;
7022         spin_unlock(&hrrq->_lock);
7023         return 0;
7024 }
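/*
 * libata invokes ->qc_defer() from its SCSI translation path before
 * ->qc_issue(), so the command block reserved here is parked in
 * qc->lldd_task for ipr_qc_issue() to consume. Roughly (illustrative
 * sketch of the caller side, not code from this driver):
 *
 *   rc = ap->ops->qc_defer(qc);
 *   if (rc)                      // ATA_DEFER_LINK: requeue and retry
 *           return rc;
 *   ap->ops->qc_issue(qc);       // picks up qc->lldd_task
 */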
7025
7026 /**
7027  * ipr_qc_issue - Issue a SATA qc to a device
7028  * @qc: queued command
7029  *
7030  * Return value:
7031  *      0 if success / AC_ERR_* failure mask on error
7032  **/
7033 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
7034 {
7035         struct ata_port *ap = qc->ap;
7036         struct ipr_sata_port *sata_port = ap->private_data;
7037         struct ipr_resource_entry *res = sata_port->res;
7038         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7039         struct ipr_cmnd *ipr_cmd;
7040         struct ipr_ioarcb *ioarcb;
7041         struct ipr_ioarcb_ata_regs *regs;
7042
7043         if (qc->lldd_task == NULL)
7044                 ipr_qc_defer(qc);
7045
7046         ipr_cmd = qc->lldd_task;
7047         if (ipr_cmd == NULL)
7048                 return AC_ERR_SYSTEM;
7049
7050         qc->lldd_task = NULL;
7051         spin_lock(&ipr_cmd->hrrq->_lock);
7052         if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
7053                         ipr_cmd->hrrq->ioa_is_dead)) {
7054                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7055                 spin_unlock(&ipr_cmd->hrrq->_lock);
7056                 return AC_ERR_SYSTEM;
7057         }
7058
7059         ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
7060         ioarcb = &ipr_cmd->ioarcb;
7061
7062         if (ioa_cfg->sis64) {
7063                 regs = &ipr_cmd->i.ata_ioadl.regs;
7064                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
7065         } else
7066                 regs = &ioarcb->u.add_data.u.regs;
7067
7068         memset(regs, 0, sizeof(*regs));
7069         ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
7070
7071         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7072         ipr_cmd->qc = qc;
7073         ipr_cmd->done = ipr_sata_done;
7074         ipr_cmd->ioarcb.res_handle = res->res_handle;
7075         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
7076         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
7077         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
7078         ipr_cmd->dma_use_sg = qc->n_elem;
7079
7080         if (ioa_cfg->sis64)
7081                 ipr_build_ata_ioadl64(ipr_cmd, qc);
7082         else
7083                 ipr_build_ata_ioadl(ipr_cmd, qc);
7084
7085         regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
7086         ipr_copy_sata_tf(regs, &qc->tf);
7087         memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
7088         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
7089
7090         switch (qc->tf.protocol) {
7091         case ATA_PROT_NODATA:
7092         case ATA_PROT_PIO:
7093                 break;
7094
7095         case ATA_PROT_DMA:
7096                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7097                 break;
7098
7099         case ATAPI_PROT_PIO:
7100         case ATAPI_PROT_NODATA:
7101                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7102                 break;
7103
7104         case ATAPI_PROT_DMA:
7105                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7106                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7107                 break;
7108
7109         default:
7110                 WARN_ON(1);
7111                 spin_unlock(&ipr_cmd->hrrq->_lock);
7112                 return AC_ERR_INVALID;
7113         }
7114
7115         ipr_send_command(ipr_cmd);
7116         spin_unlock(&ipr_cmd->hrrq->_lock);
7117
7118         return 0;
7119 }
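/*
 * Summary of the taskfile protocol handling above: PIO and non-data
 * commands set no extra flags, DMA adds IPR_ATA_FLAG_XFER_TYPE_DMA,
 * and the ATAPI variants additionally mark the command as a packet
 * command:
 *
 *   ATA_PROT_NODATA / ATA_PROT_PIO      -> (none)
 *   ATA_PROT_DMA                        -> XFER_TYPE_DMA
 *   ATAPI_PROT_PIO / ATAPI_PROT_NODATA  -> PACKET_CMD
 *   ATAPI_PROT_DMA                      -> PACKET_CMD | XFER_TYPE_DMA
 */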
7120
7121 /**
7122  * ipr_qc_fill_rtf - Read result TF
7123  * @qc: ATA queued command
7124  *
7125  * Return value:
7126  *      true
7127  **/
7128 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
7129 {
7130         struct ipr_sata_port *sata_port = qc->ap->private_data;
7131         struct ipr_ioasa_gata *g = &sata_port->ioasa;
7132         struct ata_taskfile *tf = &qc->result_tf;
7133
7134         tf->feature = g->error;
7135         tf->nsect = g->nsect;
7136         tf->lbal = g->lbal;
7137         tf->lbam = g->lbam;
7138         tf->lbah = g->lbah;
7139         tf->device = g->device;
7140         tf->command = g->status;
7141         tf->hob_nsect = g->hob_nsect;
7142         tf->hob_lbal = g->hob_lbal;
7143         tf->hob_lbam = g->hob_lbam;
7144         tf->hob_lbah = g->hob_lbah;
7145
7146         return true;
7147 }
7148
7149 static struct ata_port_operations ipr_sata_ops = {
7150         .phy_reset = ipr_ata_phy_reset,
7151         .hardreset = ipr_sata_reset,
7152         .post_internal_cmd = ipr_ata_post_internal,
7153         .qc_prep = ata_noop_qc_prep,
7154         .qc_defer = ipr_qc_defer,
7155         .qc_issue = ipr_qc_issue,
7156         .qc_fill_rtf = ipr_qc_fill_rtf,
7157         .port_start = ata_sas_port_start,
7158         .port_stop = ata_sas_port_stop
7159 };
7160
7161 static struct ata_port_info sata_port_info = {
7162         .flags          = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
7163                           ATA_FLAG_SAS_HOST,
7164         .pio_mask       = ATA_PIO4_ONLY,
7165         .mwdma_mask     = ATA_MWDMA2,
7166         .udma_mask      = ATA_UDMA6,
7167         .port_ops       = &ipr_sata_ops
7168 };
7169
7170 #ifdef CONFIG_PPC_PSERIES
7171 static const u16 ipr_blocked_processors[] = {
7172         PVR_NORTHSTAR,
7173         PVR_PULSAR,
7174         PVR_POWER4,
7175         PVR_ICESTAR,
7176         PVR_SSTAR,
7177         PVR_POWER4p,
7178         PVR_630,
7179         PVR_630p
7180 };
7181
7182 /**
7183  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7184  * @ioa_cfg:    ioa cfg struct
7185  *
7186  * Adapters that use Gemstone revision < 3.1 do not work reliably on
7187  * certain pSeries hardware. This function determines if the given
7188  * adapter is in one of these configurations or not.
7189  *
7190  * Return value:
7191  *      1 if adapter is not supported / 0 if adapter is supported
7192  **/
7193 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
7194 {
7195         int i;
7196
7197         if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
7198                 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
7199                         if (pvr_version_is(ipr_blocked_processors[i]))
7200                                 return 1;
7201                 }
7202         }
7203         return 0;
7204 }
7205 #else
7206 #define ipr_invalid_adapter(ioa_cfg) 0
7207 #endif
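/*
 * Concretely, the check above rejects a type 0x5702 adapter at PCI
 * revision 0-3 (Gemstone earlier than 3.1) when the host processor is
 * any entry in ipr_blocked_processors; the same card on another
 * processor, or any other adapter type, is accepted.
 */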
7208
7209 /**
7210  * ipr_ioa_bringdown_done - IOA bring down completion.
7211  * @ipr_cmd:    ipr command struct
7212  *
7213  * This function processes the completion of an adapter bring down.
7214  * It wakes any reset sleepers.
7215  *
7216  * Return value:
7217  *      IPR_RC_JOB_RETURN
7218  **/
7219 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
7220 {
7221         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7222         int i;
7223
7224         ENTER;
7225         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
7226                 ipr_trace;
7227                 ioa_cfg->scsi_unblock = 1;
7228                 schedule_work(&ioa_cfg->work_q);
7229         }
7230
7231         ioa_cfg->in_reset_reload = 0;
7232         ioa_cfg->reset_retries = 0;
7233         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7234                 spin_lock(&ioa_cfg->hrrq[i]._lock);
7235                 ioa_cfg->hrrq[i].ioa_is_dead = 1;
7236                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7237         }
7238         wmb();
7239
7240         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7241         wake_up_all(&ioa_cfg->reset_wait_q);
7242         LEAVE;
7243
7244         return IPR_RC_JOB_RETURN;
7245 }
7246
7247 /**
7248  * ipr_ioa_reset_done - IOA reset completion.
7249  * @ipr_cmd:    ipr command struct
7250  *
7251  * This function processes the completion of an adapter reset.
7252  * It schedules any necessary mid-layer add/removes and
7253  * wakes any reset sleepers.
7254  *
7255  * Return value:
7256  *      IPR_RC_JOB_RETURN
7257  **/
7258 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7259 {
7260         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7261         struct ipr_resource_entry *res;
7262         int j;
7263
7264         ENTER;
7265         ioa_cfg->in_reset_reload = 0;
7266         for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7267                 spin_lock(&ioa_cfg->hrrq[j]._lock);
7268                 ioa_cfg->hrrq[j].allow_cmds = 1;
7269                 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7270         }
7271         wmb();
7272         ioa_cfg->reset_cmd = NULL;
7273         ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
7274
7275         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
7276                 if (res->add_to_ml || res->del_from_ml) {
7277                         ipr_trace;
7278                         break;
7279                 }
7280         }
7281         schedule_work(&ioa_cfg->work_q);
7282
7283         for (j = 0; j < IPR_NUM_HCAMS; j++) {
7284                 list_del_init(&ioa_cfg->hostrcb[j]->queue);
7285                 if (j < IPR_NUM_LOG_HCAMS)
7286                         ipr_send_hcam(ioa_cfg,
7287                                 IPR_HCAM_CDB_OP_CODE_LOG_DATA,
7288                                 ioa_cfg->hostrcb[j]);
7289                 else
7290                         ipr_send_hcam(ioa_cfg,
7291                                 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
7292                                 ioa_cfg->hostrcb[j]);
7293         }
7294
7295         scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
7296         dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7297
7298         ioa_cfg->reset_retries = 0;
7299         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7300         wake_up_all(&ioa_cfg->reset_wait_q);
7301
7302         ioa_cfg->scsi_unblock = 1;
7303         schedule_work(&ioa_cfg->work_q);
7304         LEAVE;
7305         return IPR_RC_JOB_RETURN;
7306 }
7307
7308 /**
7309  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7310  * @supported_dev:      supported device struct
7311  * @vpids:                      vendor product id struct
7312  *
7313  * Return value:
7314  *      none
7315  **/
7316 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7317                                  struct ipr_std_inq_vpids *vpids)
7318 {
7319         memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7320         memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7321         supported_dev->num_records = 1;
7322         supported_dev->data_length =
7323                 cpu_to_be16(sizeof(struct ipr_supported_device));
7324         supported_dev->reserved = 0;
7325 }
7326
7327 /**
7328  * ipr_set_supported_devs - Send Set Supported Devices for a device
7329  * @ipr_cmd:    ipr command struct
7330  *
7331  * This function sends a Set Supported Devices to the adapter
7332  *
7333  * Return value:
7334  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7335  **/
7336 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7337 {
7338         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7339         struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
7340         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7341         struct ipr_resource_entry *res = ipr_cmd->u.res;
7342
7343         ipr_cmd->job_step = ipr_ioa_reset_done;
7344
7345         list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
7346                 if (!ipr_is_scsi_disk(res))
7347                         continue;
7348
7349                 ipr_cmd->u.res = res;
7350                 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
7351
7352                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7353                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7354                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7355
7356                 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
7357                 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
7358                 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7359                 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7360
7361                 ipr_init_ioadl(ipr_cmd,
7362                                ioa_cfg->vpd_cbs_dma +
7363                                  offsetof(struct ipr_misc_cbs, supp_dev),
7364                                sizeof(struct ipr_supported_device),
7365                                IPR_IOADL_FLAGS_WRITE_LAST);
7366
7367                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7368                            IPR_SET_SUP_DEVICE_TIMEOUT);
7369
7370                 if (!ioa_cfg->sis64)
7371                         ipr_cmd->job_step = ipr_set_supported_devs;
7372                 LEAVE;
7373                 return IPR_RC_JOB_RETURN;
7374         }
7375
7376         LEAVE;
7377         return IPR_RC_JOB_CONTINUE;
7378 }
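/*
 * Note the self-resuming pattern above: ipr_cmd->u.res records where
 * the scan stopped, list_for_each_entry_continue() picks up after it,
 * and on non-sis64 adapters job_step is pointed back at this function,
 * so each completion issues Set Supported Devices for the next disk
 * until used_res_q is exhausted and the job advances to
 * ipr_ioa_reset_done.
 */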
7379
7380 /**
7381  * ipr_get_mode_page - Locate specified mode page
7382  * @mode_pages: mode page buffer
7383  * @page_code:  page code to find
7384  * @len:                minimum required length for mode page
7385  *
7386  * Return value:
7387  *      pointer to mode page / NULL on failure
7388  **/
7389 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7390                                u32 page_code, u32 len)
7391 {
7392         struct ipr_mode_page_hdr *mode_hdr;
7393         u32 page_length;
7394         u32 length;
7395
7396         if (!mode_pages || (mode_pages->hdr.length == 0))
7397                 return NULL;
7398
7399         length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7400         mode_hdr = (struct ipr_mode_page_hdr *)
7401                 (mode_pages->data + mode_pages->hdr.block_desc_len);
7402
7403         while (length) {
7404                 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7405                         if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7406                                 return mode_hdr;
7407                         break;
7408                 } else {
7409                         page_length = (sizeof(struct ipr_mode_page_hdr) +
7410                                        mode_hdr->page_length);
7411                         length -= page_length;
7412                         mode_hdr = (struct ipr_mode_page_hdr *)
7413                                 ((unsigned long)mode_hdr + page_length);
7414                 }
7415         }
7416         return NULL;
7417 }
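/*
 * Layout of the mode parameter buffer walked above, per the standard
 * mode sense (6) format (sketch for orientation):
 *
 *   +---------------------------+
 *   | 4-byte header             | hdr.length counts bytes after byte 0
 *   +---------------------------+
 *   | block descriptors         | hdr.block_desc_len bytes
 *   +---------------------------+
 *   | page hdr | page data      | repeated; page_length excludes the
 *   +---------------------------+ page header itself
 */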
7418
7419 /**
7420  * ipr_check_term_power - Check for term power errors
7421  * @ioa_cfg:    ioa config struct
7422  * @mode_pages: IOAFP mode pages buffer
7423  *
7424  * Check the IOAFP's mode page 28 for term power errors
7425  *
7426  * Return value:
7427  *      nothing
7428  **/
7429 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7430                                  struct ipr_mode_pages *mode_pages)
7431 {
7432         int i;
7433         int entry_length;
7434         struct ipr_dev_bus_entry *bus;
7435         struct ipr_mode_page28 *mode_page;
7436
7437         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7438                                       sizeof(struct ipr_mode_page28));
7439
7440         entry_length = mode_page->entry_length;
7441
7442         bus = mode_page->bus;
7443
7444         for (i = 0; i < mode_page->num_entries; i++) {
7445                 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7446                         dev_err(&ioa_cfg->pdev->dev,
7447                                 "Term power is absent on scsi bus %d\n",
7448                                 bus->res_addr.bus);
7449                 }
7450
7451                 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7452         }
7453 }
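/*
 * Both this function and ipr_modify_ioafp_mode_page_28() below
 * dereference the ipr_get_mode_page() result without a NULL check,
 * i.e. they assume the IOAFP always reports mode page 0x28. Adapters
 * that reject the page 28 mode sense are diverted earlier by
 * ipr_reset_mode_sense_failed(), before these helpers run.
 */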
7454
7455 /**
7456  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7457  * @ioa_cfg:    ioa config struct
7458  *
7459  * Looks through the config table checking for SES devices. If
7460  * the SES device is in the SES table indicating a maximum SCSI
7461  * bus speed, the speed is limited for the bus.
7462  *
7463  * Return value:
7464  *      none
7465  **/
7466 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7467 {
7468         u32 max_xfer_rate;
7469         int i;
7470
7471         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7472                 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7473                                                        ioa_cfg->bus_attr[i].bus_width);
7474
7475                 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7476                         ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7477         }
7478 }
7479
7480 /**
7481  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7482  * @ioa_cfg:    ioa config struct
7483  * @mode_pages: mode page 28 buffer
7484  *
7485  * Updates mode page 28 based on driver configuration
7486  *
7487  * Return value:
7488  *      none
7489  **/
7490 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7491                                           struct ipr_mode_pages *mode_pages)
7492 {
7493         int i, entry_length;
7494         struct ipr_dev_bus_entry *bus;
7495         struct ipr_bus_attributes *bus_attr;
7496         struct ipr_mode_page28 *mode_page;
7497
7498         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7499                                       sizeof(struct ipr_mode_page28));
7500
7501         entry_length = mode_page->entry_length;
7502
7503         /* Loop for each device bus entry */
7504         for (i = 0, bus = mode_page->bus;
7505              i < mode_page->num_entries;
7506              i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7507                 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7508                         dev_err(&ioa_cfg->pdev->dev,
7509                                 "Invalid resource address reported: 0x%08X\n",
7510                                 IPR_GET_PHYS_LOC(bus->res_addr));
7511                         continue;
7512                 }
7513
7514                 bus_attr = &ioa_cfg->bus_attr[i];
7515                 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7516                 bus->bus_width = bus_attr->bus_width;
7517                 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7518                 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7519                 if (bus_attr->qas_enabled)
7520                         bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7521                 else
7522                         bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7523         }
7524 }
7525
7526 /**
7527  * ipr_build_mode_select - Build a mode select command
7528  * @ipr_cmd:    ipr command struct
7529  * @res_handle: resource handle to send command to
7530  * @parm:               Byte 1 of Mode Select command
7531  * @dma_addr:   DMA buffer address
7532  * @xfer_len:   data transfer length
7533  *
7534  * Return value:
7535  *      none
7536  **/
7537 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7538                                   __be32 res_handle, u8 parm,
7539                                   dma_addr_t dma_addr, u8 xfer_len)
7540 {
7541         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7542
7543         ioarcb->res_handle = res_handle;
7544         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7545         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7546         ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7547         ioarcb->cmd_pkt.cdb[1] = parm;
7548         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7549
7550         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7551 }
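/*
 * Resulting MODE SELECT(6) CDB, for reference (unset bytes stay zero):
 *
 *   cdb[0] = 0x15 (MODE_SELECT)
 *   cdb[1] = parm      (PF/SP bits; callers in this file pass 0x11)
 *   cdb[4] = xfer_len  (parameter list length)
 */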
7552
7553 /**
7554  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7555  * @ipr_cmd:    ipr command struct
7556  *
7557  * This function sets up the SCSI bus attributes and sends
7558  * a Mode Select for Page 28 to activate them.
7559  *
7560  * Return value:
7561  *      IPR_RC_JOB_RETURN
7562  **/
7563 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7564 {
7565         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7566         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7567         int length;
7568
7569         ENTER;
7570         ipr_scsi_bus_speed_limit(ioa_cfg);
7571         ipr_check_term_power(ioa_cfg, mode_pages);
7572         ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7573         length = mode_pages->hdr.length + 1;
7574         mode_pages->hdr.length = 0;
7575
7576         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7577                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7578                               length);
7579
7580         ipr_cmd->job_step = ipr_set_supported_devs;
7581         ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7582                                     struct ipr_resource_entry, queue);
7583         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7584
7585         LEAVE;
7586         return IPR_RC_JOB_RETURN;
7587 }
7588
7589 /**
7590  * ipr_build_mode_sense - Builds a mode sense command
7591  * @ipr_cmd:    ipr command struct
7592  * @res_handle: resource handle to send command to
7593  * @parm:               Byte 2 of mode sense command
7594  * @dma_addr:   DMA address of mode sense buffer
7595  * @xfer_len:   Size of DMA buffer
7596  *
7597  * Return value:
7598  *      none
7599  **/
7600 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7601                                  __be32 res_handle,
7602                                  u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7603 {
7604         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7605
7606         ioarcb->res_handle = res_handle;
7607         ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7608         ioarcb->cmd_pkt.cdb[2] = parm;
7609         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7610         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7611
7612         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7613 }
7614
7615 /**
7616  * ipr_reset_cmd_failed - Handle failure of IOA reset command
7617  * @ipr_cmd:    ipr command struct
7618  *
7619  * This function handles the failure of an IOA bringup command.
7620  *
7621  * Return value:
7622  *      IPR_RC_JOB_RETURN
7623  **/
7624 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7625 {
7626         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7627         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7628
7629         dev_err(&ioa_cfg->pdev->dev,
7630                 "0x%02X failed with IOASC: 0x%08X\n",
7631                 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7632
7633         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7634         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7635         return IPR_RC_JOB_RETURN;
7636 }
7637
7638 /**
7639  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7640  * @ipr_cmd:    ipr command struct
7641  *
7642  * This function handles the failure of a Mode Sense to the IOAFP.
7643  * Some adapters do not handle all mode pages.
7644  *
7645  * Return value:
7646  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7647  **/
7648 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7649 {
7650         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7651         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7652
7653         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7654                 ipr_cmd->job_step = ipr_set_supported_devs;
7655                 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7656                                             struct ipr_resource_entry, queue);
7657                 return IPR_RC_JOB_CONTINUE;
7658         }
7659
7660         return ipr_reset_cmd_failed(ipr_cmd);
7661 }
7662
7663 /**
7664  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7665  * @ipr_cmd:    ipr command struct
7666  *
7667  * This function sends a Page 28 mode sense to the IOA to
7668  * retrieve SCSI bus attributes.
7669  *
7670  * Return value:
7671  *      IPR_RC_JOB_RETURN
7672  **/
7673 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7674 {
7675         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7676
7677         ENTER;
7678         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7679                              0x28, ioa_cfg->vpd_cbs_dma +
7680                              offsetof(struct ipr_misc_cbs, mode_pages),
7681                              sizeof(struct ipr_mode_pages));
7682
7683         ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7684         ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7685
7686         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7687
7688         LEAVE;
7689         return IPR_RC_JOB_RETURN;
7690 }
7691
7692 /**
7693  * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7694  * @ipr_cmd:    ipr command struct
7695  *
7696  * This function enables dual IOA RAID support if possible.
7697  *
7698  * Return value:
7699  *      IPR_RC_JOB_RETURN
7700  **/
7701 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7702 {
7703         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7704         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7705         struct ipr_mode_page24 *mode_page;
7706         int length;
7707
7708         ENTER;
7709         mode_page = ipr_get_mode_page(mode_pages, 0x24,
7710                                       sizeof(struct ipr_mode_page24));
7711
7712         if (mode_page)
7713                 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7714
7715         length = mode_pages->hdr.length + 1;
7716         mode_pages->hdr.length = 0;
7717
7718         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7719                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7720                               length);
7721
7722         ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7723         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7724
7725         LEAVE;
7726         return IPR_RC_JOB_RETURN;
7727 }
7728
7729 /**
7730  * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7731  * @ipr_cmd:    ipr command struct
7732  *
7733  * This function handles the failure of a Mode Sense to the IOAFP.
7734  * Some adapters do not handle all mode pages.
7735  *
7736  * Return value:
7737  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7738  **/
7739 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7740 {
7741         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7742
7743         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7744                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7745                 return IPR_RC_JOB_CONTINUE;
7746         }
7747
7748         return ipr_reset_cmd_failed(ipr_cmd);
7749 }
7750
7751 /**
7752  * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7753  * @ipr_cmd:    ipr command struct
7754  *
7755  * This function sends a mode sense to the IOA to retrieve
7756  * the IOA Advanced Function Control mode page.
7757  *
7758  * Return value:
7759  *      IPR_RC_JOB_RETURN
7760  **/
7761 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7762 {
7763         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7764
7765         ENTER;
7766         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7767                              0x24, ioa_cfg->vpd_cbs_dma +
7768                              offsetof(struct ipr_misc_cbs, mode_pages),
7769                              sizeof(struct ipr_mode_pages));
7770
7771         ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7772         ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7773
7774         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7775
7776         LEAVE;
7777         return IPR_RC_JOB_RETURN;
7778 }
7779
7780 /**
7781  * ipr_init_res_table - Initialize the resource table
7782  * @ipr_cmd:    ipr command struct
7783  *
7784  * This function looks through the existing resource table, comparing
7785  * it with the config table. This function will take care of old/new
7786  * devices and schedule adding/removing them from the mid-layer
7787  * as appropriate.
7788  *
7789  * Return value:
7790  *      IPR_RC_JOB_CONTINUE
7791  **/
7792 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7793 {
7794         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7795         struct ipr_resource_entry *res, *temp;
7796         struct ipr_config_table_entry_wrapper cfgtew;
7797         int entries, found, flag, i;
7798         LIST_HEAD(old_res);
7799
7800         ENTER;
7801         if (ioa_cfg->sis64)
7802                 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7803         else
7804                 flag = ioa_cfg->u.cfg_table->hdr.flags;
7805
7806         if (flag & IPR_UCODE_DOWNLOAD_REQ)
7807                 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7808
7809         list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7810                 list_move_tail(&res->queue, &old_res);
7811
7812         if (ioa_cfg->sis64)
7813                 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7814         else
7815                 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7816
7817         for (i = 0; i < entries; i++) {
7818                 if (ioa_cfg->sis64)
7819                         cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7820                 else
7821                         cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7822                 found = 0;
7823
7824                 list_for_each_entry_safe(res, temp, &old_res, queue) {
7825                         if (ipr_is_same_device(res, &cfgtew)) {
7826                                 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7827                                 found = 1;
7828                                 break;
7829                         }
7830                 }
7831
7832                 if (!found) {
7833                         if (list_empty(&ioa_cfg->free_res_q)) {
7834                                 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7835                                 break;
7836                         }
7837
7838                         found = 1;
7839                         res = list_entry(ioa_cfg->free_res_q.next,
7840                                          struct ipr_resource_entry, queue);
7841                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7842                         ipr_init_res_entry(res, &cfgtew);
7843                         res->add_to_ml = 1;
7844                 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7845                         res->sdev->allow_restart = 1;
7846
7847                 if (found)
7848                         ipr_update_res_entry(res, &cfgtew);
7849         }
7850
7851         list_for_each_entry_safe(res, temp, &old_res, queue) {
7852                 if (res->sdev) {
7853                         res->del_from_ml = 1;
7854                         res->res_handle = IPR_INVALID_RES_HANDLE;
7855                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7856                 }
7857         }
7858
7859         list_for_each_entry_safe(res, temp, &old_res, queue) {
7860                 ipr_clear_res_target(res);
7861                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7862         }
7863
7864         if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7865                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7866         else
7867                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7868
7869         LEAVE;
7870         return IPR_RC_JOB_CONTINUE;
7871 }
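/*
 * Reconciliation above, in brief: all known resources are parked on a
 * temporary old_res list, then each config table entry is matched
 * against it. Matches move back to used_res_q, unmatched entries
 * consume a free resource and are flagged add_to_ml, and whatever
 * remains on old_res is either flagged del_from_ml (if it still has an
 * sdev) or returned directly to free_res_q.
 */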
7872
7873 /**
7874  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7875  * @ipr_cmd:    ipr command struct
7876  *
7877  * This function sends a Query IOA Configuration command
7878  * to the adapter to retrieve the IOA configuration table.
7879  *
7880  * Return value:
7881  *      IPR_RC_JOB_RETURN
7882  **/
7883 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7884 {
7885         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7886         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7887         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7888         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7889
7890         ENTER;
7891         if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7892                 ioa_cfg->dual_raid = 1;
7893         dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7894                  ucode_vpd->major_release, ucode_vpd->card_type,
7895                  ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7896         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7897         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7898
7899         ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7900         ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7901         ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7902         ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7903
7904         ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7905                        IPR_IOADL_FLAGS_READ_LAST);
7906
7907         ipr_cmd->job_step = ipr_init_res_table;
7908
7909         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7910
7911         LEAVE;
7912         return IPR_RC_JOB_RETURN;
7913 }
7914
7915 static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7916 {
7917         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7918
7919         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7920                 return IPR_RC_JOB_CONTINUE;
7921
7922         return ipr_reset_cmd_failed(ipr_cmd);
7923 }
7924
7925 static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7926                                          __be32 res_handle, u8 sa_code)
7927 {
7928         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7929
7930         ioarcb->res_handle = res_handle;
7931         ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7932         ioarcb->cmd_pkt.cdb[1] = sa_code;
7933         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7934 }
7935
7936 /**
7937  * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service action
7938  * @ipr_cmd:    ipr command struct
7939  *
7940  * Return value:
7941  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7942  **/
7943 static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7944 {
7945         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7946         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7947         struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7948
7949         ENTER;
7950
7951         ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7952
7953         if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7954                 ipr_build_ioa_service_action(ipr_cmd,
7955                                              cpu_to_be32(IPR_IOA_RES_HANDLE),
7956                                              IPR_IOA_SA_CHANGE_CACHE_PARAMS);
7957
7958                 ioarcb->cmd_pkt.cdb[2] = 0x40;
7959
7960                 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7961                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7962                            IPR_SET_SUP_DEVICE_TIMEOUT);
7963
7964                 LEAVE;
7965                 return IPR_RC_JOB_RETURN;
7966         }
7967
7968         LEAVE;
7969         return IPR_RC_JOB_CONTINUE;
7970 }
7971
7972 /**
7973  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7974  * @ipr_cmd:    ipr command struct
      * @flags:      inquiry flags byte (EVPD bit in byte 1 of the CDB)
      * @page:       VPD page code to request
      * @dma_addr:   DMA address of the inquiry response buffer
      * @xfer_len:   allocation length for the inquiry response
7975  *
7976  * This utility function sends an inquiry to the adapter.
7977  *
7978  * Return value:
7979  *      none
7980  **/
7981 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7982                               dma_addr_t dma_addr, u8 xfer_len)
7983 {
7984         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7985
7986         ENTER;
7987         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7988         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7989
7990         ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7991         ioarcb->cmd_pkt.cdb[1] = flags;
7992         ioarcb->cmd_pkt.cdb[2] = page;
7993         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7994
7995         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7996
7997         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7998         LEAVE;
7999 }
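/*
 * Resulting INQUIRY CDB, for reference:
 *
 *   cdb[0] = 0x12 (INQUIRY)
 *   cdb[1] = flags    (callers pass 1 to set EVPD for VPD pages)
 *   cdb[2] = page     (VPD page code: 0x00, 0x03, 0xD0, 0xC4, ...)
 *   cdb[4] = xfer_len (allocation length)
 */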
8000
8001 /**
8002  * ipr_inquiry_page_supported - Is the given inquiry page supported
8003  * @page0:              inquiry page 0 buffer
8004  * @page:               page code.
8005  *
8006  * This function determines if the specified inquiry page is supported.
8007  *
8008  * Return value:
8009  *      1 if page is supported / 0 if not
8010  **/
8011 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
8012 {
8013         int i;
8014
8015         for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
8016                 if (page0->page[i] == page)
8017                         return 1;
8018
8019         return 0;
8020 }
8021
8022 /**
8023  * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
8024  * @ipr_cmd:    ipr command struct
8025  *
8026  * This function sends a Page 0xC4 inquiry to the adapter
8027  * to retrieve software VPD information.
8028  *
8029  * Return value:
8030  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8031  **/
8032 static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
8033 {
8034         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8035         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8036         struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
8037
8038         ENTER;
8039         ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
8040         memset(pageC4, 0, sizeof(*pageC4));
8041
8042         if (ipr_inquiry_page_supported(page0, 0xC4)) {
8043                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
8044                                   (ioa_cfg->vpd_cbs_dma
8045                                    + offsetof(struct ipr_misc_cbs,
8046                                               pageC4_data)),
8047                                   sizeof(struct ipr_inquiry_pageC4));
8048                 return IPR_RC_JOB_RETURN;
8049         }
8050
8051         LEAVE;
8052         return IPR_RC_JOB_CONTINUE;
8053 }
8054
8055 /**
8056  * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
8057  * @ipr_cmd:    ipr command struct
8058  *
8059  * This function sends a Page 0xD0 inquiry to the adapter
8060  * to retrieve adapter capabilities.
8061  *
8062  * Return value:
8063  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8064  **/
8065 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
8066 {
8067         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8068         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8069         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
8070
8071         ENTER;
8072         ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
8073         memset(cap, 0, sizeof(*cap));
8074
8075         if (ipr_inquiry_page_supported(page0, 0xD0)) {
8076                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
8077                                   ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
8078                                   sizeof(struct ipr_inquiry_cap));
8079                 return IPR_RC_JOB_RETURN;
8080         }
8081
8082         LEAVE;
8083         return IPR_RC_JOB_CONTINUE;
8084 }
8085
8086 /**
8087  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
8088  * @ipr_cmd:    ipr command struct
8089  *
8090  * This function sends a Page 3 inquiry to the adapter
8091  * to retrieve software VPD information.
8092  *
8093  * Return value:
8094  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8095  **/
8096 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
8097 {
8098         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8099
8100         ENTER;
8101
8102         ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
8103
8104         ipr_ioafp_inquiry(ipr_cmd, 1, 3,
8105                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
8106                           sizeof(struct ipr_inquiry_page3));
8107
8108         LEAVE;
8109         return IPR_RC_JOB_RETURN;
8110 }
8111
8112 /**
8113  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
8114  * @ipr_cmd:    ipr command struct
8115  *
8116  * This function sends a Page 0 inquiry to the adapter
8117  * to retrieve supported inquiry pages.
8118  *
8119  * Return value:
8120  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8121  **/
8122 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
8123 {
8124         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8125         char type[5];
8126
8127         ENTER;
8128
8129         /* Grab the type out of the VPD and store it away */
8130         memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
8131         type[4] = '\0';
8132         ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
8133
8134         if (ipr_invalid_adapter(ioa_cfg)) {
8135                 dev_err(&ioa_cfg->pdev->dev,
8136                         "Adapter not supported in this hardware configuration.\n");
8137
8138                 if (!ipr_testmode) {
8139                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
8140                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8141                         list_add_tail(&ipr_cmd->queue,
8142                                         &ioa_cfg->hrrq->hrrq_free_q);
8143                         return IPR_RC_JOB_RETURN;
8144                 }
8145         }
8146
8147         ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
8148
8149         ipr_ioafp_inquiry(ipr_cmd, 1, 0,
8150                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
8151                           sizeof(struct ipr_inquiry_page0));
8152
8153         LEAVE;
8154         return IPR_RC_JOB_RETURN;
8155 }
8156
8157 /**
8158  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
8159  * @ipr_cmd:    ipr command struct
8160  *
8161  * This function sends a standard inquiry to the adapter.
8162  *
8163  * Return value:
8164  *      IPR_RC_JOB_RETURN
8165  **/
8166 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
8167 {
8168         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8169
8170         ENTER;
8171         ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
8172
8173         ipr_ioafp_inquiry(ipr_cmd, 0, 0,
8174                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
8175                           sizeof(struct ipr_ioa_vpd));
8176
8177         LEAVE;
8178         return IPR_RC_JOB_RETURN;
8179 }
8180
8181 /**
8182  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
8183  * @ipr_cmd:    ipr command struct
8184  *
8185  * This function send an Identify Host Request Response Queue
8186  * command to establish the HRRQ with the adapter.
8187  *
8188  * Return value:
8189  *      IPR_RC_JOB_RETURN
8190  **/
8191 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
8192 {
8193         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8194         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
8195         struct ipr_hrr_queue *hrrq;
8196
8197         ENTER;
8198         ipr_cmd->job_step = ipr_ioafp_std_inquiry;
8199         if (ioa_cfg->identify_hrrq_index == 0)
8200                 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
8201
8202         if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
8203                 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
8204
8205                 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
8206                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8207
8208                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8209                 if (ioa_cfg->sis64)
8210                         ioarcb->cmd_pkt.cdb[1] = 0x1;
8211
8212                 if (ioa_cfg->nvectors == 1)
8213                         ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
8214                 else
8215                         ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
8216
8217                 ioarcb->cmd_pkt.cdb[2] =
8218                         ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
8219                 ioarcb->cmd_pkt.cdb[3] =
8220                         ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
8221                 ioarcb->cmd_pkt.cdb[4] =
8222                         ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
8223                 ioarcb->cmd_pkt.cdb[5] =
8224                         ((u64) hrrq->host_rrq_dma) & 0xff;
8225                 ioarcb->cmd_pkt.cdb[7] =
8226                         ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
8227                 ioarcb->cmd_pkt.cdb[8] =
8228                         (sizeof(u32) * hrrq->size) & 0xff;
8229
8230                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8231                         ioarcb->cmd_pkt.cdb[9] =
8232                                         ioa_cfg->identify_hrrq_index;
8233
8234                 if (ioa_cfg->sis64) {
8235                         ioarcb->cmd_pkt.cdb[10] =
8236                                 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
8237                         ioarcb->cmd_pkt.cdb[11] =
8238                                 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
8239                         ioarcb->cmd_pkt.cdb[12] =
8240                                 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
8241                         ioarcb->cmd_pkt.cdb[13] =
8242                                 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
8243                 }
8244
8245                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8246                         ioarcb->cmd_pkt.cdb[14] =
8247                                         ioa_cfg->identify_hrrq_index;
8248
8249                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8250                            IPR_INTERNAL_TIMEOUT);
8251
8252                 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
8253                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8254
8255                 LEAVE;
8256                 return IPR_RC_JOB_RETURN;
8257         }
8258
8259         LEAVE;
8260         return IPR_RC_JOB_CONTINUE;
8261 }
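/*
 * CDB layout produced above for IPR_ID_HOST_RR_Q, shown for sis64
 * (sis32 skips bytes 10-13 and the format byte in cdb[1]):
 *
 *   cdb[2..5]   = bits 31..0 of host_rrq_dma, most significant first
 *   cdb[7..8]   = queue size in bytes (sizeof(u32) * hrrq->size)
 *   cdb[9]      = HRRQ index, when HRRQ selection is enabled
 *   cdb[10..13] = bits 63..32 of host_rrq_dma
 *   cdb[14]     = HRRQ index, when HRRQ selection is enabled
 */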
8262
8263 /**
8264  * ipr_reset_timer_done - Adapter reset timer function
8265  * @t:          Timer context used to fetch ipr command struct
8266  *
8267  * Description: This function is used in adapter reset processing
8268  * for timing events. If the reset_cmd pointer in the IOA
8269  * config struct is not this command, we are doing nested
8270  * resets and fail_all_ops will take care of freeing the
8271  * command block.
8272  *
8273  * Return value:
8274  *      none
8275  **/
8276 static void ipr_reset_timer_done(struct timer_list *t)
8277 {
8278         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
8279         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8280         unsigned long lock_flags = 0;
8281
8282         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8283
8284         if (ioa_cfg->reset_cmd == ipr_cmd) {
8285                 list_del(&ipr_cmd->queue);
8286                 ipr_cmd->done(ipr_cmd);
8287         }
8288
8289         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8290 }
8291
8292 /**
8293  * ipr_reset_start_timer - Start a timer for adapter reset job
8294  * @ipr_cmd:    ipr command struct
8295  * @timeout:    timeout value
8296  *
8297  * Description: This function is used in adapter reset processing
8298  * for timing events. If the reset_cmd pointer in the IOA
8299  * config struct is not this command, we are doing nested
8300  * resets and fail_all_ops will take care of freeing the
8301  * command block.
8302  *
8303  * Return value:
8304  *      none
8305  **/
8306 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
8307                                   unsigned long timeout)
8308 {
8309
8310         ENTER;
8311         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8312         ipr_cmd->done = ipr_reset_ioa_job;
8313
8314         ipr_cmd->timer.expires = jiffies + timeout;
8315         ipr_cmd->timer.function = ipr_reset_timer_done;
8316         add_timer(&ipr_cmd->timer);
8317 }
8318
8319 /**
8320  * ipr_init_ioa_mem - Initialize ioa_cfg control block
8321  * @ioa_cfg:    ioa cfg struct
8322  *
8323  * Return value:
8324  *      nothing
8325  **/
8326 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
8327 {
8328         struct ipr_hrr_queue *hrrq;
8329
8330         for_each_hrrq(hrrq, ioa_cfg) {
8331                 spin_lock(&hrrq->_lock);
8332                 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
8333
8334                 /* Initialize Host RRQ pointers */
8335                 hrrq->hrrq_start = hrrq->host_rrq;
8336                 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8337                 hrrq->hrrq_curr = hrrq->hrrq_start;
8338                 hrrq->toggle_bit = 1;
8339                 spin_unlock(&hrrq->_lock);
8340         }
8341         wmb();
8342
8343         ioa_cfg->identify_hrrq_index = 0;
8344         if (ioa_cfg->hrrq_num == 1)
8345                 atomic_set(&ioa_cfg->hrrq_index, 0);
8346         else
8347                 atomic_set(&ioa_cfg->hrrq_index, 1);
8348
8349         /* Zero out config table */
8350         memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
8351 }
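/*
 * The host RRQ is a circular buffer of response descriptors. Starting
 * toggle_bit at 1 over a zeroed queue means an entry only becomes
 * valid once the adapter writes it with the current toggle value; the
 * expected value is inverted each time hrrq_curr wraps from hrrq_end
 * back to hrrq_start, letting the completion path (elsewhere in this
 * file) tell fresh entries from stale ones.
 */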
8352
8353 /**
8354  * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8355  * @ipr_cmd:    ipr command struct
8356  *
8357  * Return value:
8358  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8359  **/
8360 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8361 {
8362         unsigned long stage, stage_time;
8363         u32 feedback;
8364         volatile u32 int_reg;
8365         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8366         u64 maskval = 0;
8367
8368         feedback = readl(ioa_cfg->regs.init_feedback_reg);
8369         stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8370         stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8371
8372         ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8373
8374         /* sanity check the stage_time value */
8375         if (stage_time == 0)
8376                 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8377         else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
8378                 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8379         else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8380                 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8381
8382         if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8383                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8384                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8385                 stage_time = ioa_cfg->transop_timeout;
8386                 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8387         } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
8388                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8389                 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8390                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8391                         maskval = IPR_PCII_IPL_STAGE_CHANGE;
8392                         maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8393                         writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8394                         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8395                         return IPR_RC_JOB_CONTINUE;
8396                 }
8397         }
8398
8399         ipr_cmd->timer.expires = jiffies + stage_time * HZ;
8400         ipr_cmd->timer.function = ipr_oper_timeout;
8401         ipr_cmd->done = ipr_reset_ioa_job;
8402         add_timer(&ipr_cmd->timer);
8403
8404         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8405
8406         return IPR_RC_JOB_RETURN;
8407 }
8408
8409 /**
8410  * ipr_reset_enable_ioa - Enable the IOA following a reset.
8411  * @ipr_cmd:    ipr command struct
8412  *
8413  * This function reinitializes some control blocks and
8414  * enables destructive diagnostics on the adapter.
8415  *
8416  * Return value:
8417  *      IPR_RC_JOB_RETURN
8418  **/
8419 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8420 {
8421         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8422         volatile u32 int_reg;
8423         volatile u64 maskval;
8424         int i;
8425
8426         ENTER;
8427         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8428         ipr_init_ioa_mem(ioa_cfg);
8429
8430         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8431                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8432                 ioa_cfg->hrrq[i].allow_interrupts = 1;
8433                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8434         }
8435         wmb();
8436         if (ioa_cfg->sis64) {
8437                 /* Set the adapter to the correct endian mode. */
8438                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8439                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8440         }
8441
8442         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8443
8444         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8445                 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
8446                        ioa_cfg->regs.clr_interrupt_mask_reg32);
8447                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8448                 return IPR_RC_JOB_CONTINUE;
8449         }
8450
8451         /* Enable destructive diagnostics on IOA */
8452         writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8453
8454         if (ioa_cfg->sis64) {
8455                 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8456                 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8457                 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8458         } else
8459                 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
8460
8461         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8462
8463         dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8464
8465         if (ioa_cfg->sis64) {
8466                 ipr_cmd->job_step = ipr_reset_next_stage;
8467                 return IPR_RC_JOB_CONTINUE;
8468         }
8469
8470         ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
8471         ipr_cmd->timer.function = ipr_oper_timeout;
8472         ipr_cmd->done = ipr_reset_ioa_job;
8473         add_timer(&ipr_cmd->timer);
8474         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8475
8476         LEAVE;
8477         return IPR_RC_JOB_RETURN;
8478 }
8479
8480 /**
8481  * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8482  * @ipr_cmd:    ipr command struct
8483  *
8484  * This function is invoked when an adapter dump has run out
8485  * of processing time.
8486  *
8487  * Return value:
8488  *      IPR_RC_JOB_CONTINUE
8489  **/
8490 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8491 {
8492         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8493
8494         if (ioa_cfg->sdt_state == GET_DUMP)
8495                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8496         else if (ioa_cfg->sdt_state == READ_DUMP)
8497                 ioa_cfg->sdt_state = ABORT_DUMP;
8498
8499         ioa_cfg->dump_timeout = 1;
8500         ipr_cmd->job_step = ipr_reset_alert;
8501
8502         return IPR_RC_JOB_CONTINUE;
8503 }
8504
8505 /**
8506  * ipr_unit_check_no_data - Log a unit check/no data error log
8507  * @ioa_cfg:            ioa config struct
8508  *
8509  * Logs an error indicating the adapter unit checked, but for some
8510  * reason, we were unable to fetch the unit check buffer.
8511  *
8512  * Return value:
8513  *      nothing
8514  **/
8515 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8516 {
8517         ioa_cfg->errors_logged++;
8518         dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8519 }
8520
8521 /**
8522  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8523  * @ioa_cfg:            ioa config struct
8524  *
8525  * Fetches the unit check buffer from the adapter by clocking the data
8526  * through the mailbox register.
8527  *
8528  * Return value:
8529  *      nothing
8530  **/
8531 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8532 {
8533         unsigned long mailbox;
8534         struct ipr_hostrcb *hostrcb;
8535         struct ipr_uc_sdt sdt;
8536         int rc, length;
8537         u32 ioasc;
8538
8539         mailbox = readl(ioa_cfg->ioa_mailbox);
8540
8541         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8542                 ipr_unit_check_no_data(ioa_cfg);
8543                 return;
8544         }
8545
8546         memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8547         rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8548                                         (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8549
8550         if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8551             ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8552             (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8553                 ipr_unit_check_no_data(ioa_cfg);
8554                 return;
8555         }
8556
8557         /* Find length of the first sdt entry (UC buffer) */
8558         if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8559                 length = be32_to_cpu(sdt.entry[0].end_token);
8560         else
8561                 length = (be32_to_cpu(sdt.entry[0].end_token) -
8562                           be32_to_cpu(sdt.entry[0].start_token)) &
8563                           IPR_FMT2_MBX_ADDR_MASK;
8564
8565         hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8566                              struct ipr_hostrcb, queue);
8567         list_del_init(&hostrcb->queue);
8568         memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8569
8570         rc = ipr_get_ldump_data_section(ioa_cfg,
8571                                         be32_to_cpu(sdt.entry[0].start_token),
8572                                         (__be32 *)&hostrcb->hcam,
8573                                         min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8574
8575         if (!rc) {
8576                 ipr_handle_log_data(ioa_cfg, hostrcb);
8577                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8578                 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8579                     ioa_cfg->sdt_state == GET_DUMP)
8580                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8581         } else
8582                 ipr_unit_check_no_data(ioa_cfg);
8583
8584         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8585 }
8586
8587 /**
8588  * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8589  * @ipr_cmd:    ipr command struct
8590  *
 * Description: This function retrieves the unit check buffer from the IOA.
8592  *
8593  * Return value:
8594  *      IPR_RC_JOB_RETURN
8595  **/
8596 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8597 {
8598         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8599
8600         ENTER;
8601         ioa_cfg->ioa_unit_checked = 0;
8602         ipr_get_unit_check_buffer(ioa_cfg);
8603         ipr_cmd->job_step = ipr_reset_alert;
8604         ipr_reset_start_timer(ipr_cmd, 0);
8605
8606         LEAVE;
8607         return IPR_RC_JOB_RETURN;
8608 }
8609
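/**
 * ipr_dump_mailbox_wait - Wait for the mailbox register to become stable
 * @ipr_cmd:    ipr command struct
 *
 * Description: On SIS-64 adapters this polls until the mailbox register
 * is stable, or the wait times out, before starting to read the dump.
 *
 * Return value:
 *      IPR_RC_JOB_RETURN
 **/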
8610 static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
8611 {
8612         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8613
8614         ENTER;
8615
8616         if (ioa_cfg->sdt_state != GET_DUMP)
8617                 return IPR_RC_JOB_RETURN;
8618
8619         if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
8620             (readl(ioa_cfg->regs.sense_interrupt_reg) &
8621              IPR_PCII_MAILBOX_STABLE)) {
8622
8623                 if (!ipr_cmd->u.time_left)
8624                         dev_err(&ioa_cfg->pdev->dev,
8625                                 "Timed out waiting for Mailbox register.\n");
8626
8627                 ioa_cfg->sdt_state = READ_DUMP;
8628                 ioa_cfg->dump_timeout = 0;
8629                 if (ioa_cfg->sis64)
8630                         ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8631                 else
8632                         ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8633                 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8634                 schedule_work(&ioa_cfg->work_q);
8635
8636         } else {
8637                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8638                 ipr_reset_start_timer(ipr_cmd,
8639                                       IPR_CHECK_FOR_RESET_TIMEOUT);
8640         }
8641
8642         LEAVE;
8643         return IPR_RC_JOB_RETURN;
8644 }
8645
8646 /**
8647  * ipr_reset_restore_cfg_space - Restore PCI config space.
8648  * @ipr_cmd:    ipr command struct
8649  *
8650  * Description: This function restores the saved PCI config space of
8651  * the adapter, fails all outstanding ops back to the callers, and
8652  * fetches the dump/unit check if applicable to this reset.
8653  *
8654  * Return value:
8655  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8656  **/
8657 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8658 {
8659         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8660         u32 int_reg;
8661
8662         ENTER;
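        /* Force state_saved so pci_restore_state() restores the config
         * space saved at probe time even if the PCI core cleared the flag.
         */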
8663         ioa_cfg->pdev->state_saved = true;
8664         pci_restore_state(ioa_cfg->pdev);
8665
8666         if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8667                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8668                 return IPR_RC_JOB_CONTINUE;
8669         }
8670
8671         ipr_fail_all_ops(ioa_cfg);
8672
8673         if (ioa_cfg->sis64) {
8674                 /* Set the adapter to the correct endian mode. */
8675                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8676                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8677         }
8678
8679         if (ioa_cfg->ioa_unit_checked) {
8680                 if (ioa_cfg->sis64) {
8681                         ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8682                         ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8683                         return IPR_RC_JOB_RETURN;
8684                 } else {
8685                         ioa_cfg->ioa_unit_checked = 0;
8686                         ipr_get_unit_check_buffer(ioa_cfg);
8687                         ipr_cmd->job_step = ipr_reset_alert;
8688                         ipr_reset_start_timer(ipr_cmd, 0);
8689                         return IPR_RC_JOB_RETURN;
8690                 }
8691         }
8692
8693         if (ioa_cfg->in_ioa_bringdown) {
8694                 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8695         } else if (ioa_cfg->sdt_state == GET_DUMP) {
8696                 ipr_cmd->job_step = ipr_dump_mailbox_wait;
8697                 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
8698         } else {
8699                 ipr_cmd->job_step = ipr_reset_enable_ioa;
8700         }
8701
8702         LEAVE;
8703         return IPR_RC_JOB_CONTINUE;
8704 }
8705
8706 /**
8707  * ipr_reset_bist_done - BIST has completed on the adapter.
8708  * @ipr_cmd:    ipr command struct
8709  *
8710  * Description: Unblock config space and resume the reset process.
8711  *
8712  * Return value:
8713  *      IPR_RC_JOB_CONTINUE
8714  **/
8715 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8716 {
8717         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8718
8719         ENTER;
8720         if (ioa_cfg->cfg_locked)
8721                 pci_cfg_access_unlock(ioa_cfg->pdev);
8722         ioa_cfg->cfg_locked = 0;
8723         ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8724         LEAVE;
8725         return IPR_RC_JOB_CONTINUE;
8726 }
8727
8728 /**
8729  * ipr_reset_start_bist - Run BIST on the adapter.
8730  * @ipr_cmd:    ipr command struct
8731  *
8732  * Description: This function runs BIST on the adapter, then delays 2 seconds.
8733  *
8734  * Return value:
8735  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8736  **/
8737 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8738 {
8739         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8740         int rc = PCIBIOS_SUCCESSFUL;
8741
8742         ENTER;
8743         if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8744                 writel(IPR_UPROCI_SIS64_START_BIST,
8745                        ioa_cfg->regs.set_uproc_interrupt_reg32);
8746         else
8747                 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8748
8749         if (rc == PCIBIOS_SUCCESSFUL) {
8750                 ipr_cmd->job_step = ipr_reset_bist_done;
8751                 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8752                 rc = IPR_RC_JOB_RETURN;
8753         } else {
8754                 if (ioa_cfg->cfg_locked)
8755                         pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8756                 ioa_cfg->cfg_locked = 0;
8757                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8758                 rc = IPR_RC_JOB_CONTINUE;
8759         }
8760
8761         LEAVE;
8762         return rc;
8763 }
8764
8765 /**
8766  * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8767  * @ipr_cmd:    ipr command struct
8768  *
8769  * Description: This clears PCI reset to the adapter and delays two seconds.
8770  *
8771  * Return value:
8772  *      IPR_RC_JOB_RETURN
8773  **/
8774 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8775 {
8776         ENTER;
8777         ipr_cmd->job_step = ipr_reset_bist_done;
8778         ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8779         LEAVE;
8780         return IPR_RC_JOB_RETURN;
8781 }
8782
8783 /**
 * ipr_reset_reset_work - Pulse a PCIe warm reset
8785  * @work:       work struct
8786  *
8787  * Description: This pulses warm reset to a slot.
8788  *
8789  **/
8790 static void ipr_reset_reset_work(struct work_struct *work)
8791 {
8792         struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8793         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8794         struct pci_dev *pdev = ioa_cfg->pdev;
8795         unsigned long lock_flags = 0;
8796
8797         ENTER;
8798         pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8799         msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8800         pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8801
8802         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8803         if (ioa_cfg->reset_cmd == ipr_cmd)
8804                 ipr_reset_ioa_job(ipr_cmd);
8805         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8806         LEAVE;
8807 }
8808
8809 /**
8810  * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8811  * @ipr_cmd:    ipr command struct
8812  *
8813  * Description: This asserts PCI reset to the adapter.
8814  *
8815  * Return value:
8816  *      IPR_RC_JOB_RETURN
8817  **/
8818 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8819 {
8820         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8821
8822         ENTER;
8823         INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8824         queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
8825         ipr_cmd->job_step = ipr_reset_slot_reset_done;
8826         LEAVE;
8827         return IPR_RC_JOB_RETURN;
8828 }
8829
8830 /**
8831  * ipr_reset_block_config_access_wait - Wait for permission to block config access
8832  * @ipr_cmd:    ipr command struct
8833  *
8834  * Description: This attempts to block config access to the IOA.
8835  *
8836  * Return value:
8837  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8838  **/
8839 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8840 {
8841         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8842         int rc = IPR_RC_JOB_CONTINUE;
8843
8844         if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8845                 ioa_cfg->cfg_locked = 1;
8846                 ipr_cmd->job_step = ioa_cfg->reset;
8847         } else {
8848                 if (ipr_cmd->u.time_left) {
8849                         rc = IPR_RC_JOB_RETURN;
8850                         ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8851                         ipr_reset_start_timer(ipr_cmd,
8852                                               IPR_CHECK_FOR_RESET_TIMEOUT);
8853                 } else {
8854                         ipr_cmd->job_step = ioa_cfg->reset;
8855                         dev_err(&ioa_cfg->pdev->dev,
8856                                 "Timed out waiting to lock config access. Resetting anyway.\n");
8857                 }
8858         }
8859
8860         return rc;
8861 }
8862
8863 /**
8864  * ipr_reset_block_config_access - Block config access to the IOA
8865  * @ipr_cmd:    ipr command struct
8866  *
 * Description: This attempts to block config access to the IOA.
8868  *
8869  * Return value:
8870  *      IPR_RC_JOB_CONTINUE
8871  **/
8872 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8873 {
8874         ipr_cmd->ioa_cfg->cfg_locked = 0;
8875         ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8876         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8877         return IPR_RC_JOB_CONTINUE;
8878 }
8879
8880 /**
8881  * ipr_reset_allowed - Query whether or not IOA can be reset
8882  * @ioa_cfg:    ioa config struct
8883  *
8884  * Return value:
8885  *      0 if reset not allowed / non-zero if reset is allowed
8886  **/
8887 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8888 {
8889         volatile u32 temp_reg;
8890
8891         temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8892         return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8893 }
8894
8895 /**
8896  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8897  * @ipr_cmd:    ipr command struct
8898  *
8899  * Description: This function waits for adapter permission to run BIST,
8900  * then runs BIST. If the adapter does not give permission after a
8901  * reasonable time, we will reset the adapter anyway. The impact of
8902  * resetting the adapter without warning the adapter is the risk of
8903  * losing the persistent error log on the adapter. If the adapter is
8904  * reset while it is writing to the flash on the adapter, the flash
8905  * segment will have bad ECC and be zeroed.
8906  *
8907  * Return value:
8908  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8909  **/
8910 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8911 {
8912         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8913         int rc = IPR_RC_JOB_RETURN;
8914
8915         if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8916                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8917                 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8918         } else {
8919                 ipr_cmd->job_step = ipr_reset_block_config_access;
8920                 rc = IPR_RC_JOB_CONTINUE;
8921         }
8922
8923         return rc;
8924 }
8925
8926 /**
8927  * ipr_reset_alert - Alert the adapter of a pending reset
8928  * @ipr_cmd:    ipr command struct
8929  *
8930  * Description: This function alerts the adapter that it will be reset.
8931  * If memory space is not currently enabled, proceed directly
8932  * to running BIST on the adapter. The timer must always be started
8933  * so we guarantee we do not run BIST from ipr_isr.
8934  *
8935  * Return value:
8936  *      IPR_RC_JOB_RETURN
8937  **/
8938 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8939 {
8940         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8941         u16 cmd_reg;
8942         int rc;
8943
8944         ENTER;
8945         rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8946
8947         if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8948                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8949                 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8950                 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8951         } else {
8952                 ipr_cmd->job_step = ipr_reset_block_config_access;
8953         }
8954
8955         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8956         ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8957
8958         LEAVE;
8959         return IPR_RC_JOB_RETURN;
8960 }
8961
8962 /**
8963  * ipr_reset_quiesce_done - Complete IOA disconnect
8964  * @ipr_cmd:    ipr command struct
8965  *
8966  * Description: Freeze the adapter to complete quiesce processing
8967  *
8968  * Return value:
8969  *      IPR_RC_JOB_CONTINUE
8970  **/
8971 static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8972 {
8973         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8974
8975         ENTER;
8976         ipr_cmd->job_step = ipr_ioa_bringdown_done;
8977         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8978         LEAVE;
8979         return IPR_RC_JOB_CONTINUE;
8980 }
8981
8982 /**
8983  * ipr_reset_cancel_hcam_done - Check for outstanding commands
8984  * @ipr_cmd:    ipr command struct
8985  *
 * Description: If nothing is outstanding to the IOA, proceed with
 * the IOA disconnect. Otherwise, reset the IOA.
8988  *
8989  * Return value:
8990  *      IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
8991  **/
8992 static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
8993 {
8994         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8995         struct ipr_cmnd *loop_cmd;
8996         struct ipr_hrr_queue *hrrq;
8997         int rc = IPR_RC_JOB_CONTINUE;
8998         int count = 0;
8999
9000         ENTER;
9001         ipr_cmd->job_step = ipr_reset_quiesce_done;
9002
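        /* Any command still pending means the HCAM cancels did not
         * complete; reset the IOA instead of finishing the quiesce.
         */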
9003         for_each_hrrq(hrrq, ioa_cfg) {
9004                 spin_lock(&hrrq->_lock);
9005                 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
9006                         count++;
9007                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9008                         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9009                         rc = IPR_RC_JOB_RETURN;
9010                         break;
9011                 }
9012                 spin_unlock(&hrrq->_lock);
9013
9014                 if (count)
9015                         break;
9016         }
9017
9018         LEAVE;
9019         return rc;
9020 }
9021
9022 /**
9023  * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
9024  * @ipr_cmd:    ipr command struct
9025  *
 * Description: Cancel any outstanding HCAMs to the IOA.
9027  *
9028  * Return value:
9029  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9030  **/
9031 static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
9032 {
9033         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9034         int rc = IPR_RC_JOB_CONTINUE;
9035         struct ipr_cmd_pkt *cmd_pkt;
9036         struct ipr_cmnd *hcam_cmd;
9037         struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
9038
9039         ENTER;
9040         ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
9041
9042         if (!hrrq->ioa_is_dead) {
9043                 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
9044                         list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
9045                                 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
9046                                         continue;
9047
9048                                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
                                cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
                                cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
9052                                 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
9053                                 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
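                                /* Scatter the 64-bit IOARCB address of the
                                 * HCAM into the CDB: bytes 10-13 carry the
                                 * upper 32 bits, bytes 2-5 the lower 32 bits.
                                 */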
9054                                 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
9055                                 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
9056                                 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
9057                                 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
9058                                 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
9059                                 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
9060                                 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
9061                                 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
9062
9063                                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9064                                            IPR_CANCEL_TIMEOUT);
9065
9066                                 rc = IPR_RC_JOB_RETURN;
9067                                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9068                                 break;
9069                         }
9070                 }
9071         } else
9072                 ipr_cmd->job_step = ipr_reset_alert;
9073
9074         LEAVE;
9075         return rc;
9076 }
9077
9078 /**
9079  * ipr_reset_ucode_download_done - Microcode download completion
9080  * @ipr_cmd:    ipr command struct
9081  *
9082  * Description: This function unmaps the microcode download buffer.
9083  *
9084  * Return value:
9085  *      IPR_RC_JOB_CONTINUE
9086  **/
9087 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
9088 {
9089         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9090         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9091
9092         dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
9093                      sglist->num_sg, DMA_TO_DEVICE);
9094
9095         ipr_cmd->job_step = ipr_reset_alert;
9096         return IPR_RC_JOB_CONTINUE;
9097 }
9098
9099 /**
9100  * ipr_reset_ucode_download - Download microcode to the adapter
9101  * @ipr_cmd:    ipr command struct
9102  *
 * Description: This function checks to see if there is microcode
9104  * to download to the adapter. If there is, a download is performed.
9105  *
9106  * Return value:
9107  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9108  **/
9109 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
9110 {
9111         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9112         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9113
9114         ENTER;
9115         ipr_cmd->job_step = ipr_reset_alert;
9116
9117         if (!sglist)
9118                 return IPR_RC_JOB_CONTINUE;
9119
9120         ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9121         ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
9122         ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
9123         ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
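        /* WRITE BUFFER carries the 24-bit image length in CDB bytes 6-8 */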
9124         ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
9125         ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
9126         ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
9127
9128         if (ioa_cfg->sis64)
9129                 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
9130         else
9131                 ipr_build_ucode_ioadl(ipr_cmd, sglist);
9132         ipr_cmd->job_step = ipr_reset_ucode_download_done;
9133
9134         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9135                    IPR_WRITE_BUFFER_TIMEOUT);
9136
9137         LEAVE;
9138         return IPR_RC_JOB_RETURN;
9139 }
9140
9141 /**
9142  * ipr_reset_shutdown_ioa - Shutdown the adapter
9143  * @ipr_cmd:    ipr command struct
9144  *
9145  * Description: This function issues an adapter shutdown of the
9146  * specified type to the specified adapter as part of the
9147  * adapter reset job.
9148  *
9149  * Return value:
9150  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9151  **/
9152 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
9153 {
9154         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9155         enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
9156         unsigned long timeout;
9157         int rc = IPR_RC_JOB_CONTINUE;
9158
9159         ENTER;
9160         if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
9161                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9162         else if (shutdown_type != IPR_SHUTDOWN_NONE &&
9163                         !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
9164                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9165                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9166                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9167                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
9168
9169                 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
9170                         timeout = IPR_SHUTDOWN_TIMEOUT;
9171                 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
9172                         timeout = IPR_INTERNAL_TIMEOUT;
9173                 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
9174                         timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
9175                 else
9176                         timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
9177
9178                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
9179
9180                 rc = IPR_RC_JOB_RETURN;
9181                 ipr_cmd->job_step = ipr_reset_ucode_download;
9182         } else
9183                 ipr_cmd->job_step = ipr_reset_alert;
9184
9185         LEAVE;
9186         return rc;
9187 }
9188
9189 /**
9190  * ipr_reset_ioa_job - Adapter reset job
9191  * @ipr_cmd:    ipr command struct
9192  *
9193  * Description: This function is the job router for the adapter reset job.
9194  *
9195  * Return value:
9196  *      none
9197  **/
9198 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
9199 {
9200         u32 rc, ioasc;
9201         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9202
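        /* Each job step returns IPR_RC_JOB_CONTINUE to run the next step
         * immediately, or IPR_RC_JOB_RETURN once it has queued async work.
         */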
9203         do {
9204                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
9205
9206                 if (ioa_cfg->reset_cmd != ipr_cmd) {
9207                         /*
9208                          * We are doing nested adapter resets and this is
9209                          * not the current reset job.
9210                          */
9211                         list_add_tail(&ipr_cmd->queue,
9212                                         &ipr_cmd->hrrq->hrrq_free_q);
9213                         return;
9214                 }
9215
9216                 if (IPR_IOASC_SENSE_KEY(ioasc)) {
9217                         rc = ipr_cmd->job_step_failed(ipr_cmd);
9218                         if (rc == IPR_RC_JOB_RETURN)
9219                                 return;
9220                 }
9221
9222                 ipr_reinit_ipr_cmnd(ipr_cmd);
9223                 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
9224                 rc = ipr_cmd->job_step(ipr_cmd);
9225         } while (rc == IPR_RC_JOB_CONTINUE);
9226 }
9227
9228 /**
9229  * _ipr_initiate_ioa_reset - Initiate an adapter reset
9230  * @ioa_cfg:            ioa config struct
9231  * @job_step:           first job step of reset job
9232  * @shutdown_type:      shutdown type
9233  *
9234  * Description: This function will initiate the reset of the given adapter
9235  * starting at the selected job step.
9236  * If the caller needs to wait on the completion of the reset,
9237  * the caller must sleep on the reset_wait_q.
9238  *
9239  * Return value:
9240  *      none
9241  **/
9242 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9243                                     int (*job_step) (struct ipr_cmnd *),
9244                                     enum ipr_shutdown_type shutdown_type)
9245 {
9246         struct ipr_cmnd *ipr_cmd;
9247         int i;
9248
9249         ioa_cfg->in_reset_reload = 1;
9250         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9251                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9252                 ioa_cfg->hrrq[i].allow_cmds = 0;
9253                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9254         }
9255         wmb();
9256         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9257                 ioa_cfg->scsi_unblock = 0;
9258                 ioa_cfg->scsi_blocked = 1;
9259                 scsi_block_requests(ioa_cfg->host);
9260         }
9261
9262         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9263         ioa_cfg->reset_cmd = ipr_cmd;
9264         ipr_cmd->job_step = job_step;
9265         ipr_cmd->u.shutdown_type = shutdown_type;
9266
9267         ipr_reset_ioa_job(ipr_cmd);
9268 }
9269
9270 /**
9271  * ipr_initiate_ioa_reset - Initiate an adapter reset
9272  * @ioa_cfg:            ioa config struct
9273  * @shutdown_type:      shutdown type
9274  *
9275  * Description: This function will initiate the reset of the given adapter.
9276  * If the caller needs to wait on the completion of the reset,
9277  * the caller must sleep on the reset_wait_q.
9278  *
9279  * Return value:
9280  *      none
9281  **/
9282 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9283                                    enum ipr_shutdown_type shutdown_type)
9284 {
9285         int i;
9286
9287         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
9288                 return;
9289
9290         if (ioa_cfg->in_reset_reload) {
9291                 if (ioa_cfg->sdt_state == GET_DUMP)
9292                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9293                 else if (ioa_cfg->sdt_state == READ_DUMP)
9294                         ioa_cfg->sdt_state = ABORT_DUMP;
9295         }
9296
9297         if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
9298                 dev_err(&ioa_cfg->pdev->dev,
9299                         "IOA taken offline - error recovery failed\n");
9300
9301                 ioa_cfg->reset_retries = 0;
9302                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9303                         spin_lock(&ioa_cfg->hrrq[i]._lock);
9304                         ioa_cfg->hrrq[i].ioa_is_dead = 1;
9305                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
9306                 }
9307                 wmb();
9308
9309                 if (ioa_cfg->in_ioa_bringdown) {
9310                         ioa_cfg->reset_cmd = NULL;
9311                         ioa_cfg->in_reset_reload = 0;
9312                         ipr_fail_all_ops(ioa_cfg);
9313                         wake_up_all(&ioa_cfg->reset_wait_q);
9314
9315                         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9316                                 ioa_cfg->scsi_unblock = 1;
9317                                 schedule_work(&ioa_cfg->work_q);
9318                         }
9319                         return;
9320                 } else {
9321                         ioa_cfg->in_ioa_bringdown = 1;
9322                         shutdown_type = IPR_SHUTDOWN_NONE;
9323                 }
9324         }
9325
9326         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
9327                                 shutdown_type);
9328 }
9329
9330 /**
9331  * ipr_reset_freeze - Hold off all I/O activity
9332  * @ipr_cmd:    ipr command struct
9333  *
9334  * Description: If the PCI slot is frozen, hold off all I/O
9335  * activity; then, as soon as the slot is available again,
9336  * initiate an adapter reset.
 *
 * Return value:
 *      IPR_RC_JOB_RETURN
 **/
9338 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
9339 {
9340         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9341         int i;
9342
9343         /* Disallow new interrupts, avoid loop */
9344         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9345                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9346                 ioa_cfg->hrrq[i].allow_interrupts = 0;
9347                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9348         }
9349         wmb();
9350         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
9351         ipr_cmd->done = ipr_reset_ioa_job;
9352         return IPR_RC_JOB_RETURN;
9353 }
9354
9355 /**
9356  * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9357  * @pdev:       PCI device struct
9358  *
9359  * Description: This routine is called to tell us that the MMIO
9360  * access to the IOA has been restored
9361  */
9362 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
9363 {
9364         unsigned long flags = 0;
9365         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9366
9367         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9368         if (!ioa_cfg->probe_done)
9369                 pci_save_state(pdev);
9370         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9371         return PCI_ERS_RESULT_NEED_RESET;
9372 }
9373
9374 /**
9375  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9376  * @pdev:       PCI device struct
9377  *
9378  * Description: This routine is called to tell us that the PCI bus
9379  * is down. Can't do anything here, except put the device driver
9380  * into a holding pattern, waiting for the PCI bus to come back.
9381  */
9382 static void ipr_pci_frozen(struct pci_dev *pdev)
9383 {
9384         unsigned long flags = 0;
9385         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9386
9387         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9388         if (ioa_cfg->probe_done)
9389                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
9390         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9391 }
9392
9393 /**
9394  * ipr_pci_slot_reset - Called when PCI slot has been reset.
9395  * @pdev:       PCI device struct
9396  *
9397  * Description: This routine is called by the pci error recovery
9398  * code after the PCI slot has been reset, just before we
9399  * should resume normal operations.
9400  */
9401 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
9402 {
9403         unsigned long flags = 0;
9404         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9405
9406         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9407         if (ioa_cfg->probe_done) {
9408                 if (ioa_cfg->needs_warm_reset)
9409                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9410                 else
9411                         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9412                                                 IPR_SHUTDOWN_NONE);
9413         } else
9414                 wake_up_all(&ioa_cfg->eeh_wait_q);
9415         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9416         return PCI_ERS_RESULT_RECOVERED;
9417 }
9418
9419 /**
9420  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9421  * @pdev:       PCI device struct
9422  *
9423  * Description: This routine is called when the PCI bus has
9424  * permanently failed.
9425  */
9426 static void ipr_pci_perm_failure(struct pci_dev *pdev)
9427 {
9428         unsigned long flags = 0;
9429         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9430         int i;
9431
9432         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9433         if (ioa_cfg->probe_done) {
9434                 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9435                         ioa_cfg->sdt_state = ABORT_DUMP;
9436                 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9437                 ioa_cfg->in_ioa_bringdown = 1;
9438                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9439                         spin_lock(&ioa_cfg->hrrq[i]._lock);
9440                         ioa_cfg->hrrq[i].allow_cmds = 0;
9441                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
9442                 }
9443                 wmb();
9444                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9445         } else
9446                 wake_up_all(&ioa_cfg->eeh_wait_q);
9447         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9448 }
9449
9450 /**
9451  * ipr_pci_error_detected - Called when a PCI error is detected.
9452  * @pdev:       PCI device struct
9453  * @state:      PCI channel state
9454  *
9455  * Description: Called when a PCI error is detected.
9456  *
9457  * Return value:
9458  *      PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9459  */
9460 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9461                                                pci_channel_state_t state)
9462 {
9463         switch (state) {
9464         case pci_channel_io_frozen:
9465                 ipr_pci_frozen(pdev);
9466                 return PCI_ERS_RESULT_CAN_RECOVER;
9467         case pci_channel_io_perm_failure:
9468                 ipr_pci_perm_failure(pdev);
9469                 return PCI_ERS_RESULT_DISCONNECT;
9471         default:
9472                 break;
9473         }
9474         return PCI_ERS_RESULT_NEED_RESET;
9475 }
9476
9477 /**
9478  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9479  * @ioa_cfg:    ioa cfg struct
9480  *
 * Description: This is the second phase of adapter initialization.
 * This function takes care of initializing the adapter to the point
 * where it can accept new commands.
 *
9485  * Return value:
9486  *      0 on success / -EIO on failure
9487  **/
9488 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
9489 {
9490         int rc = 0;
9491         unsigned long host_lock_flags = 0;
9492
9493         ENTER;
9494         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9495         dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
9496         ioa_cfg->probe_done = 1;
9497         if (ioa_cfg->needs_hard_reset) {
9498                 ioa_cfg->needs_hard_reset = 0;
9499                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9500         } else
9501                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9502                                         IPR_SHUTDOWN_NONE);
9503         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9504
9505         LEAVE;
9506         return rc;
9507 }
9508
9509 /**
9510  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9511  * @ioa_cfg:    ioa config struct
9512  *
9513  * Return value:
9514  *      none
9515  **/
9516 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9517 {
9518         int i;
9519
9520         if (ioa_cfg->ipr_cmnd_list) {
9521                 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9522                         if (ioa_cfg->ipr_cmnd_list[i])
9523                                 dma_pool_free(ioa_cfg->ipr_cmd_pool,
9524                                               ioa_cfg->ipr_cmnd_list[i],
9525                                               ioa_cfg->ipr_cmnd_list_dma[i]);
9526
9527                         ioa_cfg->ipr_cmnd_list[i] = NULL;
9528                 }
9529         }
9530
9531         if (ioa_cfg->ipr_cmd_pool)
9532                 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
9533
9534         kfree(ioa_cfg->ipr_cmnd_list);
9535         kfree(ioa_cfg->ipr_cmnd_list_dma);
9536         ioa_cfg->ipr_cmnd_list = NULL;
9537         ioa_cfg->ipr_cmnd_list_dma = NULL;
9538         ioa_cfg->ipr_cmd_pool = NULL;
9539 }
9540
9541 /**
9542  * ipr_free_mem - Frees memory allocated for an adapter
9543  * @ioa_cfg:    ioa cfg struct
9544  *
9545  * Return value:
9546  *      nothing
9547  **/
9548 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9549 {
9550         int i;
9551
9552         kfree(ioa_cfg->res_entries);
9553         dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9554                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9555         ipr_free_cmd_blks(ioa_cfg);
9556
9557         for (i = 0; i < ioa_cfg->hrrq_num; i++)
9558                 dma_free_coherent(&ioa_cfg->pdev->dev,
9559                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9560                                   ioa_cfg->hrrq[i].host_rrq,
9561                                   ioa_cfg->hrrq[i].host_rrq_dma);
9562
9563         dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9564                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9565
9566         for (i = 0; i < IPR_MAX_HCAMS; i++) {
9567                 dma_free_coherent(&ioa_cfg->pdev->dev,
9568                                   sizeof(struct ipr_hostrcb),
9569                                   ioa_cfg->hostrcb[i],
9570                                   ioa_cfg->hostrcb_dma[i]);
9571         }
9572
9573         ipr_free_dump(ioa_cfg);
9574         kfree(ioa_cfg->trace);
9575 }
9576
9577 /**
9578  * ipr_free_irqs - Free all allocated IRQs for the adapter.
9579  * @ioa_cfg:    ipr cfg struct
9580  *
9581  * This function frees all allocated IRQs for the
9582  * specified adapter.
9583  *
9584  * Return value:
9585  *      none
9586  **/
9587 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9588 {
9589         struct pci_dev *pdev = ioa_cfg->pdev;
9590         int i;
9591
9592         for (i = 0; i < ioa_cfg->nvectors; i++)
9593                 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
9594         pci_free_irq_vectors(pdev);
9595 }
9596
9597 /**
9598  * ipr_free_all_resources - Free all allocated resources for an adapter.
9599  * @ipr_cmd:    ipr command struct
9600  *
9601  * This function frees all allocated resources for the
9602  * specified adapter.
9603  *
9604  * Return value:
9605  *      none
9606  **/
9607 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9608 {
9609         struct pci_dev *pdev = ioa_cfg->pdev;
9610
9611         ENTER;
9612         ipr_free_irqs(ioa_cfg);
9613         if (ioa_cfg->reset_work_q)
9614                 destroy_workqueue(ioa_cfg->reset_work_q);
9615         iounmap(ioa_cfg->hdw_dma_regs);
9616         pci_release_regions(pdev);
9617         ipr_free_mem(ioa_cfg);
9618         scsi_host_put(ioa_cfg->host);
9619         pci_disable_device(pdev);
9620         LEAVE;
9621 }
9622
9623 /**
9624  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9625  * @ioa_cfg:    ioa config struct
9626  *
9627  * Return value:
9628  *      0 on success / -ENOMEM on allocation failure
9629  **/
9630 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9631 {
9632         struct ipr_cmnd *ipr_cmd;
9633         struct ipr_ioarcb *ioarcb;
9634         dma_addr_t dma_addr;
9635         int i, entries_each_hrrq, hrrq_id = 0;
9636
9637         ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
9638                                                 sizeof(struct ipr_cmnd), 512, 0);
9639
9640         if (!ioa_cfg->ipr_cmd_pool)
9641                 return -ENOMEM;
9642
9643         ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9644         ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9645
9646         if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9647                 ipr_free_cmd_blks(ioa_cfg);
9648                 return -ENOMEM;
9649         }
9650
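        /* With multiple HRRQs, queue 0 is reserved for internal commands;
         * the remaining command blocks are split evenly across the other
         * queues, with any remainder folded into the last queue below.
         */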
9651         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9652                 if (ioa_cfg->hrrq_num > 1) {
9653                         if (i == 0) {
9654                                 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9655                                 ioa_cfg->hrrq[i].min_cmd_id = 0;
9656                                 ioa_cfg->hrrq[i].max_cmd_id =
9657                                         (entries_each_hrrq - 1);
9658                         } else {
9659                                 entries_each_hrrq =
9660                                         IPR_NUM_BASE_CMD_BLKS/
9661                                         (ioa_cfg->hrrq_num - 1);
9662                                 ioa_cfg->hrrq[i].min_cmd_id =
9663                                         IPR_NUM_INTERNAL_CMD_BLKS +
9664                                         (i - 1) * entries_each_hrrq;
9665                                 ioa_cfg->hrrq[i].max_cmd_id =
9666                                         (IPR_NUM_INTERNAL_CMD_BLKS +
9667                                         i * entries_each_hrrq - 1);
9668                         }
9669                 } else {
9670                         entries_each_hrrq = IPR_NUM_CMD_BLKS;
9671                         ioa_cfg->hrrq[i].min_cmd_id = 0;
9672                         ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9673                 }
9674                 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9675         }
9676
9677         BUG_ON(ioa_cfg->hrrq_num == 0);
9678
9679         i = IPR_NUM_CMD_BLKS -
9680                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9681         if (i > 0) {
9682                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9683                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9684         }
9685
9686         for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9687                 ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
9688
9689                 if (!ipr_cmd) {
9690                         ipr_free_cmd_blks(ioa_cfg);
9691                         return -ENOMEM;
9692                 }
9693
9694                 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
9695                 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9696                 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9697
9698                 ioarcb = &ipr_cmd->ioarcb;
9699                 ipr_cmd->dma_addr = dma_addr;
9700                 if (ioa_cfg->sis64)
9701                         ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9702                 else
9703                         ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9704
9705                 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9706                 if (ioa_cfg->sis64) {
9707                         ioarcb->u.sis64_addr_data.data_ioadl_addr =
9708                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9709                         ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9710                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9711                 } else {
9712                         ioarcb->write_ioadl_addr =
9713                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9714                         ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9715                         ioarcb->ioasa_host_pci_addr =
9716                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9717                 }
9718                 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9719                 ipr_cmd->cmd_index = i;
9720                 ipr_cmd->ioa_cfg = ioa_cfg;
9721                 ipr_cmd->sense_buffer_dma = dma_addr +
9722                         offsetof(struct ipr_cmnd, sense_buffer);
9723
9724                 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9725                 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9726                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9727                 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9728                         hrrq_id++;
9729         }
9730
9731         return 0;
9732 }
9733
9734 /**
9735  * ipr_alloc_mem - Allocate memory for an adapter
9736  * @ioa_cfg:    ioa config struct
9737  *
9738  * Return value:
9739  *      0 on success / non-zero for error
9740  **/
9741 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9742 {
9743         struct pci_dev *pdev = ioa_cfg->pdev;
9744         int i, rc = -ENOMEM;
9745
9746         ENTER;
        ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported,
                                       sizeof(struct ipr_resource_entry),
                                       GFP_KERNEL);
9749
9750         if (!ioa_cfg->res_entries)
9751                 goto out;
9752
9753         for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9754                 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9755                 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9756         }
9757
9758         ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9759                                               sizeof(struct ipr_misc_cbs),
9760                                               &ioa_cfg->vpd_cbs_dma,
9761                                               GFP_KERNEL);
9762
9763         if (!ioa_cfg->vpd_cbs)
9764                 goto out_free_res_entries;
9765
9766         if (ipr_alloc_cmd_blks(ioa_cfg))
9767                 goto out_free_vpd_cbs;
9768
9769         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9770                 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9771                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9772                                         &ioa_cfg->hrrq[i].host_rrq_dma,
9773                                         GFP_KERNEL);
9774
9775                 if (!ioa_cfg->hrrq[i].host_rrq)  {
                        while (--i >= 0)
9777                                 dma_free_coherent(&pdev->dev,
9778                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9779                                         ioa_cfg->hrrq[i].host_rrq,
9780                                         ioa_cfg->hrrq[i].host_rrq_dma);
9781                         goto out_ipr_free_cmd_blocks;
9782                 }
9783                 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9784         }
9785
9786         ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9787                                                   ioa_cfg->cfg_table_size,
9788                                                   &ioa_cfg->cfg_table_dma,
9789                                                   GFP_KERNEL);
9790
9791         if (!ioa_cfg->u.cfg_table)
9792                 goto out_free_host_rrq;
9793
9794         for (i = 0; i < IPR_MAX_HCAMS; i++) {
9795                 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9796                                                          sizeof(struct ipr_hostrcb),
9797                                                          &ioa_cfg->hostrcb_dma[i],
9798                                                          GFP_KERNEL);
9799
9800                 if (!ioa_cfg->hostrcb[i])
9801                         goto out_free_hostrcb_dma;
9802
9803                 ioa_cfg->hostrcb[i]->hostrcb_dma =
9804                         ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9805                 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9806                 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9807         }
9808
        ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES,
                                 sizeof(struct ipr_trace_entry), GFP_KERNEL);
9811
9812         if (!ioa_cfg->trace)
9813                 goto out_free_hostrcb_dma;
9814
9815         rc = 0;
9816 out:
9817         LEAVE;
9818         return rc;
9819
9820 out_free_hostrcb_dma:
9821         while (i-- > 0) {
9822                 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9823                                   ioa_cfg->hostrcb[i],
9824                                   ioa_cfg->hostrcb_dma[i]);
9825         }
9826         dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9827                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9828 out_free_host_rrq:
9829         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9830                 dma_free_coherent(&pdev->dev,
9831                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9832                                   ioa_cfg->hrrq[i].host_rrq,
9833                                   ioa_cfg->hrrq[i].host_rrq_dma);
9834         }
9835 out_ipr_free_cmd_blocks:
9836         ipr_free_cmd_blks(ioa_cfg);
9837 out_free_vpd_cbs:
9838         dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9839                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9840 out_free_res_entries:
9841         kfree(ioa_cfg->res_entries);
9842         goto out;
9843 }
9844
9845 /**
9846  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9847  * @ioa_cfg:    ioa config struct
9848  *
9849  * Return value:
9850  *      none
9851  **/
9852 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9853 {
9854         int i;
9855
9856         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9857                 ioa_cfg->bus_attr[i].bus = i;
9858                 ioa_cfg->bus_attr[i].qas_enabled = 0;
9859                 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9860                 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9861                         ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9862                 else
9863                         ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9864         }
9865 }
9866
9867 /**
9868  * ipr_init_regs - Initialize IOA registers
9869  * @ioa_cfg:    ioa config struct
9870  *
9871  * Return value:
9872  *      none
9873  **/
9874 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9875 {
9876         const struct ipr_interrupt_offsets *p;
9877         struct ipr_interrupts *t;
9878         void __iomem *base;
9879
9880         p = &ioa_cfg->chip_cfg->regs;
9881         t = &ioa_cfg->regs;
9882         base = ioa_cfg->hdw_dma_regs;
9883
9884         t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9885         t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9886         t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9887         t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9888         t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9889         t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9890         t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9891         t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9892         t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9893         t->ioarrin_reg = base + p->ioarrin_reg;
9894         t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9895         t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9896         t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9897         t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9898         t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9899         t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9900
9901         if (ioa_cfg->sis64) {
9902                 t->init_feedback_reg = base + p->init_feedback_reg;
9903                 t->dump_addr_reg = base + p->dump_addr_reg;
9904                 t->dump_data_reg = base + p->dump_data_reg;
9905                 t->endian_swap_reg = base + p->endian_swap_reg;
9906         }
9907 }
9908
9909 /**
9910  * ipr_init_ioa_cfg - Initialize IOA config struct
9911  * @ioa_cfg:    ioa config struct
9912  * @host:               scsi host struct
9913  * @pdev:               PCI dev struct
9914  *
9915  * Return value:
9916  *      none
9917  **/
9918 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9919                              struct Scsi_Host *host, struct pci_dev *pdev)
9920 {
9921         int i;
9922
9923         ioa_cfg->host = host;
9924         ioa_cfg->pdev = pdev;
9925         ioa_cfg->log_level = ipr_log_level;
9926         ioa_cfg->doorbell = IPR_DOORBELL;
9927         sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9928         sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9929         sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9930         sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9931         sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9932         sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9933
9934         INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9935         INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9936         INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
9937         INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9938         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9939         INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9940         init_waitqueue_head(&ioa_cfg->reset_wait_q);
9941         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9942         init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9943         ioa_cfg->sdt_state = INACTIVE;
9944
9945         ipr_initialize_bus_attr(ioa_cfg);
9946         ioa_cfg->max_devs_supported = ipr_max_devs;
9947
9948         if (ioa_cfg->sis64) {
9949                 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9950                 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9951                 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9952                         ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9953                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9954                                            + ((sizeof(struct ipr_config_table_entry64)
9955                                                * ioa_cfg->max_devs_supported)));
9956         } else {
9957                 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9958                 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9959                 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9960                         ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9961                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9962                                            + ((sizeof(struct ipr_config_table_entry)
9963                                                * ioa_cfg->max_devs_supported)));
9964         }
9965
9966         host->max_channel = IPR_VSET_BUS;
9967         host->unique_id = host->host_no;
9968         host->max_cmd_len = IPR_MAX_CDB_LEN;
9969         host->can_queue = ioa_cfg->max_cmds;
9970         pci_set_drvdata(pdev, ioa_cfg);
9971
9972         for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9973                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9974                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9975                 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9976                 if (i == 0)
9977                         ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9978                 else
9979                         ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9980         }
9981 }
9982
9983 /**
9984  * ipr_get_chip_info - Find adapter chip information
9985  * @dev_id:             PCI device id struct
9986  *
9987  * Return value:
9988  *      ptr to chip information on success / NULL on failure
9989  **/
9990 static const struct ipr_chip_t *
9991 ipr_get_chip_info(const struct pci_device_id *dev_id)
9992 {
9993         int i;
9994
9995         for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9996                 if (ipr_chip[i].vendor == dev_id->vendor &&
9997                     ipr_chip[i].device == dev_id->device)
9998                         return &ipr_chip[i];
9999         return NULL;
10000 }
10001
/**
 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
 *                                 during probe time
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      none
 **/
10010 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
10011 {
10012         struct pci_dev *pdev = ioa_cfg->pdev;
10013
10014         if (pci_channel_offline(pdev)) {
10015                 wait_event_timeout(ioa_cfg->eeh_wait_q,
10016                                    !pci_channel_offline(pdev),
10017                                    IPR_PCI_ERROR_RECOVERY_TIMEOUT);
10018                 pci_restore_state(pdev);
10019         }
10020 }
10021
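/**
 * name_msi_vectors - Build the per-vector interrupt description strings
 * @ioa_cfg:    ioa config struct
 *
 * Generates a "host<n>-<vector>" label for each allocated vector. The
 * labels are handed to request_irq() later, so they show up in
 * /proc/interrupts.
 *
 * Return value:
 *      none
 **/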
static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
{
        int vec_idx;

        for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++)
                snprintf(ioa_cfg->vectors_info[vec_idx].desc,
                         sizeof(ioa_cfg->vectors_info[vec_idx].desc),
                         "host%d-%d", ioa_cfg->host->host_no, vec_idx);
}
10033
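/**
 * ipr_request_other_msi_irqs - Request IRQs for the additional MSI/MSI-X vectors
 * @ioa_cfg:    ioa config struct
 * @pdev:       PCI device struct
 *
 * Vector 0 is requested by the caller; this requests vectors 1 through
 * nvectors - 1. On failure, every vector requested so far (including
 * vector 0) is freed before returning.
 *
 * Return value:
 *      0 on success / non-zero on failure
 **/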
10034 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
10035                 struct pci_dev *pdev)
10036 {
10037         int i, rc;
10038
10039         for (i = 1; i < ioa_cfg->nvectors; i++) {
10040                 rc = request_irq(pci_irq_vector(pdev, i),
10041                         ipr_isr_mhrrq,
10042                         0,
10043                         ioa_cfg->vectors_info[i].desc,
10044                         &ioa_cfg->hrrq[i]);
10045                 if (rc) {
10046                         while (--i >= 0)
10047                                 free_irq(pci_irq_vector(pdev, i),
10048                                         &ioa_cfg->hrrq[i]);
10049                         return rc;
10050                 }
10051         }
10052         return 0;
10053 }
10054
/**
 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
 * @irq:                interrupt number
 * @devp:               pointer to the ioa config struct
 *
 * Description: Simply set the msi_received flag to 1 indicating that
 * Message Signaled Interrupts are supported.
 *
 * Return value:
 *      IRQ_HANDLED
 **/
10065 static irqreturn_t ipr_test_intr(int irq, void *devp)
10066 {
10067         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
10068         unsigned long lock_flags = 0;
10069         irqreturn_t rc = IRQ_HANDLED;
10070
10071         dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
10072         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10073
10074         ioa_cfg->msi_received = 1;
10075         wake_up(&ioa_cfg->msi_wait_q);
10076
10077         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10078         return rc;
10079 }
10080
/**
 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 * @ioa_cfg:            ioa config struct
 * @pdev:               PCI device struct
 *
 * Description: This routine sets up and initiates a test interrupt to determine
 * if the interrupt is received via the ipr_test_intr() service routine.
 * If the test fails, the driver will fall back to LSI.
 *
 * Return value:
 *      0 on success / non-zero on failure
 **/
10092 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
10093 {
10094         int rc;
10095         volatile u32 int_reg;
10096         unsigned long lock_flags = 0;
10097         int irq = pci_irq_vector(pdev, 0);
10098
10099         ENTER;
10100
10101         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10102         init_waitqueue_head(&ioa_cfg->msi_wait_q);
10103         ioa_cfg->msi_received = 0;
10104         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10105         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
10106         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
10107         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10108
10109         rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
10110         if (rc) {
10111                 dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
10112                 return rc;
10113         } else if (ipr_debug)
10114                 dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
10115
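        /* Generate the test interrupt, then wait up to one second for
         * ipr_test_intr() to observe it and set msi_received.
         */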
10116         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
10117         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
10118         wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
10119         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10120         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10121
10122         if (!ioa_cfg->msi_received) {
10123                 /* MSI test failed */
10124                 dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
10125                 rc = -EOPNOTSUPP;
10126         } else if (ipr_debug)
10127                 dev_info(&pdev->dev, "MSI test succeeded.\n");
10128
10129         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10130
10131         free_irq(irq, ioa_cfg);
10132
10133         LEAVE;
10134
10135         return rc;
10136 }
10137
/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev:               PCI device struct
 * @dev_id:             PCI device id struct
 *
 * Return value:
 *      0 on success / non-zero on failure
 **/
10145 static int ipr_probe_ioa(struct pci_dev *pdev,
10146                          const struct pci_device_id *dev_id)
10147 {
10148         struct ipr_ioa_cfg *ioa_cfg;
10149         struct Scsi_Host *host;
10150         unsigned long ipr_regs_pci;
10151         void __iomem *ipr_regs;
10152         int rc = PCIBIOS_SUCCESSFUL;
10153         volatile u32 mask, uproc, interrupts;
10154         unsigned long lock_flags, driver_lock_flags;
10155         unsigned int irq_flag;
10156
10157         ENTER;
10158
10159         dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
10160         host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
10161
10162         if (!host) {
10163                 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
10164                 rc = -ENOMEM;
10165                 goto out;
10166         }
10167
10168         ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
10169         memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
10170         ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
10171
10172         ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
10173
        if (!ioa_cfg->ipr_chip) {
                dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
                        dev_id->vendor, dev_id->device);
                rc = -ENODEV;
                goto out_scsi_host_put;
        }
10179
10180         /* set SIS 32 or SIS 64 */
10181         ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
10182         ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
10183         ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
10184         ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
10185
10186         if (ipr_transop_timeout)
10187                 ioa_cfg->transop_timeout = ipr_transop_timeout;
10188         else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
10189                 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
10190         else
10191                 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
10192
10193         ioa_cfg->revid = pdev->revision;
10194
10195         ipr_init_ioa_cfg(ioa_cfg, host, pdev);
10196
10197         ipr_regs_pci = pci_resource_start(pdev, 0);
10198
10199         rc = pci_request_regions(pdev, IPR_NAME);
10200         if (rc < 0) {
10201                 dev_err(&pdev->dev,
10202                         "Couldn't register memory range of registers\n");
10203                 goto out_scsi_host_put;
10204         }
10205
10206         rc = pci_enable_device(pdev);
10207
10208         if (rc || pci_channel_offline(pdev)) {
10209                 if (pci_channel_offline(pdev)) {
10210                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10211                         rc = pci_enable_device(pdev);
10212                 }
10213
10214                 if (rc) {
10215                         dev_err(&pdev->dev, "Cannot enable adapter\n");
10216                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10217                         goto out_release_regions;
10218                 }
10219         }
10220
10221         ipr_regs = pci_ioremap_bar(pdev, 0);
10222
10223         if (!ipr_regs) {
10224                 dev_err(&pdev->dev,
10225                         "Couldn't map memory range of registers\n");
10226                 rc = -ENOMEM;
10227                 goto out_disable;
10228         }
10229
10230         ioa_cfg->hdw_dma_regs = ipr_regs;
10231         ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
10232         ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
10233
10234         ipr_init_regs(ioa_cfg);
10235
10236         if (ioa_cfg->sis64) {
10237                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10238                 if (rc < 0) {
10239                         dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
10240                         rc = dma_set_mask_and_coherent(&pdev->dev,
10241                                                        DMA_BIT_MASK(32));
10242                 }
        } else {
                rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        }
10245
10246         if (rc < 0) {
10247                 dev_err(&pdev->dev, "Failed to set DMA mask\n");
10248                 goto cleanup_nomem;
10249         }
10250
10251         rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
10252                                    ioa_cfg->chip_cfg->cache_line_size);
10253
10254         if (rc != PCIBIOS_SUCCESSFUL) {
10255                 dev_err(&pdev->dev, "Write of cache line size failed\n");
10256                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10257                 rc = -EIO;
10258                 goto cleanup_nomem;
10259         }
10260
10261         /* Issue MMIO read to ensure card is not in EEH */
10262         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
10263         ipr_wait_for_pci_err_recovery(ioa_cfg);
10264
10265         if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
10266                 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
10267                         IPR_MAX_MSIX_VECTORS);
10268                 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
10269         }
10270
10271         irq_flag = PCI_IRQ_LEGACY;
10272         if (ioa_cfg->ipr_chip->has_msi)
10273                 irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
10274         rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
10275         if (rc < 0) {
10276                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10277                 goto cleanup_nomem;
10278         }
10279         ioa_cfg->nvectors = rc;
10280
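        /* Without MSI/MSI-X, fall back to explicitly clearing the
         * interrupt condition in the interrupt handler.
         */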
10281         if (!pdev->msi_enabled && !pdev->msix_enabled)
10282                 ioa_cfg->clear_isr = 1;
10283
10284         pci_set_master(pdev);
10285
10286         if (pci_channel_offline(pdev)) {
10287                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10288                 pci_set_master(pdev);
10289                 if (pci_channel_offline(pdev)) {
10290                         rc = -EIO;
10291                         goto out_msi_disable;
10292                 }
10293         }
10294
10295         if (pdev->msi_enabled || pdev->msix_enabled) {
10296                 rc = ipr_test_msi(ioa_cfg, pdev);
10297                 switch (rc) {
10298                 case 0:
                        dev_info(&pdev->dev,
                                "Request for %d MSI%ss succeeded.\n", ioa_cfg->nvectors,
                                pdev->msix_enabled ? "-X" : "");
10302                         break;
10303                 case -EOPNOTSUPP:
10304                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10305                         pci_free_irq_vectors(pdev);
10306
10307                         ioa_cfg->nvectors = 1;
10308                         ioa_cfg->clear_isr = 1;
10309                         break;
10310                 default:
10311                         goto out_msi_disable;
10312                 }
10313         }
10314
10315         ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
10316                                 (unsigned int)num_online_cpus(),
10317                                 (unsigned int)IPR_MAX_HRRQ_NUM);
10318
        rc = ipr_save_pcix_cmd_reg(ioa_cfg);
        if (rc)
                goto out_msi_disable;

        rc = ipr_set_pcix_cmd_reg(ioa_cfg);
        if (rc)
                goto out_msi_disable;
10324
10325         rc = ipr_alloc_mem(ioa_cfg);
10326         if (rc < 0) {
10327                 dev_err(&pdev->dev,
10328                         "Couldn't allocate enough memory for device driver!\n");
10329                 goto out_msi_disable;
10330         }
10331
10332         /* Save away PCI config space for use following IOA reset */
10333         rc = pci_save_state(pdev);
10334
10335         if (rc != PCIBIOS_SUCCESSFUL) {
10336                 dev_err(&pdev->dev, "Failed to save PCI config space\n");
10337                 rc = -EIO;
10338                 goto cleanup_nolog;
10339         }
10340
10341         /*
10342          * If HRRQ updated interrupt is not masked, or reset alert is set,
10343          * the card is in an unknown state and needs a hard reset
10344          */
10345         mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
10346         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
10347         uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
10348         if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
10349                 ioa_cfg->needs_hard_reset = 1;
10350         if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
10351                 ioa_cfg->needs_hard_reset = 1;
10352         if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
10353                 ioa_cfg->ioa_unit_checked = 1;
10354
10355         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10356         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10357         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10358
10359         if (pdev->msi_enabled || pdev->msix_enabled) {
10360                 name_msi_vectors(ioa_cfg);
10361                 rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
10362                         ioa_cfg->vectors_info[0].desc,
10363                         &ioa_cfg->hrrq[0]);
10364                 if (!rc)
10365                         rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
10366         } else {
10367                 rc = request_irq(pdev->irq, ipr_isr,
10368                          IRQF_SHARED,
10369                          IPR_NAME, &ioa_cfg->hrrq[0]);
10370         }
10371         if (rc) {
10372                 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
10373                         pdev->irq, rc);
10374                 goto cleanup_nolog;
10375         }
10376
10377         if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
10378             (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
10379                 ioa_cfg->needs_warm_reset = 1;
10380                 ioa_cfg->reset = ipr_reset_slot_reset;
10381
10382                 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
10383                                                                 WQ_MEM_RECLAIM, host->host_no);
10384
10385                 if (!ioa_cfg->reset_work_q) {
10386                         dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
10387                         rc = -ENOMEM;
10388                         goto out_free_irq;
10389                 }
        } else {
                ioa_cfg->reset = ipr_reset_start_bist;
        }
10392
10393         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10394         list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
10395         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10396
10397         LEAVE;
10398 out:
10399         return rc;
10400
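        /* Error unwind: each label below releases the resources acquired
         * after the corresponding failure point, in reverse order of
         * acquisition, then jumps back to the common exit above.
         */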
10401 out_free_irq:
10402         ipr_free_irqs(ioa_cfg);
10403 cleanup_nolog:
10404         ipr_free_mem(ioa_cfg);
10405 out_msi_disable:
10406         ipr_wait_for_pci_err_recovery(ioa_cfg);
10407         pci_free_irq_vectors(pdev);
10408 cleanup_nomem:
10409         iounmap(ipr_regs);
10410 out_disable:
10411         pci_disable_device(pdev);
10412 out_release_regions:
10413         pci_release_regions(pdev);
10414 out_scsi_host_put:
10415         scsi_host_put(host);
10416         goto out;
10417 }
10418
10419 /**
10420  * ipr_initiate_ioa_bringdown - Bring down an adapter
10421  * @ioa_cfg:            ioa config struct
10422  * @shutdown_type:      shutdown type
10423  *
10424  * Description: This function will initiate bringing down the adapter.
10425  * This consists of issuing an IOA shutdown to the adapter
10426  * to flush the cache, and running BIST.
10427  * If the caller needs to wait on the completion of the reset,
10428  * the caller must sleep on the reset_wait_q.
10429  *
10430  * Return value:
10431  *      none
10432  **/
10433 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
10434                                        enum ipr_shutdown_type shutdown_type)
10435 {
10436         ENTER;
10437         if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
10438                 ioa_cfg->sdt_state = ABORT_DUMP;
10439         ioa_cfg->reset_retries = 0;
10440         ioa_cfg->in_ioa_bringdown = 1;
10441         ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
10442         LEAVE;
10443 }
10444
10445 /**
10446  * __ipr_remove - Remove a single adapter
10447  * @pdev:       pci device struct
10448  *
10449  * Adapter hot plug remove entry point.
10450  *
10451  * Return value:
10452  *      none
10453  **/
10454 static void __ipr_remove(struct pci_dev *pdev)
10455 {
10456         unsigned long host_lock_flags = 0;
10457         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10458         int i;
10459         unsigned long driver_lock_flags;
10460         ENTER;
10461
10462         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10463         while (ioa_cfg->in_reset_reload) {
10464                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10465                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10466                 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10467         }
10468
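        /* Flag every HRRQ as being removed before starting the bringdown
         * so that no new commands are accepted for the adapter.
         */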
10469         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
10470                 spin_lock(&ioa_cfg->hrrq[i]._lock);
10471                 ioa_cfg->hrrq[i].removing_ioa = 1;
10472                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
10473         }
10474         wmb();
10475         ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10476
10477         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10478         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10479         flush_work(&ioa_cfg->work_q);
10480         if (ioa_cfg->reset_work_q)
10481                 flush_workqueue(ioa_cfg->reset_work_q);
10482         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
10483         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10484
10485         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10486         list_del(&ioa_cfg->queue);
10487         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10488
10489         if (ioa_cfg->sdt_state == ABORT_DUMP)
10490                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
10491         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10492
10493         ipr_free_all_resources(ioa_cfg);
10494
10495         LEAVE;
10496 }
10497
10498 /**
10499  * ipr_remove - IOA hot plug remove entry point
10500  * @pdev:       pci device struct
10501  *
10502  * Adapter hot plug remove entry point.
10503  *
10504  * Return value:
10505  *      none
10506  **/
10507 static void ipr_remove(struct pci_dev *pdev)
10508 {
10509         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10510
10511         ENTER;
10512
10513         ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10514                               &ipr_trace_attr);
10515         ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10516                              &ipr_dump_attr);
10517         sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10518                         &ipr_ioa_async_err_log);
10519         scsi_remove_host(ioa_cfg->host);
10520
10521         __ipr_remove(pdev);
10522
10523         LEAVE;
10524 }
10525
/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:       PCI device struct
 * @dev_id:     PCI device id struct
 *
 * Return value:
 *      0 on success / non-zero on failure
 **/
10532 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
10533 {
10534         struct ipr_ioa_cfg *ioa_cfg;
10535         unsigned long flags;
10536         int rc, i;
10537
10538         rc = ipr_probe_ioa(pdev, dev_id);
10539
10540         if (rc)
10541                 return rc;
10542
10543         ioa_cfg = pci_get_drvdata(pdev);
10544         rc = ipr_probe_ioa_part2(ioa_cfg);
10545
10546         if (rc) {
10547                 __ipr_remove(pdev);
10548                 return rc;
10549         }
10550
10551         rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
10552
10553         if (rc) {
10554                 __ipr_remove(pdev);
10555                 return rc;
10556         }
10557
10558         rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
10559                                    &ipr_trace_attr);
10560
10561         if (rc) {
10562                 scsi_remove_host(ioa_cfg->host);
10563                 __ipr_remove(pdev);
10564                 return rc;
10565         }
10566
10567         rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
10568                         &ipr_ioa_async_err_log);
10569
10570         if (rc) {
10571                 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10572                                 &ipr_dump_attr);
10573                 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10574                                 &ipr_trace_attr);
10575                 scsi_remove_host(ioa_cfg->host);
10576                 __ipr_remove(pdev);
10577                 return rc;
10578         }
10579
10580         rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
10581                                    &ipr_dump_attr);
10582
10583         if (rc) {
10584                 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10585                                       &ipr_ioa_async_err_log);
10586                 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10587                                       &ipr_trace_attr);
10588                 scsi_remove_host(ioa_cfg->host);
10589                 __ipr_remove(pdev);
10590                 return rc;
10591         }
10592         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10593         ioa_cfg->scan_enabled = 1;
10594         schedule_work(&ioa_cfg->work_q);
10595         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10596
10597         ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
10598
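        /* On sis64 adapters with multiple vectors, completions on the
         * secondary HRRQs are processed through irq_poll.
         */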
10599         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10600                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
10601                         irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
10602                                         ioa_cfg->iopoll_weight, ipr_iopoll);
10603                 }
10604         }
10605
10606         scsi_scan_host(ioa_cfg->host);
10607
10608         return 0;
10609 }
10610
10611 /**
10612  * ipr_shutdown - Shutdown handler.
10613  * @pdev:       pci device struct
10614  *
 * This function is invoked upon system shutdown/reboot. It issues an
 * adapter shutdown to flush the write cache.
10617  *
10618  * Return value:
10619  *      none
10620  **/
10621 static void ipr_shutdown(struct pci_dev *pdev)
10622 {
10623         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10624         unsigned long lock_flags = 0;
10625         enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
10626         int i;
10627
10628         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10629         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10630                 ioa_cfg->iopoll_weight = 0;
10631                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
10632                         irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
10633         }
10634
10635         while (ioa_cfg->in_reset_reload) {
10636                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10637                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10638                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10639         }
10640
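        /* With fast reboot enabled on a sis64 adapter, a restart only
         * quiesces the IOA instead of performing a full normal shutdown.
         */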
10641         if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
10642                 shutdown_type = IPR_SHUTDOWN_QUIESCE;
10643
10644         ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
10645         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10646         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10647         if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
10648                 ipr_free_irqs(ioa_cfg);
10649                 pci_disable_device(ioa_cfg->pdev);
10650         }
10651 }
10652
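/* PCI IDs this driver claims. Entries match on vendor/device plus the IBM
 * subsystem IDs; driver_data carries per-adapter quirk flags such as
 * IPR_USE_LONG_TRANSOP_TIMEOUT and IPR_USE_PCI_WARM_RESET.
 */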
static const struct pci_device_id ipr_pci_table[] = {
10654         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10655                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
10656         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10657                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
10658         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10659                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
10660         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10661                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
10662         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10663                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
10664         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10665                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
10666         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10667                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
10668         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10669                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10670                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10671         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10672               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10673         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10674               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10675               IPR_USE_LONG_TRANSOP_TIMEOUT },
10676         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10677               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10678               IPR_USE_LONG_TRANSOP_TIMEOUT },
10679         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10680               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10681         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10682               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10683               IPR_USE_LONG_TRANSOP_TIMEOUT},
10684         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10685               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10686               IPR_USE_LONG_TRANSOP_TIMEOUT },
10687         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10688               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10689               IPR_USE_LONG_TRANSOP_TIMEOUT },
10690         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10691               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10692         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10693               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10694         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10695               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
10696               IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
10697         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
10698                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
10699         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10700                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
10701         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10702                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10703                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10704         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10705                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10706                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10707         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10708                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10709         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10710                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10711         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10712                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
10713         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10714                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10715         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10716                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10717         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10718                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
10719         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10720                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
10721         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10722                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
10723         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10724                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
10725         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10726                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10727         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10728                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
10729         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10730                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10731         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10732                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10733         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10734                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10735         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10736                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
10737         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10738                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10739         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10740                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10741         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10742                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10743         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10744                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10745         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10746                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10747         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10748                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10749         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10750                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10751         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10752                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
10753         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10754                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10755         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10756                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10757         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10758                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
10759         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10760                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
10761         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10762                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
10763         { }
10764 };
10765 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
10766
10767 static const struct pci_error_handlers ipr_err_handler = {
10768         .error_detected = ipr_pci_error_detected,
10769         .mmio_enabled = ipr_pci_mmio_enabled,
10770         .slot_reset = ipr_pci_slot_reset,
10771 };
10772
10773 static struct pci_driver ipr_driver = {
10774         .name = IPR_NAME,
10775         .id_table = ipr_pci_table,
10776         .probe = ipr_probe,
10777         .remove = ipr_remove,
10778         .shutdown = ipr_shutdown,
10779         .err_handler = &ipr_err_handler,
10780 };
10781
/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
10788 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10789 {
10790         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
10791 }
10792
/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:         notifier block
 * @event:      reboot notifier event
 * @buf:        unused
 *
 * Return value:
 *      NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
10799 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10800 {
10801         struct ipr_cmnd *ipr_cmd;
10802         struct ipr_ioa_cfg *ioa_cfg;
10803         unsigned long flags = 0, driver_lock_flags;
10804
10805         if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10806                 return NOTIFY_DONE;
10807
10808         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10809
10810         list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10811                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10812                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
10813                     (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
10814                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10815                         continue;
10816                 }
10817
10818                 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10819                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10820                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10821                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10822                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10823
10824                 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10825                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10826         }
10827         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10828
10829         return NOTIFY_OK;
10830 }
10831
static struct notifier_block ipr_notifier = {
        .notifier_call = ipr_halt,
};
10835
10836 /**
10837  * ipr_init - Module entry point
10838  *
10839  * Return value:
10840  *      0 on success / negative value on failure
10841  **/
10842 static int __init ipr_init(void)
10843 {
10844         ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10845                  IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10846
10847         register_reboot_notifier(&ipr_notifier);
10848         return pci_register_driver(&ipr_driver);
10849 }
10850
10851 /**
10852  * ipr_exit - Module unload
10853  *
10854  * Module unload entry point.
10855  *
10856  * Return value:
10857  *      none
10858  **/
10859 static void __exit ipr_exit(void)
10860 {
10861         unregister_reboot_notifier(&ipr_notifier);
10862         pci_unregister_driver(&ipr_driver);
10863 }
10864
10865 module_init(ipr_init);
10866 module_exit(ipr_exit);