/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 16;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.max_cmds = 1000,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		.iopoll_weight = 64,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debug logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default: 16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
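
/*
 * Illustrative usage only: the parameters declared above can be set at
 * module load time, for example:
 *
 *	modprobe ipr max_speed=2 log_level=2 fastfail=1
 *
 * The parameter names come from the module_param_named() calls above;
 * the values shown here are hypothetical examples.
 */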

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040100, 0, 0,
	"Logical Unit in process of becoming ready"},
	{0x02040200, 0, 0,
	"Initializing command required"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02040C00, 0, 0,
	"Logical unit not accessible, target port in unavailable state"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x02408500, 0, 0,
	"IOA microcode download required"},
	{0x02408600, 0, 0,
	"Device bus connection is prohibited by host"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4120: SAS cable VPD cannot be read"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x052C8100, 1, 0,
	"Illegal request, another cable connector was physically disabled"},
	{0x054E8000, 1, 0,
	"Illegal request, inconsistent group id/group count"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4080: IOA exceeded maximum operating temperature"},
	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4085: Service required"},
	{0x060B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4086: SAS Adapter Hardware Configuration Error"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4102: Device bus fabric performance degradation"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4121: Configuration error, required cable is missing"},
	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
	"4122: Cable is not plugged into the correct location on remote IOA"},
	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4123: Configuration error, invalid cable vital product data"},
	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4124: Configuration error, both cable ends are plugged into the same IOA"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
	"9083: Device raw mode enabled"},
	{0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
	"9084: Device raw mode disabled"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x07279A00, 0, 0,
	"Data protect, other volume set problem"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B3F9000, 0, 0,
	"Target operating conditions have changed, dual adapter takeover"},
	{0x0B530200, 0, 0,
	"Aborted command, medium removal prevented"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"},
	{0x0B5B8000, 0, 0,
	"Aborted command, command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 *	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned int trace_index;

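	/*
	 * Advance the ring index atomically so concurrent callers get
	 * distinct slots; the mask wraps the index within the fixed-size
	 * trace buffer, overwriting the oldest entries.
	 */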
	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
	trace_entry = &ioa_cfg->trace[trace_index];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
	wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

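	/* Preserve the hrrq assignment across the memset of the command packet. */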
	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 * @fast_done:	fast done function call-back
 *
 * Return value:
 *	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->eh_comp = NULL;
	ipr_cmd->fast_done = fast_done;
	timer_setup(&ipr_cmd->timer, NULL, 0);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:	hrr queue
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
			struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
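	/* Reading back flushes the posted MMIO writes to the adapter. */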
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

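	/*
	 * When the saved value is later restored, also enable data parity
	 * error recovery (DPERR_E) and relaxed ordering (ERO).
	 */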
	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * __ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *	none
 **/
static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	ata_qc_complete(qc);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
	unsigned long hrrq_flags;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_sata_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *	none
 **/
static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long hrrq_flags;
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_scsi_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = __ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = __ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
	LEAVE;
}

/**
 * ipr_send_command - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then OR in the
 * appropriate bits.
 *
 * Return value:
 *	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:		done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct timer_list *), u32 timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 *	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 *	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct timer_list *),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

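/*
 * Illustrative (hypothetical) caller sketch for the helpers above, with
 * ioa_cfg->host->host_lock held:
 *
 *	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
 *	... build the IOARCB in ipr_cmd->ioarcb ...
 *	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_INTERNAL_TIMEOUT);
 *
 * ipr_timeout and IPR_INTERNAL_TIMEOUT are defined elsewhere in this
 * driver; the sketch only shows the calling convention.
 */

/**
 * ipr_get_hrrq_index - Select an HRR queue for a new command
 * @ioa_cfg:	ioa config struct
 *
 * Round-robins across the HRR queues, skipping queue 0 (the
 * initialization queue) when more than one queue is configured.
 *
 * Return value:
 *	hrrq index
 **/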
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned int hrrq;

	if (ioa_cfg->hrrq_num == 1)
		hrrq = 0;
	else {
		hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
		hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
	}
	return hrrq;
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 *
 * Return value:
 *	none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->reset_occurred = 0;
	res->sdev = NULL;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path));

		res->bus = 0;
		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
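			/*
			 * No existing resource shares this dev_id, so claim
			 * a fresh SCSI target id from the bitmap.
			 */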
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
					sizeof(cfgtew->u.cfgte64->dev_id)) &&
			!memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
					sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}

1301 /**
1302  * __ipr_format_res_path - Format the resource path for printing.
1303  * @res_path:   resource path
1304  * @buf:        buffer
1305  * @len:        length of buffer provided
1306  *
1307  * Return value:
1308  *      pointer to buffer
1309  **/
1310 static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
1311 {
1312         int i;
1313         char *p = buffer;
1314
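	/*
	 * res_path is a sequence of bytes terminated by a 0xff sentinel;
	 * render it as dash-separated hex, e.g. "00-02-01".
	 */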
	*p = '\0';
	p += scnprintf(p, buffer + len - p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
		p += scnprintf(p, buffer + len - p, "-%02X", res_path[i]);

	return buffer;
}

/**
 * ipr_format_res_path - Format the resource path for printing.
 * @ioa_cfg:	ioa config struct
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 *	pointer to buffer
 **/
static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
				 u8 *res_path, char *buffer, int len)
{
	char *p = buffer;

	*p = '\0';
	p += scnprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
	__ipr_format_res_path(res_path, p, len - (p - buffer));
	return buffer;
}

/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *	none
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
				 struct ipr_config_table_entry_wrapper *cfgtew)
{
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	unsigned int proto;
	int new_path = 0;

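	/*
	 * On SIS64 adapters the physical location of a device can change
	 * (e.g. a reseat behind a different expander).  Compare the cached
	 * resource path against the one just reported and remember that it
	 * changed so it can be logged against the attached sdev below.
	 */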
	if (res->ioa_cfg->sis64) {
		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL64(res);
		proto = cfgtew->u.cfgte64->proto;
		res->res_handle = cfgtew->u.cfgte64->res_handle;
		res->dev_id = cfgtew->u.cfgte64->dev_id;

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));

		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
					sizeof(res->res_path))) {
			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
				sizeof(res->res_path));
			new_path = 1;
		}

		if (res->sdev && new_path)
			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
				    ipr_format_res_path(res->ioa_cfg,
					res->res_path, buffer, sizeof(buffer)));
	} else {
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL(res);
		proto = cfgtew->u.cfgte->proto;
		res->res_handle = cfgtew->u.cfgte->res_handle;
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_clear_res_target - Clear the bit in the bit map representing the target
 *			  for the resource.
 * @res:	resource entry struct
 *
 * Return value:
 *	none
 **/
static void ipr_clear_res_target(struct ipr_resource_entry *res)
{
	struct ipr_resource_entry *gscsi_res = NULL;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;

	if (!ioa_cfg->sis64)
		return;

	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->array_ids);
	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->vset_ids);
	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
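		/*
		 * Other paths to the same generic SCSI device share this
		 * target number; only release the bit once no other
		 * resource entry still carries the same dev_id.
		 */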
		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
				return;
		clear_bit(res->target, ioa_cfg->target_ids);
	} else if (res->bus == 0)
		clear_bit(res->target, ioa_cfg->target_ids);
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Return value:
 *	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry_wrapper cfgtew;
	__be32 cc_res_handle;
	u32 is_ndn = 1;

	if (ioa_cfg->sis64) {
		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
		cc_res_handle = cfgtew.u.cfgte64->res_handle;
	} else {
		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
		cc_res_handle = cfgtew.u.cfgte->res_handle;
	}

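	/*
	 * Search the used resource queue for the handle.  If nothing
	 * matches, treat this as a new-device notification (is_ndn stays
	 * set) and claim a free resource entry for it below.
	 */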
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->res_handle == cc_res_handle) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res, &cfgtew);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	ipr_update_res_entry(res, &cfgtew);

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			schedule_work(&ioa_cfg->work_q);
		} else {
			ipr_clear_res_target(res);
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
		}
	} else if (!res->sdev || res->del_from_ml) {
		res->add_to_ml = 1;
		schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 *	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	list_del_init(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
		    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}

/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i:		index into buffer
 * @buf:	string to modify
 *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NUL-terminate the result.
 *
 * Return value:
 *	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	while (i && buf[i] == ' ')
		i--;
	buf[i+1] = ' ';
	buf[i+2] = '\0';
	return i + 2;
}
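
/*
 * Example (illustrative values): with buf = "IBM     " and i = 7, the
 * loop backs up to the 'M' at index 2, a single pad space and a
 * terminator are written, and 4 is returned as the offset at which the
 * caller appends the next field.
 */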

/**
 * ipr_log_vpd_compact - Log the passed VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
	int i = 0;
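	/*
	 * buffer collects "<vendor> <product> <serial>" on one line; the
	 * extra 3 bytes cover the two pad spaces written by
	 * strip_and_pad_whitespace() plus the terminating '\0'.
	 */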

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';

	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
}

/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}

/**
 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				    struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_12_error;
	else
		error = &hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}

/**
 * ipr_log_sis64_config_error - Log a sis64 configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_23_error *error;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	error = &hostrcb->hcam.u.error64.u.type_23_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_err("Device %d : %s\n", i + 1,
			__ipr_format_res_path(dev_entry->res_path,
					      buffer, sizeof(buffer)));
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}

/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}

/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_14_error *error;
	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_14_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    ARRAY_SIZE(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;
	}
}

/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}

/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @ioa_cfg:	ioa config struct
 * @data:	IOA error data
 * @len:	data length
 *
 * Return value:
 *	none
 **/
static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
{
	int i;

	if (len == 0)
		return;

	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);

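	/*
	 * Dump four big-endian words per line, prefixed by the byte
	 * offset.  Note that i counts 32-bit words while the bound is
	 * len / 4, so a len that is not a multiple of 16 bytes makes the
	 * last row read slightly past the requested length.
	 */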
	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
			be32_to_cpu(data[i+1]),
			be32_to_cpu(data[i+2]),
			be32_to_cpu(data[i+3]));
	}
}

/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_17_error;
	else
		error = &hostrcb->hcam.u.error.u.type_17_error;

	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}

/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_07_error *error;

	error = &hostrcb->hcam.u.error.u.type_07_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
}

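/*
 * Decode tables for the fabric descriptor path_state byte: the active
 * bits and the state bits are masked out separately and matched
 * against these entries when logging fabric errors below.
 */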
static const struct {
	u8 active;
	char *desc;
} path_active_desc[] = {
	{ IPR_PATH_NO_INFO, "Path" },
	{ IPR_PATH_ACTIVE, "Active path" },
	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
};

static const struct {
	u8 state;
	char *desc;
} path_state_desc[] = {
	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
	{ IPR_PATH_HEALTHY, "is healthy" },
	{ IPR_PATH_DEGRADED, "is degraded" },
	{ IPR_PATH_FAILED, "is failed" }
};

/**
 * ipr_log_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:	fabric descriptor
 *
 * Return value:
 *	none
 **/
static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port);
			} else if (fabric->cascaded_expander == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->phy);
			} else if (fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander);
			} else {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
			}
			return;
		}
	}

	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
}

/**
 * ipr_log64_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:	fabric descriptor
 *
 * Return value:
 *	none
 **/
static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
				  struct ipr_hostrcb64_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
				     path_active_desc[i].desc, path_state_desc[j].desc,
				     ipr_format_res_path(hostrcb->ioa_cfg,
						fabric->res_path,
						buffer, sizeof(buffer)));
			return;
		}
	}

	ipr_err("Path state=%02X Resource Path=%s\n", path_state,
		ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
				    buffer, sizeof(buffer)));
}

static const struct {
	u8 type;
	char *desc;
} path_type_desc[] = {
	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
};

static const struct {
	u8 status;
	char *desc;
} path_status_desc[] = {
	{ IPR_PATH_CFG_NO_PROB, "Functional" },
	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
	{ IPR_PATH_CFG_FAILED, "Failed" },
	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
	{ IPR_PATH_NOT_DETECTED, "Missing" },
	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
};

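/*
 * The reported link rate is a small field (masked with
 * IPR_PHY_LINK_RATE_MASK below) used to index this table, which
 * appears to mirror the SAS link rate encoding (8 = 1.5Gbps,
 * 9 = 3.0Gbps).
 */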
static const char *link_rate[] = {
	"unknown",
	"disabled",
	"phy reset problem",
	"spinup hold",
	"port selector",
	"unknown",
	"unknown",
	"unknown",
	"1.5Gbps",
	"3.0Gbps",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown"
};

/**
 * ipr_log_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:	fabric path element struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
			      struct ipr_hostrcb_config_element *cfg)
{
	int i, j;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;

	if (type == IPR_PATH_CFG_NOT_EXIST)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			if (type == IPR_PATH_CFG_IOA_PORT) {
				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
					     path_status_desc[j].desc, path_type_desc[i].desc,
					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
			} else {
				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
						     path_status_desc[j].desc, path_type_desc[i].desc,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->cascaded_expander == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				}
			}
			return;
		}
	}

	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}

/**
 * ipr_log64_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:	fabric path element struct
 *
 * Return value:
 *	none
 **/
static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb64_config_element *cfg)
{
	int i, j;
	u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
				     path_status_desc[j].desc, path_type_desc[i].desc,
				     ipr_format_res_path(hostrcb->ioa_cfg,
					cfg->res_path, buffer, sizeof(buffer)),
					link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
					be32_to_cpu(cfg->wwid[0]),
					be32_to_cpu(cfg->wwid[1]));
			return;
		}
	}
	ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status,
		     ipr_format_res_path(hostrcb->ioa_cfg,
			cfg->res_path, buffer, sizeof(buffer)),
			link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
			be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}

/**
 * ipr_log_fabric_error - Log a fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_20_error *error;
	struct ipr_hostrcb_fabric_desc *fabric;
	struct ipr_hostrcb_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error.u.type_20_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb_error, u) +
		 offsetof(struct ipr_hostrcb_type_20_error, desc));

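	/*
	 * The fabric descriptors are variable length and packed back to
	 * back; advance by each descriptor's own length field, logging
	 * its path elements, then hex-dump whatever trails the final
	 * descriptor.
	 */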
	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
}

/**
 * ipr_log_sis64_array_error - Log a sis64 array error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
				      struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_24_error *error;
	struct ipr_hostrcb64_array_data_entry *array_entry;
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error64.u.type_24_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %s\n",
		error->protection_level,
		ipr_format_res_path(ioa_cfg, error->last_res_path,
			buffer, sizeof(buffer)));

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, error->num_entries,
			    ARRAY_SIZE(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (error->exposed_mode_adn == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_err("Current Location: %s\n",
			 ipr_format_res_path(ioa_cfg, array_entry->res_path,
				buffer, sizeof(buffer)));
		ipr_err("Expected Location: %s\n",
			 ipr_format_res_path(ioa_cfg,
				array_entry->expected_res_path,
				buffer, sizeof(buffer)));

		ipr_err_separator;
	}
}

/**
 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_30_error *error;
	struct ipr_hostrcb64_fabric_desc *fabric;
	struct ipr_hostrcb64_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error64.u.type_30_error;

	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb64_error, u) +
		 offsetof(struct ipr_hostrcb_type_30_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log64_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log64_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb64_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
}

/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
			 be32_to_cpu(hostrcb->hcam.length));
}

/**
 * ipr_log_sis64_device_error - Log a sis64 device error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_21_error *error;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	error = &hostrcb->hcam.u.error64.u.type_21_error;

	ipr_err("-----Failing Device Information-----\n");
	ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
		be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
		be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
	ipr_err("Device Resource Path: %s\n",
		__ipr_format_res_path(error->res_path,
				      buffer, sizeof(buffer)));
	error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
	error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
	ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
	ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
	ipr_err("SCSI Sense Data:\n");
	ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
	ipr_err("SCSI Command Descriptor Block:\n");
	ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));

	ipr_err("Additional IOA Data:\n");
	ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
}

/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 *	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

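	/*
	 * The table is scanned linearly; IPR_IOASC_IOASC_MASK is applied
	 * first so that one table entry can match a family of closely
	 * related IOASCs.
	 */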
	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
			return i;

	return 0;
}

/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system.
 *
 * Return value:
 *	none
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;
	struct ipr_hostrcb_type_21_error *error;

	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	if (ioa_cfg->sis64)
		ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
	else
		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);

	if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.fd_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	if (!ipr_error_table[error_index].log_hcam)
		return;

	if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
	    hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
		error = &hostrcb->hcam.u.error64.u.type_21_error;

		if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
		    ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
			return;
	}

	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
		return;
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

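	/*
	 * Dispatch on the overlay id to the matching decode routine;
	 * anything unrecognized falls through to a raw hex dump.
	 */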
	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_7:
		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_12:
		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_13:
		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_14:
	case IPR_HOST_RCB_OVERLAY_ID_16:
		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_17:
		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_20:
		ipr_log_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_21:
		ipr_log_sis64_device_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_23:
		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_24:
	case IPR_HOST_RCB_OVERLAY_ID_26:
		ipr_log_sis64_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_30:
		ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}
}

static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
{
	struct ipr_hostrcb *hostrcb;

	hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
					struct ipr_hostrcb, queue);

	if (unlikely(!hostrcb)) {
		dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.\n");
		hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
						struct ipr_hostrcb, queue);
	}

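	/*
	 * This relies on the report queue never being empty when the
	 * free queue is exhausted; if both were empty, hostrcb would be
	 * NULL here.
	 */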
	list_del_init(&hostrcb->queue);
	return hostrcb;
}

/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async from the adapter. It will log the error and
 * send the HCAM back to the adapter.
 *
 * Return value:
 *	none
 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	u32 fd_ioasc;

	if (ioa_cfg->sis64)
		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
	else
		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);

	list_del_init(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
		if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
		   ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

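	/*
	 * Queue the logged buffer for the error reporting worker, then
	 * rearm the adapter's log-data HCAM with a fresh (or reclaimed)
	 * buffer.
	 */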
2650         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
2651         schedule_work(&ioa_cfg->work_q);
2652         hostrcb = ipr_get_free_hostrcb(ioa_cfg);
2653
2654         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2655 }
2656
2657 /**
2658  * ipr_timeout -  An internally generated op has timed out.
2659  * @t:          Timer context used to fetch the ipr command struct
2660  *
2661  * This function blocks host requests and initiates an
2662  * adapter reset.
2663  *
2664  * Return value:
2665  *      none
2666  **/
2667 static void ipr_timeout(struct timer_list *t)
2668 {
2669         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
2670         unsigned long lock_flags = 0;
2671         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2672
2673         ENTER;
2674         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2675
2676         ioa_cfg->errors_logged++;
2677         dev_err(&ioa_cfg->pdev->dev,
2678                 "Adapter being reset due to command timeout.\n");
2679
2680         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2681                 ioa_cfg->sdt_state = GET_DUMP;
2682
2683         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2684                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2685
2686         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2687         LEAVE;
2688 }
2689
2690 /**
2691  * ipr_oper_timeout -  Adapter timed out transitioning to operational
2692  * @t:          Timer context used to fetch the ipr command struct
2693  *
2694  * This function blocks host requests and initiates an
2695  * adapter reset.
2696  *
2697  * Return value:
2698  *      none
2699  **/
2700 static void ipr_oper_timeout(struct timer_list *t)
2701 {
2702         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
2703         unsigned long lock_flags = 0;
2704         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2705
2706         ENTER;
2707         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2708
2709         ioa_cfg->errors_logged++;
2710         dev_err(&ioa_cfg->pdev->dev,
2711                 "Adapter timed out transitioning to operational.\n");
2712
2713         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2714                 ioa_cfg->sdt_state = GET_DUMP;
2715
2716         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2717                 if (ipr_fastfail)
2718                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2719                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2720         }
2721
2722         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2723         LEAVE;
2724 }
2725
2726 /**
2727  * ipr_find_ses_entry - Find matching SES in SES table
2728  * @res:        resource entry struct of SES
2729  *
2730  * Return value:
2731  *      pointer to SES table entry / NULL on failure
2732  **/
2733 static const struct ipr_ses_table_entry *
2734 ipr_find_ses_entry(struct ipr_resource_entry *res)
2735 {
2736         int i, j, matches;
2737         struct ipr_std_inq_vpids *vpids;
2738         const struct ipr_ses_table_entry *ste = ipr_ses_table;
2739
2740         for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2741                 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2742                         if (ste->compare_product_id_byte[j] == 'X') {
2743                                 vpids = &res->std_inq_data.vpids;
2744                                 if (vpids->product_id[j] == ste->product_id[j])
2745                                         matches++;
2746                                 else
2747                                         break;
2748                         } else
2749                                 matches++;
2750                 }
2751
2752                 if (matches == IPR_PROD_ID_LEN)
2753                         return ste;
2754         }
2755
2756         return NULL;
2757 }
2758
2759 /**
2760  * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2761  * @ioa_cfg:    ioa config struct
2762  * @bus:                SCSI bus
2763  * @bus_width:  bus width
2764  *
2765  * Return value:
2766  *      SCSI bus speed in units of 100KHz, 1600 is 160 MHz.
2767  *      For a 2-byte wide SCSI bus, the maximum transfer rate in
2768  *      MB/sec is twice the bus speed in MHz (e.g. a wide enabled
2769  *      bus running at a max of 160 MHz moves at most 320 MB/sec).
2770  **/
2771 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2772 {
2773         struct ipr_resource_entry *res;
2774         const struct ipr_ses_table_entry *ste;
2775         u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2776
2777         /* Loop through each config table entry in the config table buffer */
2778         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2779                 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2780                         continue;
2781
2782                 if (bus != res->bus)
2783                         continue;
2784
2785                 if (!(ste = ipr_find_ses_entry(res)))
2786                         continue;
2787
2788                 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2789         }
2790
2791         return max_xfer_rate;
2792 }
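/*
 * Worked example for the conversion above (illustrative; assumes the SES
 * table expresses max_bus_speed_limit in MB/sec, which is what the
 * arithmetic and the kernel-doc suggest): a SES entry limiting a 16-bit
 * wide bus to 320 MB/sec yields
 *
 *      (320 * 10) / (16 / 8) = 1600
 *
 * i.e. 160 MHz in the 100 kHz units this function returns.
 */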
2793
2794 /**
2795  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2796  * @ioa_cfg:            ioa config struct
2797  * @max_delay:          max delay in micro-seconds to wait
2798  *
2799  * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2800  *
2801  * Return value:
2802  *      0 on success / other on failure
2803  **/
2804 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2805 {
2806         volatile u32 pcii_reg;
2807         int delay = 1;
2808
2809         /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2810         while (delay < max_delay) {
2811                 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2812
2813                 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2814                         return 0;
2815
2816                 /* udelay cannot be used if delay is more than a few milliseconds */
2817                 if ((delay / 1000) > MAX_UDELAY_MS)
2818                         mdelay(delay / 1000);
2819                 else
2820                         udelay(delay);
2821
2822                 delay += delay;
2823         }
2824         return -EIO;
2825 }
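/*
 * Timing sketch for the loop above (illustrative): the poll interval
 * starts at 1 microsecond and doubles on every pass, so the total time
 * spent waiting before giving up is roughly 2 * max_delay microseconds
 * (1 + 2 + 4 + ... + max_delay). A max_delay of, say, 200000 us costs
 * about 18 register reads and well under a second of busy waiting.
 */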
2826
2827 /**
2828  * ipr_get_sis64_dump_data_section - Dump IOA memory
2829  * @ioa_cfg:                    ioa config struct
2830  * @start_addr:                 adapter address to dump
2831  * @dest:                       destination kernel buffer
2832  * @length_in_words:            length to dump in 4 byte words
2833  *
2834  * Return value:
2835  *      0 on success
2836  **/
2837 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2838                                            u32 start_addr,
2839                                            __be32 *dest, u32 length_in_words)
2840 {
2841         int i;
2842
2843         for (i = 0; i < length_in_words; i++) {
2844                 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2845                 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2846                 dest++;
2847         }
2848
2849         return 0;
2850 }
2851
2852 /**
2853  * ipr_get_ldump_data_section - Dump IOA memory
2854  * @ioa_cfg:                    ioa config struct
2855  * @start_addr:                 adapter address to dump
2856  * @dest:                               destination kernel buffer
2857  * @length_in_words:    length to dump in 4 byte words
2858  *
2859  * Return value:
2860  *      0 on success / -EIO on failure
2861  **/
2862 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2863                                       u32 start_addr,
2864                                       __be32 *dest, u32 length_in_words)
2865 {
2866         volatile u32 temp_pcii_reg;
2867         int i, delay = 0;
2868
2869         if (ioa_cfg->sis64)
2870                 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2871                                                        dest, length_in_words);
2872
2873         /* Write IOA interrupt reg starting LDUMP state  */
2874         writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2875                ioa_cfg->regs.set_uproc_interrupt_reg32);
2876
2877         /* Wait for IO debug acknowledge */
2878         if (ipr_wait_iodbg_ack(ioa_cfg,
2879                                IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2880                 dev_err(&ioa_cfg->pdev->dev,
2881                         "IOA dump long data transfer timeout\n");
2882                 return -EIO;
2883         }
2884
2885         /* Signal LDUMP interlocked - clear IO debug ack */
2886         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2887                ioa_cfg->regs.clr_interrupt_reg);
2888
2889         /* Write Mailbox with starting address */
2890         writel(start_addr, ioa_cfg->ioa_mailbox);
2891
2892         /* Signal address valid - clear IOA Reset alert */
2893         writel(IPR_UPROCI_RESET_ALERT,
2894                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2895
2896         for (i = 0; i < length_in_words; i++) {
2897                 /* Wait for IO debug acknowledge */
2898                 if (ipr_wait_iodbg_ack(ioa_cfg,
2899                                        IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2900                         dev_err(&ioa_cfg->pdev->dev,
2901                                 "IOA dump short data transfer timeout\n");
2902                         return -EIO;
2903                 }
2904
2905                 /* Read data from mailbox and increment destination pointer */
2906                 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2907                 dest++;
2908
2909                 /* For all but the last word of data, signal data received */
2910                 if (i < (length_in_words - 1)) {
2911                         /* Signal dump data received - Clear IO debug Ack */
2912                         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2913                                ioa_cfg->regs.clr_interrupt_reg);
2914                 }
2915         }
2916
2917         /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2918         writel(IPR_UPROCI_RESET_ALERT,
2919                ioa_cfg->regs.set_uproc_interrupt_reg32);
2920
2921         writel(IPR_UPROCI_IO_DEBUG_ALERT,
2922                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2923
2924         /* Signal dump data received - Clear IO debug Ack */
2925         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2926                ioa_cfg->regs.clr_interrupt_reg);
2927
2928         /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2929         while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2930                 temp_pcii_reg =
2931                     readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2932
2933                 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2934                         return 0;
2935
2936                 udelay(10);
2937                 delay += 10;
2938         }
2939
2940         return 0;
2941 }
2942
2943 #ifdef CONFIG_SCSI_IPR_DUMP
2944 /**
2945  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2946  * @ioa_cfg:            ioa config struct
2947  * @pci_address:        adapter address
2948  * @length:                     length of data to copy
2949  *
2950  * Copy data from PCI adapter to kernel buffer.
2951  * Note: length MUST be a 4 byte multiple
2952  * Return value:
2953  *      0 on success / other on failure
2954  **/
2955 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2956                         unsigned long pci_address, u32 length)
2957 {
2958         int bytes_copied = 0;
2959         int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2960         __be32 *page;
2961         unsigned long lock_flags = 0;
2962         struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2963
2964         if (ioa_cfg->sis64)
2965                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2966         else
2967                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2968
2969         while (bytes_copied < length &&
2970                (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2971                 if (ioa_dump->page_offset >= PAGE_SIZE ||
2972                     ioa_dump->page_offset == 0) {
2973                         page = (__be32 *)__get_free_page(GFP_ATOMIC);
2974
2975                         if (!page) {
2976                                 ipr_trace;
2977                                 return bytes_copied;
2978                         }
2979
2980                         ioa_dump->page_offset = 0;
2981                         ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2982                         ioa_dump->next_page_index++;
2983                 } else
2984                         page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2985
2986                 rem_len = length - bytes_copied;
2987                 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2988                 cur_len = min(rem_len, rem_page_len);
2989
2990                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2991                 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2992                         rc = -EIO;
2993                 } else {
2994                         rc = ipr_get_ldump_data_section(ioa_cfg,
2995                                                         pci_address + bytes_copied,
2996                                                         &page[ioa_dump->page_offset / 4],
2997                                                         (cur_len / sizeof(u32)));
2998                 }
2999                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3000
3001                 if (!rc) {
3002                         ioa_dump->page_offset += cur_len;
3003                         bytes_copied += cur_len;
3004                 } else {
3005                         ipr_trace;
3006                         break;
3007                 }
3008                 schedule();
3009         }
3010
3011         return bytes_copied;
3012 }
3013
3014 /**
3015  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
3016  * @hdr:        dump entry header struct
3017  *
3018  * Return value:
3019  *      nothing
3020  **/
3021 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
3022 {
3023         hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
3024         hdr->num_elems = 1;
3025         hdr->offset = sizeof(*hdr);
3026         hdr->status = IPR_DUMP_STATUS_SUCCESS;
3027 }
3028
3029 /**
3030  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
3031  * @ioa_cfg:    ioa config struct
3032  * @driver_dump:        driver dump struct
3033  *
3034  * Return value:
3035  *      nothing
3036  **/
3037 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
3038                                    struct ipr_driver_dump *driver_dump)
3039 {
3040         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3041
3042         ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
3043         driver_dump->ioa_type_entry.hdr.len =
3044                 sizeof(struct ipr_dump_ioa_type_entry) -
3045                 sizeof(struct ipr_dump_entry_header);
3046         driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3047         driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
3048         driver_dump->ioa_type_entry.type = ioa_cfg->type;
3049         driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
3050                 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
3051                 ucode_vpd->minor_release[1];
3052         driver_dump->hdr.num_entries++;
3053 }
3054
3055 /**
3056  * ipr_dump_version_data - Fill in the driver version in the dump.
3057  * @ioa_cfg:    ioa config struct
3058  * @driver_dump:        driver dump struct
3059  *
3060  * Return value:
3061  *      nothing
3062  **/
3063 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
3064                                   struct ipr_driver_dump *driver_dump)
3065 {
3066         ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
3067         driver_dump->version_entry.hdr.len =
3068                 sizeof(struct ipr_dump_version_entry) -
3069                 sizeof(struct ipr_dump_entry_header);
3070         driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3071         driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
3072         strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3073         driver_dump->hdr.num_entries++;
3074 }
3075
3076 /**
3077  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3078  * @ioa_cfg:    ioa config struct
3079  * @driver_dump:        driver dump struct
3080  *
3081  * Return value:
3082  *      nothing
3083  **/
3084 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3085                                    struct ipr_driver_dump *driver_dump)
3086 {
3087         ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3088         driver_dump->trace_entry.hdr.len =
3089                 sizeof(struct ipr_dump_trace_entry) -
3090                 sizeof(struct ipr_dump_entry_header);
3091         driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3092         driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3093         memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3094         driver_dump->hdr.num_entries++;
3095 }
3096
3097 /**
3098  * ipr_dump_location_data - Fill in the IOA location in the dump.
3099  * @ioa_cfg:    ioa config struct
3100  * @driver_dump:        driver dump struct
3101  *
3102  * Return value:
3103  *      nothing
3104  **/
3105 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3106                                    struct ipr_driver_dump *driver_dump)
3107 {
3108         ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3109         driver_dump->location_entry.hdr.len =
3110                 sizeof(struct ipr_dump_location_entry) -
3111                 sizeof(struct ipr_dump_entry_header);
3112         driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3113         driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3114         strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3115         driver_dump->hdr.num_entries++;
3116 }
3117
3118 /**
3119  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3120  * @ioa_cfg:    ioa config struct
3121  * @dump:               dump struct
3122  *
3123  * Return value:
3124  *      nothing
3125  **/
3126 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3127 {
3128         unsigned long start_addr, sdt_word;
3129         unsigned long lock_flags = 0;
3130         struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3131         struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3132         u32 num_entries, max_num_entries, start_off, end_off;
3133         u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3134         struct ipr_sdt *sdt;
3135         int valid = 1;
3136         int i;
3137
3138         ENTER;
3139
3140         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3141
3142         if (ioa_cfg->sdt_state != READ_DUMP) {
3143                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3144                 return;
3145         }
3146
3147         if (ioa_cfg->sis64) {
3148                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3149                 ssleep(IPR_DUMP_DELAY_SECONDS);
3150                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3151         }
3152
3153         start_addr = readl(ioa_cfg->ioa_mailbox);
3154
3155         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3156                 dev_err(&ioa_cfg->pdev->dev,
3157                         "Invalid dump table format: %lx\n", start_addr);
3158                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3159                 return;
3160         }
3161
3162         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3163
3164         driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3165
3166         /* Initialize the overall dump header */
3167         driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3168         driver_dump->hdr.num_entries = 1;
3169         driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3170         driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3171         driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3172         driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3173
3174         ipr_dump_version_data(ioa_cfg, driver_dump);
3175         ipr_dump_location_data(ioa_cfg, driver_dump);
3176         ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3177         ipr_dump_trace_data(ioa_cfg, driver_dump);
3178
3179         /* Update dump_header */
3180         driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3181
3182         /* IOA Dump entry */
3183         ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3184         ioa_dump->hdr.len = 0;
3185         ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3186         ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3187
3188         /* First entries in sdt are actually a list of dump addresses and
3189          * lengths to gather the real dump data. sdt represents the pointer
3190          * to the ioa generated dump table. Dump data will be extracted
3191          * based on entries in this table. */
3192         sdt = &ioa_dump->sdt;
3193
3194         if (ioa_cfg->sis64) {
3195                 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3196                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3197         } else {
3198                 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3199                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3200         }
3201
3202         bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3203                         (max_num_entries * sizeof(struct ipr_sdt_entry));
3204         rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3205                                         bytes_to_copy / sizeof(__be32));
3206
3207         /* Bail out unless the Smart Dump Table is ready to use */
3208         if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3209             (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3210                 dev_err(&ioa_cfg->pdev->dev,
3211                         "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3212                         rc, be32_to_cpu(sdt->hdr.state));
3213                 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3214                 ioa_cfg->sdt_state = DUMP_OBTAINED;
3215                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3216                 return;
3217         }
3218
3219         num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3220
3221         if (num_entries > max_num_entries)
3222                 num_entries = max_num_entries;
3223
3224         /* Update dump length to the actual data to be copied */
3225         dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3226         if (ioa_cfg->sis64)
3227                 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3228         else
3229                 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3230
3231         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3232
3233         for (i = 0; i < num_entries; i++) {
3234                 if (ioa_dump->hdr.len > max_dump_size) {
3235                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3236                         break;
3237                 }
3238
3239                 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3240                         sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3241                         if (ioa_cfg->sis64)
3242                                 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3243                         else {
3244                                 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3245                                 end_off = be32_to_cpu(sdt->entry[i].end_token);
3246
3247                                 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3248                                         bytes_to_copy = end_off - start_off;
3249                                 else
3250                                         valid = 0;
3251                         }
3252                         if (valid) {
3253                                 if (bytes_to_copy > max_dump_size) {
3254                                         sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3255                                         continue;
3256                                 }
3257
3258                                 /* Copy data from adapter to driver buffers */
3259                                 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3260                                                             bytes_to_copy);
3261
3262                                 ioa_dump->hdr.len += bytes_copied;
3263
3264                                 if (bytes_copied != bytes_to_copy) {
3265                                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3266                                         break;
3267                                 }
3268                         }
3269                 }
3270         }
3271
3272         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3273
3274         /* Update dump_header */
3275         driver_dump->hdr.len += ioa_dump->hdr.len;
3276         wmb();
3277         ioa_cfg->sdt_state = DUMP_OBTAINED;
3278         LEAVE;
3279 }
3280
3281 #else
3282 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3283 #endif
3284
3285 /**
3286  * ipr_release_dump - Free adapter dump memory
3287  * @kref:       kref struct
3288  *
3289  * Return value:
3290  *      nothing
3291  **/
3292 static void ipr_release_dump(struct kref *kref)
3293 {
3294         struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3295         struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3296         unsigned long lock_flags = 0;
3297         int i;
3298
3299         ENTER;
3300         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3301         ioa_cfg->dump = NULL;
3302         ioa_cfg->sdt_state = INACTIVE;
3303         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3304
3305         for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3306                 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3307
3308         vfree(dump->ioa_dump.ioa_data);
3309         kfree(dump);
3310         LEAVE;
3311 }
3312
3313 /**
3314  * ipr_worker_thread - Worker thread
3315  * @work:               work struct
3316  *
3317  * Called at task level from a work thread. This function takes care
3318  * of adding and removing devices from the mid-layer as configuration
3319  * changes are detected by the adapter.
3320  *
3321  * Return value:
3322  *      nothing
3323  **/
3324 static void ipr_worker_thread(struct work_struct *work)
3325 {
3326         unsigned long lock_flags;
3327         struct ipr_resource_entry *res;
3328         struct scsi_device *sdev;
3329         struct ipr_dump *dump;
3330         struct ipr_ioa_cfg *ioa_cfg =
3331                 container_of(work, struct ipr_ioa_cfg, work_q);
3332         u8 bus, target, lun;
3333         int did_work;
3334
3335         ENTER;
3336         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3337
3338         if (ioa_cfg->sdt_state == READ_DUMP) {
3339                 dump = ioa_cfg->dump;
3340                 if (!dump) {
3341                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3342                         return;
3343                 }
3344                 kref_get(&dump->kref);
3345                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3346                 ipr_get_ioa_dump(ioa_cfg, dump);
3347                 kref_put(&dump->kref, ipr_release_dump);
3348
3349                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3350                 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3351                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3352                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3353                 return;
3354         }
3355
3356         if (ioa_cfg->scsi_unblock) {
3357                 ioa_cfg->scsi_unblock = 0;
3358                 ioa_cfg->scsi_blocked = 0;
3359                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3360                 scsi_unblock_requests(ioa_cfg->host);
3361                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3362                 if (ioa_cfg->scsi_blocked)
3363                         scsi_block_requests(ioa_cfg->host);
3364         }
3365
3366         if (!ioa_cfg->scan_enabled) {
3367                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3368                 return;
3369         }
3370
3371 restart:
3372         do {
3373                 did_work = 0;
3374                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3375                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3376                         return;
3377                 }
3378
3379                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3380                         if (res->del_from_ml && res->sdev) {
3381                                 did_work = 1;
3382                                 sdev = res->sdev;
3383                                 if (!scsi_device_get(sdev)) {
3384                                         if (!res->add_to_ml)
3385                                                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3386                                         else
3387                                                 res->del_from_ml = 0;
3388                                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3389                                         scsi_remove_device(sdev);
3390                                         scsi_device_put(sdev);
3391                                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3392                                 }
3393                                 break;
3394                         }
3395                 }
3396         } while (did_work);
3397
3398         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3399                 if (res->add_to_ml) {
3400                         bus = res->bus;
3401                         target = res->target;
3402                         lun = res->lun;
3403                         res->add_to_ml = 0;
3404                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3405                         scsi_add_device(ioa_cfg->host, bus, target, lun);
3406                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3407                         goto restart;
3408                 }
3409         }
3410
3411         ioa_cfg->scan_done = 1;
3412         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3413         kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3414         LEAVE;
3415 }
3416
3417 #ifdef CONFIG_SCSI_IPR_TRACE
3418 /**
3419  * ipr_read_trace - Dump the adapter trace
3420  * @filp:               open sysfs file
3421  * @kobj:               kobject struct
3422  * @bin_attr:           bin_attribute struct
3423  * @buf:                buffer
3424  * @off:                offset
3425  * @count:              buffer size
3426  *
3427  * Return value:
3428  *      number of bytes printed to buffer
3429  **/
3430 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3431                               struct bin_attribute *bin_attr,
3432                               char *buf, loff_t off, size_t count)
3433 {
3434         struct device *dev = container_of(kobj, struct device, kobj);
3435         struct Scsi_Host *shost = class_to_shost(dev);
3436         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3437         unsigned long lock_flags = 0;
3438         ssize_t ret;
3439
3440         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3441         ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3442                                 IPR_TRACE_SIZE);
3443         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3444
3445         return ret;
3446 }
3447
3448 static struct bin_attribute ipr_trace_attr = {
3449         .attr = {
3450                 .name = "trace",
3451                 .mode = S_IRUGO,
3452         },
3453         .size = 0,
3454         .read = ipr_read_trace,
3455 };
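/*
 * Illustrative usage from userspace (hostN and the exact sysfs path are
 * system dependent):
 *
 *      # hexdump -C /sys/class/scsi_host/hostN/trace | head
 */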
3456 #endif
3457
3458 /**
3459  * ipr_show_fw_version - Show the firmware version
3460  * @dev:        class device struct
3461  * @buf:        buffer
3462  *
3463  * Return value:
3464  *      number of bytes printed to buffer
3465  **/
3466 static ssize_t ipr_show_fw_version(struct device *dev,
3467                                    struct device_attribute *attr, char *buf)
3468 {
3469         struct Scsi_Host *shost = class_to_shost(dev);
3470         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3471         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3472         unsigned long lock_flags = 0;
3473         int len;
3474
3475         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3476         len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3477                        ucode_vpd->major_release, ucode_vpd->card_type,
3478                        ucode_vpd->minor_release[0],
3479                        ucode_vpd->minor_release[1]);
3480         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3481         return len;
3482 }
3483
3484 static struct device_attribute ipr_fw_version_attr = {
3485         .attr = {
3486                 .name =         "fw_version",
3487                 .mode =         S_IRUGO,
3488         },
3489         .show = ipr_show_fw_version,
3490 };
3491
3492 /**
3493  * ipr_show_log_level - Show the adapter's error logging level
3494  * @dev:        class device struct
3495  * @buf:        buffer
3496  *
3497  * Return value:
3498  *      number of bytes printed to buffer
3499  **/
3500 static ssize_t ipr_show_log_level(struct device *dev,
3501                                    struct device_attribute *attr, char *buf)
3502 {
3503         struct Scsi_Host *shost = class_to_shost(dev);
3504         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3505         unsigned long lock_flags = 0;
3506         int len;
3507
3508         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3509         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3510         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3511         return len;
3512 }
3513
3514 /**
3515  * ipr_store_log_level - Change the adapter's error logging level
3516  * @dev:        class device struct
3517  * @buf:        buffer
3518  *
3519  * Return value:
3520  *      length of buffer on success
3521  **/
3522 static ssize_t ipr_store_log_level(struct device *dev,
3523                                    struct device_attribute *attr,
3524                                    const char *buf, size_t count)
3525 {
3526         struct Scsi_Host *shost = class_to_shost(dev);
3527         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3528         unsigned long lock_flags = 0;
3529
3530         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3531         ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3532         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3533         return strlen(buf);
3534 }
3535
3536 static struct device_attribute ipr_log_level_attr = {
3537         .attr = {
3538                 .name =         "log_level",
3539                 .mode =         S_IRUGO | S_IWUSR,
3540         },
3541         .show = ipr_show_log_level,
3542         .store = ipr_store_log_level
3543 };
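/*
 * Illustrative sysfs usage (hostN is system dependent):
 *
 *      # cat /sys/class/scsi_host/hostN/log_level
 *      # echo 4 > /sys/class/scsi_host/hostN/log_level
 */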
3544
3545 /**
3546  * ipr_store_diagnostics - IOA Diagnostics interface
3547  * @dev:        device struct
3548  * @buf:        buffer
3549  * @count:      buffer size
3550  *
3551  * This function will reset the adapter and wait a reasonable
3552  * amount of time for any errors that the adapter might log.
3553  *
3554  * Return value:
3555  *      count on success / other on failure
3556  **/
3557 static ssize_t ipr_store_diagnostics(struct device *dev,
3558                                      struct device_attribute *attr,
3559                                      const char *buf, size_t count)
3560 {
3561         struct Scsi_Host *shost = class_to_shost(dev);
3562         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3563         unsigned long lock_flags = 0;
3564         int rc = count;
3565
3566         if (!capable(CAP_SYS_ADMIN))
3567                 return -EACCES;
3568
3569         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3570         while (ioa_cfg->in_reset_reload) {
3571                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3572                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3573                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3574         }
3575
3576         ioa_cfg->errors_logged = 0;
3577         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3578
3579         if (ioa_cfg->in_reset_reload) {
3580                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3581                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3582
3583                 /* Wait for a second for any errors to be logged */
3584                 msleep(1000);
3585         } else {
3586                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3587                 return -EIO;
3588         }
3589
3590         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3591         if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3592                 rc = -EIO;
3593         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3594
3595         return rc;
3596 }
3597
3598 static struct device_attribute ipr_diagnostics_attr = {
3599         .attr = {
3600                 .name =         "run_diagnostics",
3601                 .mode =         S_IWUSR,
3602         },
3603         .store = ipr_store_diagnostics
3604 };
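/*
 * Illustrative usage (requires CAP_SYS_ADMIN; the value written is not
 * parsed, any write kicks off the diagnostic reset):
 *
 *      # echo 1 > /sys/class/scsi_host/hostN/run_diagnostics
 */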
3605
3606 /**
3607  * ipr_show_adapter_state - Show the adapter's state
3608  * @dev:        device struct
3609  * @buf:        buffer
3610  *
3611  * Return value:
3612  *      number of bytes printed to buffer
3613  **/
3614 static ssize_t ipr_show_adapter_state(struct device *dev,
3615                                       struct device_attribute *attr, char *buf)
3616 {
3617         struct Scsi_Host *shost = class_to_shost(dev);
3618         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3619         unsigned long lock_flags = 0;
3620         int len;
3621
3622         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3623         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3624                 len = snprintf(buf, PAGE_SIZE, "offline\n");
3625         else
3626                 len = snprintf(buf, PAGE_SIZE, "online\n");
3627         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3628         return len;
3629 }
3630
3631 /**
3632  * ipr_store_adapter_state - Change adapter state
3633  * @dev:        device struct
3634  * @buf:        buffer
3635  * @count:      buffer size
3636  *
3637  * This function will change the adapter's state.
3638  *
3639  * Return value:
3640  *      count on success / other on failure
3641  **/
3642 static ssize_t ipr_store_adapter_state(struct device *dev,
3643                                        struct device_attribute *attr,
3644                                        const char *buf, size_t count)
3645 {
3646         struct Scsi_Host *shost = class_to_shost(dev);
3647         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3648         unsigned long lock_flags;
3649         int result = count, i;
3650
3651         if (!capable(CAP_SYS_ADMIN))
3652                 return -EACCES;
3653
3654         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3655         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3656             !strncmp(buf, "online", 6)) {
3657                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3658                         spin_lock(&ioa_cfg->hrrq[i]._lock);
3659                         ioa_cfg->hrrq[i].ioa_is_dead = 0;
3660                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
3661                 }
3662                 wmb();
3663                 ioa_cfg->reset_retries = 0;
3664                 ioa_cfg->in_ioa_bringdown = 0;
3665                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3666         }
3667         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3668         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3669
3670         return result;
3671 }
3672
3673 static struct device_attribute ipr_ioa_state_attr = {
3674         .attr = {
3675                 .name =         "online_state",
3676                 .mode =         S_IRUGO | S_IWUSR,
3677         },
3678         .show = ipr_show_adapter_state,
3679         .store = ipr_store_adapter_state
3680 };
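/*
 * Illustrative usage for bringing a failed adapter back online (only the
 * string "online" is acted on; requires CAP_SYS_ADMIN):
 *
 *      # echo online > /sys/class/scsi_host/hostN/online_state
 */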
3681
3682 /**
3683  * ipr_store_reset_adapter - Reset the adapter
3684  * @dev:        device struct
3685  * @buf:        buffer
3686  * @count:      buffer size
3687  *
3688  * This function will reset the adapter.
3689  *
3690  * Return value:
3691  *      count on success / other on failure
3692  **/
3693 static ssize_t ipr_store_reset_adapter(struct device *dev,
3694                                        struct device_attribute *attr,
3695                                        const char *buf, size_t count)
3696 {
3697         struct Scsi_Host *shost = class_to_shost(dev);
3698         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3699         unsigned long lock_flags;
3700         int result = count;
3701
3702         if (!capable(CAP_SYS_ADMIN))
3703                 return -EACCES;
3704
3705         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3706         if (!ioa_cfg->in_reset_reload)
3707                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3708         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3709         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3710
3711         return result;
3712 }
3713
3714 static struct device_attribute ipr_ioa_reset_attr = {
3715         .attr = {
3716                 .name =         "reset_host",
3717                 .mode =         S_IWUSR,
3718         },
3719         .store = ipr_store_reset_adapter
3720 };
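/*
 * Illustrative usage (requires CAP_SYS_ADMIN; the value written is not
 * parsed, any write requests a normal-shutdown adapter reset):
 *
 *      # echo 1 > /sys/class/scsi_host/hostN/reset_host
 */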
3721
3722 static int ipr_iopoll(struct irq_poll *iop, int budget);
3723 /**
3724  * ipr_show_iopoll_weight - Show ipr polling mode
3725  * @dev:        class device struct
3726  * @buf:        buffer
3727  *
3728  * Return value:
3729  *      number of bytes printed to buffer
3730  **/
3731 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3732                                    struct device_attribute *attr, char *buf)
3733 {
3734         struct Scsi_Host *shost = class_to_shost(dev);
3735         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3736         unsigned long lock_flags = 0;
3737         int len;
3738
3739         spin_lock_irqsave(shost->host_lock, lock_flags);
3740         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3741         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3742
3743         return len;
3744 }
3745
3746 /**
3747  * ipr_store_iopoll_weight - Change the adapter's polling mode
3748  * @dev:        class device struct
3749  * @buf:        buffer
3750  *
3751  * Return value:
3752  *      number of bytes printed to buffer
3753  *      length of buffer on success / other on failure
3754 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3755                                         struct device_attribute *attr,
3756                                         const char *buf, size_t count)
3757 {
3758         struct Scsi_Host *shost = class_to_shost(dev);
3759         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3760         unsigned long user_iopoll_weight;
3761         unsigned long lock_flags = 0;
3762         int i;
3763
3764         if (!ioa_cfg->sis64) {
3765                 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
3766                 return -EINVAL;
3767         }
3768         if (kstrtoul(buf, 10, &user_iopoll_weight))
3769                 return -EINVAL;
3770
3771         if (user_iopoll_weight > 256) {
3772                 dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must be 256 or less\n");
3773                 return -EINVAL;
3774         }
3775
3776         if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3777                 dev_info(&ioa_cfg->pdev->dev, "irq_poll weight unchanged, matches the current setting\n");
3778                 return strlen(buf);
3779         }
3780
3781         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3782                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3783                         irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
3784         }
3785
3786         spin_lock_irqsave(shost->host_lock, lock_flags);
3787         ioa_cfg->iopoll_weight = user_iopoll_weight;
3788         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3789                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3790                         irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
3791                                         ioa_cfg->iopoll_weight, ipr_iopoll);
3792                 }
3793         }
3794         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3795
3796         return strlen(buf);
3797 }
3798
3799 static struct device_attribute ipr_iopoll_weight_attr = {
3800         .attr = {
3801                 .name =         "iopoll_weight",
3802                 .mode =         S_IRUGO | S_IWUSR,
3803         },
3804         .show = ipr_show_iopoll_weight,
3805         .store = ipr_store_iopoll_weight
3806 };
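/*
 * Illustrative usage (SIS-64 adapters only; a weight of 0 disables
 * irq_poll, values up to 256 are accepted, and polling only engages when
 * more than one HRRQ vector is in use):
 *
 *      # echo 64 > /sys/class/scsi_host/hostN/iopoll_weight
 */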
3807
3808 /**
3809  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3810  * @buf_len:            buffer length
3811  *
3812  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3813  * list to use for microcode download
3814  *
3815  * Return value:
3816  *      pointer to sglist / NULL on failure
3817  **/
3818 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3819 {
3820         int sg_size, order;
3821         struct ipr_sglist *sglist;
3822
3823         /* Get the minimum size per scatter/gather element */
3824         sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3825
3826         /* Get the actual size per element */
3827         order = get_order(sg_size);
3828
3829         /* Allocate a scatter/gather list for the DMA */
3830         sglist = kzalloc(sizeof(struct ipr_sglist), GFP_KERNEL);
3831         if (sglist == NULL) {
3832                 ipr_trace;
3833                 return NULL;
3834         }
3835         sglist->order = order;
3836         sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL,
3837                                               &sglist->num_sg);
3838         if (!sglist->scatterlist) {
3839                 kfree(sglist);
3840                 return NULL;
3841         }
3842
3843         return sglist;
3844 }
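/*
 * Sizing sketch for the allocation above (illustrative; assumes
 * IPR_MAX_SGLIST is 64 and PAGE_SIZE is 4 KiB): for a 4 MiB microcode
 * image, sg_size = 4 MiB / 63 is roughly 66 KiB, so get_order() selects
 * order 5 (128 KiB chunks) and sgl_alloc_order() builds a 32-element
 * scatter/gather list.
 */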
3845
3846 /**
3847  * ipr_free_ucode_buffer - Frees a microcode download buffer
3848  * @sglist:             scatter/gather list pointer
3849  *
3850  * Free a DMA'able ucode download buffer previously allocated with
3851  * ipr_alloc_ucode_buffer
3852  *
3853  * Return value:
3854  *      nothing
3855  **/
3856 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3857 {
3858         sgl_free_order(sglist->scatterlist, sglist->order);
3859         kfree(sglist);
3860 }
3861
3862 /**
3863  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3864  * @sglist:             scatter/gather list pointer
3865  * @buffer:             buffer pointer
3866  * @len:                buffer length
3867  *
3868  * Copy a microcode image from a user buffer into a buffer allocated by
3869  * ipr_alloc_ucode_buffer
3870  *
3871  * Return value:
3872  *      0 on success / other on failure
3873  **/
3874 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3875                                  u8 *buffer, u32 len)
3876 {
3877         int bsize_elem, i, result = 0;
3878         struct scatterlist *scatterlist;
3879         void *kaddr;
3880
3881         /* Determine the actual number of bytes per element */
3882         bsize_elem = PAGE_SIZE * (1 << sglist->order);
3883
3884         scatterlist = sglist->scatterlist;
3885
3886         for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3887                 struct page *page = sg_page(&scatterlist[i]);
3888
3889                 kaddr = kmap(page);
3890                 memcpy(kaddr, buffer, bsize_elem);
3891                 kunmap(page);
3892
3893                 scatterlist[i].length = bsize_elem;
3899         }
3900
3901         if (len % bsize_elem) {
3902                 struct page *page = sg_page(&scatterlist[i]);
3903
3904                 kaddr = kmap(page);
3905                 memcpy(kaddr, buffer, len % bsize_elem);
3906                 kunmap(page);
3907
3908                 scatterlist[i].length = len % bsize_elem;
3909         }
3910
3911         sglist->buffer_len = len;
3912         return result;
3913 }
3914
3915 /**
3916  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3917  * @ipr_cmd:            ipr command struct
3918  * @sglist:             scatter/gather list
3919  *
3920  * Builds a microcode download IOA data list (IOADL).
3921  *
3922  **/
3923 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3924                                     struct ipr_sglist *sglist)
3925 {
3926         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3927         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3928         struct scatterlist *scatterlist = sglist->scatterlist;
3929         int i;
3930
3931         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3932         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3933         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3934
3935         ioarcb->ioadl_len =
3936                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3937         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3938                 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3939                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3940                 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3941         }
3942
3943         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3944 }
3945
3946 /**
3947  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3948  * @ipr_cmd:    ipr command struct
3949  * @sglist:             scatter/gather list
3950  *
3951  * Builds a microcode download IOA data list (IOADL).
3952  *
3953  **/
3954 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3955                                   struct ipr_sglist *sglist)
3956 {
3957         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3958         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3959         struct scatterlist *scatterlist = sglist->scatterlist;
3960         int i;
3961
3962         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3963         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3964         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3965
3966         ioarcb->ioadl_len =
3967                 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3968
3969         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3970                 ioadl[i].flags_and_data_len =
3971                         cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3972                 ioadl[i].address =
3973                         cpu_to_be32(sg_dma_address(&scatterlist[i]));
3974         }
3975
3976         ioadl[i-1].flags_and_data_len |=
3977                 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3978 }
3979
3980 /**
3981  * ipr_update_ioa_ucode - Update IOA's microcode
3982  * @ioa_cfg:    ioa config struct
3983  * @sglist:             scatter/gather list
3984  *
3985  * Initiate an adapter reset to update the IOA's microcode
3986  *
3987  * Return value:
3988  *      0 on success / -EIO on failure
3989  **/
3990 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3991                                 struct ipr_sglist *sglist)
3992 {
3993         unsigned long lock_flags;
3994
3995         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3996         while (ioa_cfg->in_reset_reload) {
3997                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3998                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3999                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4000         }
4001
4002         if (ioa_cfg->ucode_sglist) {
4003                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4004                 dev_err(&ioa_cfg->pdev->dev,
4005                         "Microcode download already in progress\n");
4006                 return -EIO;
4007         }
4008
4009         sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
4010                                         sglist->scatterlist, sglist->num_sg,
4011                                         DMA_TO_DEVICE);
4012
4013         if (!sglist->num_dma_sg) {
4014                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4015                 dev_err(&ioa_cfg->pdev->dev,
4016                         "Failed to map microcode download buffer!\n");
4017                 return -EIO;
4018         }
4019
4020         ioa_cfg->ucode_sglist = sglist;
4021         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
4022         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4023         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4024
4025         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4026         ioa_cfg->ucode_sglist = NULL;
4027         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4028         return 0;
4029 }
4030
4031 /**
4032  * ipr_store_update_fw - Update the firmware on the adapter
4033  * @dev:        device struct
4034  * @buf:        buffer
4035  * @count:      buffer size
4036  *
4037  * This function will update the firmware on the adapter.
4038  *
4039  * Return value:
4040  *      count on success / other on failure
4041  **/
4042 static ssize_t ipr_store_update_fw(struct device *dev,
4043                                    struct device_attribute *attr,
4044                                    const char *buf, size_t count)
4045 {
4046         struct Scsi_Host *shost = class_to_shost(dev);
4047         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4048         struct ipr_ucode_image_header *image_hdr;
4049         const struct firmware *fw_entry;
4050         struct ipr_sglist *sglist;
4051         char fname[100];
4052         u8 *src;
4053         char *endline;
4054         int result, dnld_size;
4055
4056         if (!capable(CAP_SYS_ADMIN))
4057                 return -EACCES;
4058
4059         snprintf(fname, sizeof(fname), "%s", buf);
4060
4061         endline = strchr(fname, '\n');
4062         if (endline)
4063                 *endline = '\0';
4064
4065         if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4066                 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4067                 return -EIO;
4068         }
4069
4070         image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4071
4072         src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4073         dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4074         sglist = ipr_alloc_ucode_buffer(dnld_size);
4075
4076         if (!sglist) {
4077                 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4078                 release_firmware(fw_entry);
4079                 return -ENOMEM;
4080         }
4081
4082         result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4083
4084         if (result) {
4085                 dev_err(&ioa_cfg->pdev->dev,
4086                         "Microcode buffer copy to DMA buffer failed\n");
4087                 goto out;
4088         }
4089
4090         ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
4091
4092         result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4093
4094         if (!result)
4095                 result = count;
4096 out:
4097         ipr_free_ucode_buffer(sglist);
4098         release_firmware(fw_entry);
4099         return result;
4100 }
4101
4102 static struct device_attribute ipr_update_fw_attr = {
4103         .attr = {
4104                 .name =         "update_fw",
4105                 .mode =         S_IWUSR,
4106         },
4107         .store = ipr_store_update_fw
4108 };
4109
4110 /**
4111  * ipr_show_fw_type - Show the adapter's firmware type.
4112  * @dev:        class device struct
4113  * @attr:       device attribute structure
4114  * @buf:        buffer
4115  * Return value:
4116  *      number of bytes printed to buffer
4117  **/
4118 static ssize_t ipr_show_fw_type(struct device *dev,
4119                                 struct device_attribute *attr, char *buf)
4120 {
4121         struct Scsi_Host *shost = class_to_shost(dev);
4122         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4123         unsigned long lock_flags = 0;
4124         int len;
4125
4126         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4127         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4128         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4129         return len;
4130 }
4131
4132 static struct device_attribute ipr_ioa_fw_type_attr = {
4133         .attr = {
4134                 .name =         "fw_type",
4135                 .mode =         S_IRUGO,
4136         },
4137         .show = ipr_show_fw_type
4138 };
4139
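/**
 * ipr_read_async_err_log - Read the oldest queued asynchronous error entry
 * @filep:      open sysfs file
 * @kobj:       kobject struct
 * @bin_attr:   bin_attribute struct
 * @buf:        buffer
 * @off:        offset into the entry
 * @count:      buffer size
 *
 * Return value:
 *      number of bytes read / 0 if no entry is queued
 **/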
4140 static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
4141                                 struct bin_attribute *bin_attr, char *buf,
4142                                 loff_t off, size_t count)
4143 {
4144         struct device *cdev = container_of(kobj, struct device, kobj);
4145         struct Scsi_Host *shost = class_to_shost(cdev);
4146         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4147         struct ipr_hostrcb *hostrcb;
4148         unsigned long lock_flags = 0;
4149         int ret;
4150
4151         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4152         hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4153                                         struct ipr_hostrcb, queue);
4154         if (!hostrcb) {
4155                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4156                 return 0;
4157         }
4158         ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
4159                                 sizeof(hostrcb->hcam));
4160         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4161         return ret;
4162 }
4163
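/**
 * ipr_next_async_err_log - Discard the oldest queued asynchronous error entry
 * @filep:      open sysfs file
 * @kobj:       kobject struct
 * @bin_attr:   bin_attribute struct
 * @buf:        buffer
 * @off:        offset
 * @count:      buffer size
 *
 * Return value:
 *      count
 **/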
4164 static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
4165                                 struct bin_attribute *bin_attr, char *buf,
4166                                 loff_t off, size_t count)
4167 {
4168         struct device *cdev = container_of(kobj, struct device, kobj);
4169         struct Scsi_Host *shost = class_to_shost(cdev);
4170         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4171         struct ipr_hostrcb *hostrcb;
4172         unsigned long lock_flags = 0;
4173
4174         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4175         hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4176                                         struct ipr_hostrcb, queue);
4177         if (!hostrcb) {
4178                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4179                 return count;
4180         }
4181
4182         /* Reclaim hostrcb before exit */
4183         list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4184         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4185         return count;
4186 }
4187
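/* Reading this attribute returns the oldest queued error entry; writing
 * any value discards it so a subsequent read exposes the next entry.
 */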
4188 static struct bin_attribute ipr_ioa_async_err_log = {
4189         .attr = {
4190                 .name =         "async_err_log",
4191                 .mode =         S_IRUGO | S_IWUSR,
4192         },
4193         .size = 0,
4194         .read = ipr_read_async_err_log,
4195         .write = ipr_next_async_err_log
4196 };
4197
4198 static struct device_attribute *ipr_ioa_attrs[] = {
4199         &ipr_fw_version_attr,
4200         &ipr_log_level_attr,
4201         &ipr_diagnostics_attr,
4202         &ipr_ioa_state_attr,
4203         &ipr_ioa_reset_attr,
4204         &ipr_update_fw_attr,
4205         &ipr_ioa_fw_type_attr,
4206         &ipr_iopoll_weight_attr,
4207         NULL,
4208 };
4209
4210 #ifdef CONFIG_SCSI_IPR_DUMP
4211 /**
4212  * ipr_read_dump - Dump the adapter
4213  * @filp:               open sysfs file
4214  * @kobj:               kobject struct
4215  * @bin_attr:           bin_attribute struct
4216  * @buf:                buffer
4217  * @off:                offset
4218  * @count:              buffer size
4219  *
4220  * Return value:
4221  *      number of bytes printed to buffer
4222  **/
4223 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4224                              struct bin_attribute *bin_attr,
4225                              char *buf, loff_t off, size_t count)
4226 {
4227         struct device *cdev = container_of(kobj, struct device, kobj);
4228         struct Scsi_Host *shost = class_to_shost(cdev);
4229         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4230         struct ipr_dump *dump;
4231         unsigned long lock_flags = 0;
4232         char *src;
4233         int len, sdt_end;
4234         size_t rc = count;
4235
4236         if (!capable(CAP_SYS_ADMIN))
4237                 return -EACCES;
4238
4239         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4240         dump = ioa_cfg->dump;
4241
4242         if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4243                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4244                 return 0;
4245         }
4246         kref_get(&dump->kref);
4247         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4248
4249         if (off > dump->driver_dump.hdr.len) {
4250                 kref_put(&dump->kref, ipr_release_dump);
4251                 return 0;
4252         }
4253
4254         if (off + count > dump->driver_dump.hdr.len) {
4255                 count = dump->driver_dump.hdr.len - off;
4256                 rc = count;
4257         }
4258
4259         if (count && off < sizeof(dump->driver_dump)) {
4260                 if (off + count > sizeof(dump->driver_dump))
4261                         len = sizeof(dump->driver_dump) - off;
4262                 else
4263                         len = count;
4264                 src = (u8 *)&dump->driver_dump + off;
4265                 memcpy(buf, src, len);
4266                 buf += len;
4267                 off += len;
4268                 count -= len;
4269         }
4270
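        /* Rebase the offset so it indexes into the IOA dump that follows
         * the driver dump header.
         */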
4271         off -= sizeof(dump->driver_dump);
4272
4273         if (ioa_cfg->sis64)
4274                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4275                           (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4276                            sizeof(struct ipr_sdt_entry));
4277         else
4278                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4279                           (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4280
4281         if (count && off < sdt_end) {
4282                 if (off + count > sdt_end)
4283                         len = sdt_end - off;
4284                 else
4285                         len = count;
4286                 src = (u8 *)&dump->ioa_dump + off;
4287                 memcpy(buf, src, len);
4288                 buf += len;
4289                 off += len;
4290                 count -= len;
4291         }
4292
4293         off -= sdt_end;
4294
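        /* Copy the remainder from the per-page IOA dump buffers, splitting
         * each copy so it never crosses a page boundary within ioa_data.
         */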
4295         while (count) {
4296                 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4297                         len = PAGE_ALIGN(off) - off;
4298                 else
4299                         len = count;
4300                 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4301                 src += off & ~PAGE_MASK;
4302                 memcpy(buf, src, len);
4303                 buf += len;
4304                 off += len;
4305                 count -= len;
4306         }
4307
4308         kref_put(&dump->kref, ipr_release_dump);
4309         return rc;
4310 }
4311
4312 /**
4313  * ipr_alloc_dump - Prepare for adapter dump
4314  * @ioa_cfg:    ioa config struct
4315  *
4316  * Return value:
4317  *      0 on success / other on failure
4318  **/
4319 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4320 {
4321         struct ipr_dump *dump;
4322         __be32 **ioa_data;
4323         unsigned long lock_flags = 0;
4324
4325         dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4326
4327         if (!dump) {
4328                 ipr_err("Dump memory allocation failed\n");
4329                 return -ENOMEM;
4330         }
4331
4332         if (ioa_cfg->sis64)
4333                 ioa_data = vmalloc(array_size(IPR_FMT3_MAX_NUM_DUMP_PAGES,
4334                                               sizeof(__be32 *)));
4335         else
4336                 ioa_data = vmalloc(array_size(IPR_FMT2_MAX_NUM_DUMP_PAGES,
4337                                               sizeof(__be32 *)));
4338
4339         if (!ioa_data) {
4340                 ipr_err("Dump memory allocation failed\n");
4341                 kfree(dump);
4342                 return -ENOMEM;
4343         }
4344
4345         dump->ioa_dump.ioa_data = ioa_data;
4346
4347         kref_init(&dump->kref);
4348         dump->ioa_cfg = ioa_cfg;
4349
4350         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4351
4352         if (INACTIVE != ioa_cfg->sdt_state) {
4353                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4354                 vfree(dump->ioa_dump.ioa_data);
4355                 kfree(dump);
4356                 return 0;
4357         }
4358
4359         ioa_cfg->dump = dump;
4360         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4361         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4362                 ioa_cfg->dump_taken = 1;
4363                 schedule_work(&ioa_cfg->work_q);
4364         }
4365         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4366
4367         return 0;
4368 }
4369
4370 /**
4371  * ipr_free_dump - Free adapter dump memory
4372  * @ioa_cfg:    ioa config struct
4373  *
4374  * Return value:
4375  *      0 on success / other on failure
4376  **/
4377 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4378 {
4379         struct ipr_dump *dump;
4380         unsigned long lock_flags = 0;
4381
4382         ENTER;
4383
4384         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4385         dump = ioa_cfg->dump;
4386         if (!dump) {
4387                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4388                 return 0;
4389         }
4390
4391         ioa_cfg->dump = NULL;
4392         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4393
4394         kref_put(&dump->kref, ipr_release_dump);
4395
4396         LEAVE;
4397         return 0;
4398 }
4399
4400 /**
4401  * ipr_write_dump - Setup dump state of adapter
4402  * @filp:               open sysfs file
4403  * @kobj:               kobject struct
4404  * @bin_attr:           bin_attribute struct
4405  * @buf:                buffer
4406  * @off:                offset
4407  * @count:              buffer size
4408  *
4409  * Return value:
4410  *      count on success / other on failure
4411  **/
4412 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4413                               struct bin_attribute *bin_attr,
4414                               char *buf, loff_t off, size_t count)
4415 {
4416         struct device *cdev = container_of(kobj, struct device, kobj);
4417         struct Scsi_Host *shost = class_to_shost(cdev);
4418         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4419         int rc;
4420
4421         if (!capable(CAP_SYS_ADMIN))
4422                 return -EACCES;
4423
4424         if (buf[0] == '1')
4425                 rc = ipr_alloc_dump(ioa_cfg);
4426         else if (buf[0] == '0')
4427                 rc = ipr_free_dump(ioa_cfg);
4428         else
4429                 return -EINVAL;
4430
4431         if (rc)
4432                 return rc;
4433         else
4434                 return count;
4435 }
4436
4437 static struct bin_attribute ipr_dump_attr = {
4438         .attr = {
4439                 .name = "dump",
4440                 .mode = S_IRUSR | S_IWUSR,
4441         },
4442         .size = 0,
4443         .read = ipr_read_dump,
4444         .write = ipr_write_dump
4445 };
4446 #else
4447 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4448 #endif
4449
4450 /**
4451  * ipr_change_queue_depth - Change the device's queue depth
4452  * @sdev:       scsi device struct
4453  * @qdepth:     depth to set
4455  *
4456  * Return value:
4457  *      actual depth set
4458  **/
4459 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
4460 {
4461         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4462         struct ipr_resource_entry *res;
4463         unsigned long lock_flags = 0;
4464
4465         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4466         res = (struct ipr_resource_entry *)sdev->hostdata;
4467
4468         if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4469                 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4470         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4471
4472         scsi_change_queue_depth(sdev, qdepth);
4473         return sdev->queue_depth;
4474 }
4475
4476 /**
4477  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4478  * @dev:        device struct
4479  * @attr:       device attribute structure
4480  * @buf:        buffer
4481  *
4482  * Return value:
4483  *      number of bytes printed to buffer
4484  **/
4485 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4486 {
4487         struct scsi_device *sdev = to_scsi_device(dev);
4488         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4489         struct ipr_resource_entry *res;
4490         unsigned long lock_flags = 0;
4491         ssize_t len = -ENXIO;
4492
4493         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4494         res = (struct ipr_resource_entry *)sdev->hostdata;
4495         if (res)
4496                 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4497         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4498         return len;
4499 }
4500
4501 static struct device_attribute ipr_adapter_handle_attr = {
4502         .attr = {
4503                 .name =         "adapter_handle",
4504                 .mode =         S_IRUSR,
4505         },
4506         .show = ipr_show_adapter_handle
4507 };
4508
4509 /**
4510  * ipr_show_resource_path - Show the resource path or the resource address for
4511  *                          this device.
4512  * @dev:        device struct
4513  * @attr:       device attribute structure
4514  * @buf:        buffer
4515  *
4516  * Return value:
4517  *      number of bytes printed to buffer
4518  **/
4519 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4520 {
4521         struct scsi_device *sdev = to_scsi_device(dev);
4522         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4523         struct ipr_resource_entry *res;
4524         unsigned long lock_flags = 0;
4525         ssize_t len = -ENXIO;
4526         char buffer[IPR_MAX_RES_PATH_LENGTH];
4527
4528         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4529         res = (struct ipr_resource_entry *)sdev->hostdata;
4530         if (res && ioa_cfg->sis64)
4531                 len = snprintf(buf, PAGE_SIZE, "%s\n",
4532                                __ipr_format_res_path(res->res_path, buffer,
4533                                                      sizeof(buffer)));
4534         else if (res)
4535                 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4536                                res->bus, res->target, res->lun);
4537
4538         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4539         return len;
4540 }
4541
4542 static struct device_attribute ipr_resource_path_attr = {
4543         .attr = {
4544                 .name =         "resource_path",
4545                 .mode =         S_IRUGO,
4546         },
4547         .show = ipr_show_resource_path
4548 };
4549
4550 /**
4551  * ipr_show_device_id - Show the device_id for this device.
4552  * @dev:        device struct
4553  * @attr:       device attribute structure
4554  * @buf:        buffer
4555  *
4556  * Return value:
4557  *      number of bytes printed to buffer
4558  **/
4559 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4560 {
4561         struct scsi_device *sdev = to_scsi_device(dev);
4562         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4563         struct ipr_resource_entry *res;
4564         unsigned long lock_flags = 0;
4565         ssize_t len = -ENXIO;
4566
4567         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4568         res = (struct ipr_resource_entry *)sdev->hostdata;
4569         if (res && ioa_cfg->sis64)
4570                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
4571         else if (res)
4572                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4573
4574         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4575         return len;
4576 }
4577
4578 static struct device_attribute ipr_device_id_attr = {
4579         .attr = {
4580                 .name =         "device_id",
4581                 .mode =         S_IRUGO,
4582         },
4583         .show = ipr_show_device_id
4584 };
4585
4586 /**
4587  * ipr_show_resource_type - Show the resource type for this device.
4588  * @dev:        device struct
4589  * @attr:       device attribute structure
4590  * @buf:        buffer
4591  *
4592  * Return value:
4593  *      number of bytes printed to buffer
4594  **/
4595 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4596 {
4597         struct scsi_device *sdev = to_scsi_device(dev);
4598         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4599         struct ipr_resource_entry *res;
4600         unsigned long lock_flags = 0;
4601         ssize_t len = -ENXIO;
4602
4603         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4604         res = (struct ipr_resource_entry *)sdev->hostdata;
4605
4606         if (res)
4607                 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4608
4609         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4610         return len;
4611 }
4612
4613 static struct device_attribute ipr_resource_type_attr = {
4614         .attr = {
4615                 .name =         "resource_type",
4616                 .mode =         S_IRUGO,
4617         },
4618         .show = ipr_show_resource_type
4619 };
4620
4621 /**
4622  * ipr_show_raw_mode - Show the device's raw mode
4623  * @dev:        class device struct
4624  * @attr:       device attribute structure
4625  * @buf:        buffer
4626  * Return value:
4627  *      number of bytes printed to buffer
4628  **/
4629 static ssize_t ipr_show_raw_mode(struct device *dev,
4630                                  struct device_attribute *attr, char *buf)
4631 {
4632         struct scsi_device *sdev = to_scsi_device(dev);
4633         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4634         struct ipr_resource_entry *res;
4635         unsigned long lock_flags = 0;
4636         ssize_t len;
4637
4638         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4639         res = (struct ipr_resource_entry *)sdev->hostdata;
4640         if (res)
4641                 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4642         else
4643                 len = -ENXIO;
4644         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4645         return len;
4646 }
4647
4648 /**
4649  * ipr_store_raw_mode - Change the device's raw mode
4650  * @dev:        class device struct
4651  * @attr:       device attribute structure
4652  * @buf:        buffer
4653  * Return value:
4654  *      number of bytes consumed on success / other on failure
4655  **/
4656 static ssize_t ipr_store_raw_mode(struct device *dev,
4657                                   struct device_attribute *attr,
4658                                   const char *buf, size_t count)
4659 {
4660         struct scsi_device *sdev = to_scsi_device(dev);
4661         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4662         struct ipr_resource_entry *res;
4663         unsigned long lock_flags = 0;
4664         ssize_t len;
4665
4666         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4667         res = (struct ipr_resource_entry *)sdev->hostdata;
4668         if (res) {
4669                 if (ipr_is_af_dasd_device(res)) {
4670                         res->raw_mode = simple_strtoul(buf, NULL, 10);
4671                         len = strlen(buf);
4672                         if (res->sdev)
4673                                 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4674                                         res->raw_mode ? "enabled" : "disabled");
4675                 } else
4676                         len = -EINVAL;
4677         } else
4678                 len = -ENXIO;
4679         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4680         return len;
4681 }
4682
4683 static struct device_attribute ipr_raw_mode_attr = {
4684         .attr = {
4685                 .name =         "raw_mode",
4686                 .mode =         S_IRUGO | S_IWUSR,
4687         },
4688         .show = ipr_show_raw_mode,
4689         .store = ipr_store_raw_mode
4690 };
4691
4692 static struct device_attribute *ipr_dev_attrs[] = {
4693         &ipr_adapter_handle_attr,
4694         &ipr_resource_path_attr,
4695         &ipr_device_id_attr,
4696         &ipr_resource_type_attr,
4697         &ipr_raw_mode_attr,
4698         NULL,
4699 };
4700
4701 /**
4702  * ipr_biosparam - Return the HSC mapping
4703  * @sdev:                       scsi device struct
4704  * @block_device:       block device pointer
4705  * @capacity:           capacity of the device
4706  * @parm:                       Array containing returned HSC values.
4707  *
4708  * This function generates the HSC parms that fdisk uses.
4709  * We want to make sure we return something that places partitions
4710  * on 4k boundaries for best performance with the IOA.
4711  *
4712  * Return value:
4713  *      0 on success
4714  **/
4715 static int ipr_biosparam(struct scsi_device *sdev,
4716                          struct block_device *block_device,
4717                          sector_t capacity, int *parm)
4718 {
4719         int heads, sectors;
4720         sector_t cylinders;
4721
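        /* 128 heads * 32 sectors gives 4096-sector cylinders, so partitions
         * placed on cylinder boundaries stay aligned on 4k boundaries.
         */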
4722         heads = 128;
4723         sectors = 32;
4724
4725         cylinders = capacity;
4726         sector_div(cylinders, (128 * 32));
4727
4728         /* return result */
4729         parm[0] = heads;
4730         parm[1] = sectors;
4731         parm[2] = cylinders;
4732
4733         return 0;
4734 }
4735
4736 /**
4737  * ipr_find_starget - Find target based on bus/target.
4738  * @starget:    scsi target struct
4739  *
4740  * Return value:
4741  *      resource entry pointer if found / NULL if not found
4742  **/
4743 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4744 {
4745         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4746         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4747         struct ipr_resource_entry *res;
4748
4749         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4750                 if ((res->bus == starget->channel) &&
4751                     (res->target == starget->id)) {
4752                         return res;
4753                 }
4754         }
4755
4756         return NULL;
4757 }
4758
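/* Tentative definition; sata_port_info is initialized with the adapter's
 * SATA port operations later in this file.
 */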
4759 static struct ata_port_info sata_port_info;
4760
4761 /**
4762  * ipr_target_alloc - Prepare for commands to a SCSI target
4763  * @starget:    scsi target struct
4764  *
4765  * If the device is a SATA device, this function allocates an
4766  * ATA port with libata, else it does nothing.
4767  *
4768  * Return value:
4769  *      0 on success / non-0 on failure
4770  **/
4771 static int ipr_target_alloc(struct scsi_target *starget)
4772 {
4773         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4774         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4775         struct ipr_sata_port *sata_port;
4776         struct ata_port *ap;
4777         struct ipr_resource_entry *res;
4778         unsigned long lock_flags;
4779
4780         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4781         res = ipr_find_starget(starget);
4782         starget->hostdata = NULL;
4783
4784         if (res && ipr_is_gata(res)) {
4785                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4786                 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4787                 if (!sata_port)
4788                         return -ENOMEM;
4789
4790                 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4791                 if (ap) {
4792                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4793                         sata_port->ioa_cfg = ioa_cfg;
4794                         sata_port->ap = ap;
4795                         sata_port->res = res;
4796
4797                         res->sata_port = sata_port;
4798                         ap->private_data = sata_port;
4799                         starget->hostdata = sata_port;
4800                 } else {
4801                         kfree(sata_port);
4802                         return -ENOMEM;
4803                 }
4804         }
4805         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4806
4807         return 0;
4808 }
4809
4810 /**
4811  * ipr_target_destroy - Destroy a SCSI target
4812  * @starget:    scsi target struct
4813  *
4814  * If the device was a SATA device, this function frees the libata
4815  * ATA port, else it does nothing.
4816  *
4817  **/
4818 static void ipr_target_destroy(struct scsi_target *starget)
4819 {
4820         struct ipr_sata_port *sata_port = starget->hostdata;
4821         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4822         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4823
4824         if (ioa_cfg->sis64) {
4825                 if (!ipr_find_starget(starget)) {
4826                         if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4827                                 clear_bit(starget->id, ioa_cfg->array_ids);
4828                         else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4829                                 clear_bit(starget->id, ioa_cfg->vset_ids);
4830                         else if (starget->channel == 0)
4831                                 clear_bit(starget->id, ioa_cfg->target_ids);
4832                 }
4833         }
4834
4835         if (sata_port) {
4836                 starget->hostdata = NULL;
4837                 ata_sas_port_destroy(sata_port->ap);
4838                 kfree(sata_port);
4839         }
4840 }
4841
4842 /**
4843  * ipr_find_sdev - Find device based on bus/target/lun.
4844  * @sdev:       scsi device struct
4845  *
4846  * Return value:
4847  *      resource entry pointer if found / NULL if not found
4848  **/
4849 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4850 {
4851         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4852         struct ipr_resource_entry *res;
4853
4854         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4855                 if ((res->bus == sdev->channel) &&
4856                     (res->target == sdev->id) &&
4857                     (res->lun == sdev->lun))
4858                         return res;
4859         }
4860
4861         return NULL;
4862 }
4863
4864 /**
4865  * ipr_slave_destroy - Unconfigure a SCSI device
4866  * @sdev:       scsi device struct
4867  *
4868  * Return value:
4869  *      nothing
4870  **/
4871 static void ipr_slave_destroy(struct scsi_device *sdev)
4872 {
4873         struct ipr_resource_entry *res;
4874         struct ipr_ioa_cfg *ioa_cfg;
4875         unsigned long lock_flags = 0;
4876
4877         ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4878
4879         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4880         res = (struct ipr_resource_entry *) sdev->hostdata;
4881         if (res) {
4882                 if (res->sata_port)
4883                         res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4884                 sdev->hostdata = NULL;
4885                 res->sdev = NULL;
4886                 res->sata_port = NULL;
4887         }
4888         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4889 }
4890
4891 /**
4892  * ipr_slave_configure - Configure a SCSI device
4893  * @sdev:       scsi device struct
4894  *
4895  * This function configures the specified scsi device.
4896  *
4897  * Return value:
4898  *      0 on success
4899  **/
4900 static int ipr_slave_configure(struct scsi_device *sdev)
4901 {
4902         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4903         struct ipr_resource_entry *res;
4904         struct ata_port *ap = NULL;
4905         unsigned long lock_flags = 0;
4906         char buffer[IPR_MAX_RES_PATH_LENGTH];
4907
4908         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4909         res = sdev->hostdata;
4910         if (res) {
4911                 if (ipr_is_af_dasd_device(res))
4912                         sdev->type = TYPE_RAID;
4913                 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4914                         sdev->scsi_level = 4;
4915                         sdev->no_uld_attach = 1;
4916                 }
4917                 if (ipr_is_vset_device(res)) {
4918                         sdev->scsi_level = SCSI_SPC_3;
4919                         sdev->no_report_opcodes = 1;
4920                         blk_queue_rq_timeout(sdev->request_queue,
4921                                              IPR_VSET_RW_TIMEOUT);
4922                         blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4923                 }
4924                 if (ipr_is_gata(res) && res->sata_port)
4925                         ap = res->sata_port->ap;
4926                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4927
4928                 if (ap) {
4929                         scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
4930                         ata_sas_slave_configure(sdev, ap);
4931                 }
4932
4933                 if (ioa_cfg->sis64)
4934                         sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4935                                     ipr_format_res_path(ioa_cfg,
4936                                 res->res_path, buffer, sizeof(buffer)));
4937                 return 0;
4938         }
4939         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4940         return 0;
4941 }
4942
4943 /**
4944  * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4945  * @sdev:       scsi device struct
4946  *
4947  * This function initializes an ATA port so that future commands
4948  * sent through queuecommand will work.
4949  *
4950  * Return value:
4951  *      0 on success
4952  **/
4953 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4954 {
4955         struct ipr_sata_port *sata_port = NULL;
4956         int rc = -ENXIO;
4957
4958         ENTER;
4959         if (sdev->sdev_target)
4960                 sata_port = sdev->sdev_target->hostdata;
4961         if (sata_port) {
4962                 rc = ata_sas_port_init(sata_port->ap);
4963                 if (rc == 0)
4964                         rc = ata_sas_sync_probe(sata_port->ap);
4965         }
4966
4967         if (rc)
4968                 ipr_slave_destroy(sdev);
4969
4970         LEAVE;
4971         return rc;
4972 }
4973
4974 /**
4975  * ipr_slave_alloc - Prepare for commands to a device.
4976  * @sdev:       scsi device struct
4977  *
4978  * This function saves a pointer to the resource entry
4979  * in the scsi device struct if the device exists. We
4980  * can then use this pointer in ipr_queuecommand when
4981  * handling new commands.
4982  *
4983  * Return value:
4984  *      0 on success / -ENXIO if device does not exist
4985  **/
4986 static int ipr_slave_alloc(struct scsi_device *sdev)
4987 {
4988         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4989         struct ipr_resource_entry *res;
4990         unsigned long lock_flags;
4991         int rc = -ENXIO;
4992
4993         sdev->hostdata = NULL;
4994
4995         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4996
4997         res = ipr_find_sdev(sdev);
4998         if (res) {
4999                 res->sdev = sdev;
5000                 res->add_to_ml = 0;
5001                 res->in_erp = 0;
5002                 sdev->hostdata = res;
5003                 if (!ipr_is_naca_model(res))
5004                         res->needs_sync_complete = 1;
5005                 rc = 0;
5006                 if (ipr_is_gata(res)) {
5007                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5008                         return ipr_ata_slave_alloc(sdev);
5009                 }
5010         }
5011
5012         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5013
5014         return rc;
5015 }
5016
5017 /**
5018  * ipr_match_lun - Match function for specified LUN
5019  * @ipr_cmd:    ipr command struct
5020  * @device:             device to match (sdev)
5021  *
5022  * Returns:
5023  *      1 if command matches sdev / 0 if command does not match sdev
5024  **/
5025 static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
5026 {
5027         if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
5028                 return 1;
5029         return 0;
5030 }
5031
5032 /**
5033  * ipr_cmnd_is_free - Check if a command is free or not
5034  * @ipr_cmd:    ipr command struct
5035  *
5036  * Returns:
5037  *      true / false
5038  **/
5039 static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
5040 {
5041         struct ipr_cmnd *loop_cmd;
5042
5043         list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
5044                 if (loop_cmd == ipr_cmd)
5045                         return true;
5046         }
5047
5048         return false;
5049 }
5050
5051 /**
5052  * ipr_match_res - Match function for specified resource entry
5053  * @ipr_cmd:    ipr command struct
5054  * @resource:   resource entry to match
5055  *
5056  * Returns:
5057  *      1 if command matches resource entry / 0 if it does not
5058  **/
5059 static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
5060 {
5061         struct ipr_resource_entry *res = resource;
5062
5063         if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
5064                 return 1;
5065         return 0;
5066 }
5067
5068 /**
5069  * ipr_wait_for_ops - Wait for matching commands to complete
5070  * @ioa_cfg:    ioa config struct
5071  * @device:             device to match (sdev)
5072  * @match:              match function to use
5073  *
5074  * Returns:
5075  *      SUCCESS / FAILED
5076  **/
5077 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
5078                             int (*match)(struct ipr_cmnd *, void *))
5079 {
5080         struct ipr_cmnd *ipr_cmd;
5081         int wait, i;
5082         unsigned long flags;
5083         struct ipr_hrr_queue *hrrq;
5084         signed long timeout = IPR_ABORT_TASK_TIMEOUT;
5085         DECLARE_COMPLETION_ONSTACK(comp);
5086
5087         ENTER;
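        /* Hook a completion onto each outstanding matching command and wait
         * for all of them to finish; if the wait times out, unhook the
         * completions and fail if any command is still outstanding.
         */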
5088         do {
5089                 wait = 0;
5090
5091                 for_each_hrrq(hrrq, ioa_cfg) {
5092                         spin_lock_irqsave(hrrq->lock, flags);
5093                         for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5094                                 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5095                                 if (!ipr_cmnd_is_free(ipr_cmd)) {
5096                                         if (match(ipr_cmd, device)) {
5097                                                 ipr_cmd->eh_comp = &comp;
5098                                                 wait++;
5099                                         }
5100                                 }
5101                         }
5102                         spin_unlock_irqrestore(hrrq->lock, flags);
5103                 }
5104
5105                 if (wait) {
5106                         timeout = wait_for_completion_timeout(&comp, timeout);
5107
5108                         if (!timeout) {
5109                                 wait = 0;
5110
5111                                 for_each_hrrq(hrrq, ioa_cfg) {
5112                                         spin_lock_irqsave(hrrq->lock, flags);
5113                                         for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5114                                                 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5115                                                 if (!ipr_cmnd_is_free(ipr_cmd)) {
5116                                                         if (match(ipr_cmd, device)) {
5117                                                                 ipr_cmd->eh_comp = NULL;
5118                                                                 wait++;
5119                                                         }
5120                                                 }
5121                                         }
5122                                         spin_unlock_irqrestore(hrrq->lock, flags);
5123                                 }
5124
5125                                 if (wait)
5126                                         dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
5127                                 LEAVE;
5128                                 return wait ? FAILED : SUCCESS;
5129                         }
5130                 }
5131         } while (wait);
5132
5133         LEAVE;
5134         return SUCCESS;
5135 }
5136
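/**
 * ipr_eh_host_reset - Reset the host adapter
 * @cmd:        scsi command struct
 *
 * This function initiates an adapter reset, if one is not already in
 * progress, and waits for it to complete.
 *
 * Return value:
 *      SUCCESS / FAILED
 **/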
5137 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
5138 {
5139         struct ipr_ioa_cfg *ioa_cfg;
5140         unsigned long lock_flags = 0;
5141         int rc = SUCCESS;
5142
5143         ENTER;
5144         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5145         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5146
5147         if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5148                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5149                 dev_err(&ioa_cfg->pdev->dev,
5150                         "Adapter being reset as a result of error recovery.\n");
5151
5152                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5153                         ioa_cfg->sdt_state = GET_DUMP;
5154         }
5155
5156         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5157         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5158         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5159
5160         /* If we got hit with a host reset while we were already resetting
5161            the adapter for some reason, and that reset failed, fail the host reset. */
5162         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5163                 ipr_trace;
5164                 rc = FAILED;
5165         }
5166
5167         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5168         LEAVE;
5169         return rc;
5170 }
5171
5172 /**
5173  * ipr_device_reset - Reset the device
5174  * @ioa_cfg:    ioa config struct
5175  * @res:                resource entry struct
5176  *
5177  * This function issues a device reset to the affected device.
5178  * If the device is a SCSI device, a LUN reset will be sent
5179  * to the device first. If that does not work, a target reset
5180  * will be sent. If the device is a SATA device, a PHY reset will
5181  * be sent.
5182  *
5183  * Return value:
5184  *      0 on success / non-zero on failure
5185  **/
5186 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5187                             struct ipr_resource_entry *res)
5188 {
5189         struct ipr_cmnd *ipr_cmd;
5190         struct ipr_ioarcb *ioarcb;
5191         struct ipr_cmd_pkt *cmd_pkt;
5192         struct ipr_ioarcb_ata_regs *regs;
5193         u32 ioasc;
5194
5195         ENTER;
5196         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5197         ioarcb = &ipr_cmd->ioarcb;
5198         cmd_pkt = &ioarcb->cmd_pkt;
5199
5200         if (ipr_cmd->ioa_cfg->sis64) {
5201                 regs = &ipr_cmd->i.ata_ioadl.regs;
5202                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5203         } else
5204                 regs = &ioarcb->u.add_data.u.regs;
5205
5206         ioarcb->res_handle = res->res_handle;
5207         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5208         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5209         if (ipr_is_gata(res)) {
5210                 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
5211                 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
5212                 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5213         }
5214
5215         ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5216         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5217         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5218         if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5219                 if (ipr_cmd->ioa_cfg->sis64)
5220                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5221                                sizeof(struct ipr_ioasa_gata));
5222                 else
5223                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5224                                sizeof(struct ipr_ioasa_gata));
5225         }
5226
5227         LEAVE;
5228         return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
5229 }
5230
5231 /**
5232  * ipr_sata_reset - Reset the SATA port
5233  * @link:       SATA link to reset
5234  * @classes:    class of the attached device
5235  * @deadline:   unused
5236  * This function issues a SATA phy reset to the affected ATA link.
5237  *
5238  * Return value:
5239  *      0 on success / non-zero on failure
5240  **/
5241 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
5242                                 unsigned long deadline)
5243 {
5244         struct ipr_sata_port *sata_port = link->ap->private_data;
5245         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5246         struct ipr_resource_entry *res;
5247         unsigned long lock_flags = 0;
5248         int rc = -ENXIO, ret;
5249
5250         ENTER;
5251         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5252         while (ioa_cfg->in_reset_reload) {
5253                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5254                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5255                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5256         }
5257
5258         res = sata_port->res;
5259         if (res) {
5260                 rc = ipr_device_reset(ioa_cfg, res);
5261                 *classes = res->ata_class;
5262                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5263
5264                 ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5265                 if (ret != SUCCESS) {
5266                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5267                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5268                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5269
5270                         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5271                 }
5272         } else
5273                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5274
5275         LEAVE;
5276         return rc;
5277 }
5278
5279 /**
5280  * __ipr_eh_dev_reset - Reset the device
5281  * @scsi_cmd:   scsi command struct
5282  *
5283  * This function issues a device reset to the affected device.
5284  * A LUN reset will be sent to the device first. If that does
5285  * not work, a target reset will be sent.
5286  *
5287  * Return value:
5288  *      SUCCESS / FAILED
5289  **/
5290 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5291 {
5292         struct ipr_cmnd *ipr_cmd;
5293         struct ipr_ioa_cfg *ioa_cfg;
5294         struct ipr_resource_entry *res;
5295         struct ata_port *ap;
5296         int rc = 0, i;
5297         struct ipr_hrr_queue *hrrq;
5298
5299         ENTER;
5300         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5301         res = scsi_cmd->device->hostdata;
5302
5303         /*
5304          * If we are currently going through reset/reload, return failed. This will force the
5305          * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5306          * reset to complete.
5307          */
5308         if (ioa_cfg->in_reset_reload)
5309                 return FAILED;
5310         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5311                 return FAILED;
5312
5313         for_each_hrrq(hrrq, ioa_cfg) {
5314                 spin_lock(&hrrq->_lock);
5315                 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5316                         ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5317
5318                         if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5319                                 if (!ipr_cmd->qc)
5320                                         continue;
5321                                 if (ipr_cmnd_is_free(ipr_cmd))
5322                                         continue;
5323
5324                                 ipr_cmd->done = ipr_sata_eh_done;
5325                                 if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5326                                         ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5327                                         ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5328                                 }
5329                         }
5330                 }
5331                 spin_unlock(&hrrq->_lock);
5332         }
5333         res->resetting_device = 1;
5334         scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5335
5336         if (ipr_is_gata(res) && res->sata_port) {
5337                 ap = res->sata_port->ap;
5338                 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5339                 ata_std_error_handler(ap);
5340                 spin_lock_irq(scsi_cmd->device->host->host_lock);
5341         } else
5342                 rc = ipr_device_reset(ioa_cfg, res);
5343         res->resetting_device = 0;
5344         res->reset_occurred = 1;
5345
5346         LEAVE;
5347         return rc ? FAILED : SUCCESS;
5348 }
5349
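/**
 * ipr_eh_dev_reset - Reset the device
 * @cmd:        scsi command struct
 *
 * This function issues a device reset under the host lock and then
 * waits for any outstanding commands to the device to complete.
 *
 * Return value:
 *      SUCCESS / FAILED
 **/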
5350 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5351 {
5352         int rc;
5353         struct ipr_ioa_cfg *ioa_cfg;
5354         struct ipr_resource_entry *res;
5355
5356         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5357         res = cmd->device->hostdata;
5358
5359         if (!res)
5360                 return FAILED;
5361
5362         spin_lock_irq(cmd->device->host->host_lock);
5363         rc = __ipr_eh_dev_reset(cmd);
5364         spin_unlock_irq(cmd->device->host->host_lock);
5365
5366         if (rc == SUCCESS) {
5367                 if (ipr_is_gata(res) && res->sata_port)
5368                         rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5369                 else
5370                         rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5371         }
5372
5373         return rc;
5374 }
5375
5376 /**
5377  * ipr_bus_reset_done - Op done function for bus reset.
5378  * @ipr_cmd:    ipr command struct
5379  *
5380  * This function is the op done function for a bus reset
5381  *
5382  * Return value:
5383  *      none
5384  **/
5385 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5386 {
5387         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5388         struct ipr_resource_entry *res;
5389
5390         ENTER;
5391         if (!ioa_cfg->sis64)
5392                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5393                         if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5394                                 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5395                                 break;
5396                         }
5397                 }
5398
5399         /*
5400          * If abort has not completed, indicate the reset has, else call the
5401          * abort's done function to wake the sleeping eh thread
5402          */
5403         if (ipr_cmd->sibling->sibling)
5404                 ipr_cmd->sibling->sibling = NULL;
5405         else
5406                 ipr_cmd->sibling->done(ipr_cmd->sibling);
5407
5408         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5409         LEAVE;
5410 }
5411
5412 /**
5413  * ipr_abort_timeout - An abort task has timed out
5414  * @t: Timer context used to fetch ipr command struct
5415  *
5416  * This function handles when an abort task times out. If this
5417  * happens we issue a bus reset since we have resources tied
5418  * up that must be freed before returning to the midlayer.
5419  *
5420  * Return value:
5421  *      none
5422  **/
5423 static void ipr_abort_timeout(struct timer_list *t)
5424 {
5425         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
5426         struct ipr_cmnd *reset_cmd;
5427         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5428         struct ipr_cmd_pkt *cmd_pkt;
5429         unsigned long lock_flags = 0;
5430
5431         ENTER;
5432         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5433         if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5434                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5435                 return;
5436         }
5437
5438         sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5439         reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5440         ipr_cmd->sibling = reset_cmd;
5441         reset_cmd->sibling = ipr_cmd;
5442         reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5443         cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5444         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5445         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5446         cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5447
5448         ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5449         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5450         LEAVE;
5451 }
5452
5453 /**
5454  * ipr_cancel_op - Cancel specified op
5455  * @scsi_cmd:   scsi command struct
5456  *
5457  * This function cancels specified op.
5458  *
5459  * Return value:
5460  *      SUCCESS / FAILED
5461  **/
5462 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5463 {
5464         struct ipr_cmnd *ipr_cmd;
5465         struct ipr_ioa_cfg *ioa_cfg;
5466         struct ipr_resource_entry *res;
5467         struct ipr_cmd_pkt *cmd_pkt;
5468         u32 ioasc, int_reg;
5469         int i, op_found = 0;
5470         struct ipr_hrr_queue *hrrq;
5471
5472         ENTER;
5473         ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5474         res = scsi_cmd->device->hostdata;
5475
5476         /* If we are currently going through reset/reload, return failed.
5477          * This will force the mid-layer to call ipr_eh_host_reset,
5478          * which will then go to sleep and wait for the reset to complete
5479          */
5480         if (ioa_cfg->in_reset_reload ||
5481             ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5482                 return FAILED;
5483         if (!res)
5484                 return FAILED;
5485
5486         /*
5487          * If we are aborting a timed out op, chances are that the timeout was caused
5488          * by a still not detected EEH error. In such cases, reading a register will
5489          * trigger the EEH recovery infrastructure.
5490          */
5491         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5492
5493         if (!ipr_is_gscsi(res))
5494                 return FAILED;
5495
5496         for_each_hrrq(hrrq, ioa_cfg) {
5497                 spin_lock(&hrrq->_lock);
5498                 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5499                         if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
5500                                 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
5501                                         op_found = 1;
5502                                         break;
5503                                 }
5504                         }
5505                 }
5506                 spin_unlock(&hrrq->_lock);
5507         }
5508
5509         if (!op_found)
5510                 return SUCCESS;
5511
5512         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5513         ipr_cmd->ioarcb.res_handle = res->res_handle;
5514         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5515         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5516         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5517         ipr_cmd->u.sdev = scsi_cmd->device;
5518
5519         scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5520                     scsi_cmd->cmnd[0]);
5521         ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5522         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5523
5524         /*
5525          * If the abort task timed out and we sent a bus reset, we will get
5526          * one of the following responses to the abort
5527          */
5528         if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5529                 ioasc = 0;
5530                 ipr_trace;
5531         }
5532
5533         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5534         if (!ipr_is_naca_model(res))
5535                 res->needs_sync_complete = 1;
5536
5537         LEAVE;
5538         return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5539 }
5540
5541 /**
5542  * ipr_scan_finished - Report whether the device scan has completed
5543  * @shost:      scsi host struct
5544  * @elapsed_time:       elapsed time of the scan
5545  * Return value:
5546  *      0 if scan in progress / 1 if scan is complete
5547  **/
5548 static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5549 {
5550         unsigned long lock_flags;
5551         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5552         int rc = 0;
5553
5554         spin_lock_irqsave(shost->host_lock, lock_flags);
5555         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5556                 rc = 1;
5557         if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5558                 rc = 1;
5559         spin_unlock_irqrestore(shost->host_lock, lock_flags);
5560         return rc;
5561 }
5562
5563 /**
5564  * ipr_eh_abort - Abort a single op
5565  * @scsi_cmd:   scsi command struct
5566  *
5567  * Return value:
5568  *      SUCCESS / FAILED
5569  **/
5570 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5571 {
5572         unsigned long flags;
5573         int rc;
5574         struct ipr_ioa_cfg *ioa_cfg;
5575
5576         ENTER;
5577
5578         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5579
5580         spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5581         rc = ipr_cancel_op(scsi_cmd);
5582         spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5583
5584         if (rc == SUCCESS)
5585                 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5586         LEAVE;
5587         return rc;
5588 }
5589
5590 /**
5591  * ipr_handle_other_interrupt - Handle "other" interrupts
5592  * @ioa_cfg:    ioa config struct
5593  * @int_reg:    interrupt register
5594  *
5595  * Return value:
5596  *      IRQ_NONE / IRQ_HANDLED
5597  **/
5598 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5599                                               u32 int_reg)
5600 {
5601         irqreturn_t rc = IRQ_HANDLED;
5602         u32 int_mask_reg;
5603
5604         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5605         int_reg &= ~int_mask_reg;
5606
5607         /* If an interrupt on the adapter did not occur, ignore it.
5608          * Or, in the case of SIS 64, check for a stage change interrupt.
5609          */
5610         if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5611                 if (ioa_cfg->sis64) {
5612                         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5613                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5614                         if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5615
5616                                 /* clear stage change */
5617                                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5618                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5619                                 list_del(&ioa_cfg->reset_cmd->queue);
5620                                 del_timer(&ioa_cfg->reset_cmd->timer);
5621                                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5622                                 return IRQ_HANDLED;
5623                         }
5624                 }
5625
5626                 return IRQ_NONE;
5627         }
5628
5629         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5630                 /* Mask the interrupt */
5631                 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5632                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5633
5634                 list_del(&ioa_cfg->reset_cmd->queue);
5635                 del_timer(&ioa_cfg->reset_cmd->timer);
5636                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5637         } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5638                 if (ioa_cfg->clear_isr) {
5639                         if (ipr_debug && printk_ratelimit())
5640                                 dev_err(&ioa_cfg->pdev->dev,
5641                                         "Spurious interrupt detected. 0x%08X\n", int_reg);
5642                         writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5643                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5644                         return IRQ_NONE;
5645                 }
5646         } else {
5647                 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5648                         ioa_cfg->ioa_unit_checked = 1;
5649                 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5650                         dev_err(&ioa_cfg->pdev->dev,
5651                                 "No Host RRQ. 0x%08X\n", int_reg);
5652                 else
5653                         dev_err(&ioa_cfg->pdev->dev,
5654                                 "Permanent IOA failure. 0x%08X\n", int_reg);
5655
5656                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5657                         ioa_cfg->sdt_state = GET_DUMP;
5658
5659                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5660                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5661         }
5662
5663         return rc;
5664 }
5665
5666 /**
5667  * ipr_isr_eh - Interrupt service routine error handler
5668  * @ioa_cfg:    ioa config struct
5669  * @msg:        message to log
      * @number:     number to log with the message
5670  *
5671  * Return value:
5672  *      none
5673  **/
5674 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5675 {
5676         ioa_cfg->errors_logged++;
5677         dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5678
5679         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5680                 ioa_cfg->sdt_state = GET_DUMP;
5681
5682         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5683 }
5684
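/**
 * ipr_process_hrrq - Pull completed responses off an HRR queue
 * @hrr_queue:  hrr queue to process
 * @budget:     maximum number of responses to process (-1 for no limit)
 * @doneq:      list head on which to collect the completed commands
 *
 * Return value:
 *      number of responses processed
 **/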
5685 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5686                                                 struct list_head *doneq)
5687 {
5688         u32 ioasc;
5689         u16 cmd_index;
5690         struct ipr_cmnd *ipr_cmd;
5691         struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5692         int num_hrrq = 0;
5693
5694         /* If interrupts are disabled, ignore the interrupt */
5695         if (!hrr_queue->allow_interrupts)
5696                 return 0;
5697
5698         while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
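        /*
         * Each HRRQ entry is a single big-endian word holding the
         * response handle (an index into ipr_cmnd_list) and a toggle
         * bit. The toggle bit flips each time the queue wraps, so an
         * entry with a stale toggle bit marks the end of the new
         * responses.
         */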
5699                hrr_queue->toggle_bit) {
5700
5701                 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5702                              IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5703                              IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5704
5705                 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5706                              cmd_index < hrr_queue->min_cmd_id)) {
5707                         ipr_isr_eh(ioa_cfg,
5708                                 "Invalid response handle from IOA: ",
5709                                 cmd_index);
5710                         break;
5711                 }
5712
5713                 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5714                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5715
5716                 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5717
5718                 list_move_tail(&ipr_cmd->queue, doneq);
5719
5720                 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5721                         hrr_queue->hrrq_curr++;
5722                 } else {
5723                         hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5724                         hrr_queue->toggle_bit ^= 1u;
5725                 }
5726                 num_hrrq++;
5727                 if (budget > 0 && num_hrrq >= budget)
5728                         break;
5729         }
5730
5731         return num_hrrq;
5732 }
5733
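/**
 * ipr_iopoll - irq_poll callback to process HRRQ responses in softirq context
 * @iop:        irq_poll structure embedded in the hrr queue
 * @budget:     maximum number of responses to process
 *
 * Return value:
 *      number of completed ops
 **/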
5734 static int ipr_iopoll(struct irq_poll *iop, int budget)
5735 {
5736         struct ipr_ioa_cfg *ioa_cfg;
5737         struct ipr_hrr_queue *hrrq;
5738         struct ipr_cmnd *ipr_cmd, *temp;
5739         unsigned long hrrq_flags;
5740         int completed_ops;
5741         LIST_HEAD(doneq);
5742
5743         hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5744         ioa_cfg = hrrq->ioa_cfg;
5745
5746         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5747         completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5748
5749         if (completed_ops < budget)
5750                 irq_poll_complete(iop);
5751         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5752
5753         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5754                 list_del(&ipr_cmd->queue);
5755                 del_timer(&ipr_cmd->timer);
5756                 ipr_cmd->fast_done(ipr_cmd);
5757         }
5758
5759         return completed_ops;
5760 }
5761
5762 /**
5763  * ipr_isr - Interrupt service routine
5764  * @irq:        irq number
5765  * @devp:       pointer to ioa config struct
5766  *
5767  * Return value:
5768  *      IRQ_NONE / IRQ_HANDLED
5769  **/
5770 static irqreturn_t ipr_isr(int irq, void *devp)
5771 {
5772         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5773         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5774         unsigned long hrrq_flags = 0;
5775         u32 int_reg = 0;
5776         int num_hrrq = 0;
5777         int irq_none = 0;
5778         struct ipr_cmnd *ipr_cmd, *temp;
5779         irqreturn_t rc = IRQ_NONE;
5780         LIST_HEAD(doneq);
5781
5782         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5783         /* If interrupts are disabled, ignore the interrupt */
5784         if (!hrrq->allow_interrupts) {
5785                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5786                 return IRQ_NONE;
5787         }
5788
5789         while (1) {
5790                 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5791                         rc =  IRQ_HANDLED;
5792
5793                         if (!ioa_cfg->clear_isr)
5794                                 break;
5795
5796                         /* Clear the PCI interrupt */
5797                         num_hrrq = 0;
5798                         do {
5799                                 writel(IPR_PCII_HRRQ_UPDATED,
5800                                      ioa_cfg->regs.clr_interrupt_reg32);
5801                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5802                         } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5803                                 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5804
5805                 } else if (rc == IRQ_NONE && irq_none == 0) {
5806                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5807                         irq_none++;
5808                 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5809                            int_reg & IPR_PCII_HRRQ_UPDATED) {
5810                         ipr_isr_eh(ioa_cfg,
5811                                 "Error clearing HRRQ: ", num_hrrq);
5812                         rc = IRQ_HANDLED;
5813                         break;
5814                 } else
5815                         break;
5816         }
5817
5818         if (unlikely(rc == IRQ_NONE))
5819                 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5820
5821         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5822         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5823                 list_del(&ipr_cmd->queue);
5824                 del_timer(&ipr_cmd->timer);
5825                 ipr_cmd->fast_done(ipr_cmd);
5826         }
5827         return rc;
5828 }
5829
5830 /**
5831  * ipr_isr_mhrrq - Interrupt service routine
5832  * @irq:        irq number
5833  * @devp:       pointer to ioa config struct
5834  *
5835  * Return value:
5836  *      IRQ_NONE / IRQ_HANDLED
5837  **/
5838 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5839 {
5840         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5841         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5842         unsigned long hrrq_flags = 0;
5843         struct ipr_cmnd *ipr_cmd, *temp;
5844         irqreturn_t rc = IRQ_NONE;
5845         LIST_HEAD(doneq);
5846
5847         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5848
5849         /* If interrupts are disabled, ignore the interrupt */
5850         if (!hrrq->allow_interrupts) {
5851                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5852                 return IRQ_NONE;
5853         }
5854
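        /* When iopoll is enabled on SIS64 with multiple MSI-X vectors,
         * defer HRRQ processing to softirq context via irq_poll.
         */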
5855         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5856                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5857                        hrrq->toggle_bit) {
5858                         irq_poll_sched(&hrrq->iopoll);
5859                         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5860                         return IRQ_HANDLED;
5861                 }
5862         } else {
5863                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5864                         hrrq->toggle_bit)
5865
5866                         if (ipr_process_hrrq(hrrq, -1, &doneq))
5867                                 rc =  IRQ_HANDLED;
5868         }
5869
5870         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5871
5872         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5873                 list_del(&ipr_cmd->queue);
5874                 del_timer(&ipr_cmd->timer);
5875                 ipr_cmd->fast_done(ipr_cmd);
5876         }
5877         return rc;
5878 }
5879
5880 /**
5881  * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5882  * @ioa_cfg:    ioa config struct
5883  * @ipr_cmd:    ipr command struct
5884  *
5885  * Return value:
5886  *      0 on success / -1 on failure
5887  **/
5888 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5889                              struct ipr_cmnd *ipr_cmd)
5890 {
5891         int i, nseg;
5892         struct scatterlist *sg;
5893         u32 length;
5894         u32 ioadl_flags = 0;
5895         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5896         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5897         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5898
5899         length = scsi_bufflen(scsi_cmd);
5900         if (!length)
5901                 return 0;
5902
5903         nseg = scsi_dma_map(scsi_cmd);
5904         if (nseg < 0) {
5905                 if (printk_ratelimit())
5906                         dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5907                 return -1;
5908         }
5909
5910         ipr_cmd->dma_use_sg = nseg;
5911
5912         ioarcb->data_transfer_length = cpu_to_be32(length);
5913         ioarcb->ioadl_len =
5914                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5915
5916         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5917                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5918                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5919         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5920                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5921
5922         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5923                 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5924                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5925                 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5926         }
5927
5928         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5929         return 0;
5930 }
5931
5932 /**
5933  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5934  * @ioa_cfg:    ioa config struct
5935  * @ipr_cmd:    ipr command struct
5936  *
5937  * Return value:
5938  *      0 on success / -1 on failure
5939  **/
5940 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5941                            struct ipr_cmnd *ipr_cmd)
5942 {
5943         int i, nseg;
5944         struct scatterlist *sg;
5945         u32 length;
5946         u32 ioadl_flags = 0;
5947         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5948         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5949         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5950
5951         length = scsi_bufflen(scsi_cmd);
5952         if (!length)
5953                 return 0;
5954
5955         nseg = scsi_dma_map(scsi_cmd);
5956         if (nseg < 0) {
5957                 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5958                 return -1;
5959         }
5960
5961         ipr_cmd->dma_use_sg = nseg;
5962
5963         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5964                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5965                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5966                 ioarcb->data_transfer_length = cpu_to_be32(length);
5967                 ioarcb->ioadl_len =
5968                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5969         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5970                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5971                 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5972                 ioarcb->read_ioadl_len =
5973                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5974         }
5975
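        /* For short S/G lists, embed the IOADL in the IOARCB itself so
         * the adapter does not need a separate fetch for it.
         */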
5976         if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5977                 ioadl = ioarcb->u.add_data.u.ioadl;
5978                 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5979                                     offsetof(struct ipr_ioarcb, u.add_data));
5980                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5981         }
5982
5983         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5984                 ioadl[i].flags_and_data_len =
5985                         cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5986                 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5987         }
5988
5989         ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5990         return 0;
5991 }
5992
5993 /**
5994  * __ipr_erp_done - Process completion of ERP for a device
5995  * @ipr_cmd:            ipr command struct
5996  *
5997  * This function copies the sense buffer into the scsi_cmd
5998  * struct and pushes the scsi_done function.
5999  *
6000  * Return value:
6001  *      nothing
6002  **/
6003 static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6004 {
6005         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6006         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6007         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6008
6009         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6010                 scsi_cmd->result |= (DID_ERROR << 16);
6011                 scmd_printk(KERN_ERR, scsi_cmd,
6012                             "Request Sense failed with IOASC: 0x%08X\n", ioasc);
6013         } else {
6014                 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
6015                        SCSI_SENSE_BUFFERSIZE);
6016         }
6017
6018         if (res) {
6019                 if (!ipr_is_naca_model(res))
6020                         res->needs_sync_complete = 1;
6021                 res->in_erp = 0;
6022         }
6023         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6024         scsi_cmd->scsi_done(scsi_cmd);
6025         if (ipr_cmd->eh_comp)
6026                 complete(ipr_cmd->eh_comp);
6027         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6028 }
6029
6030 /**
6031  * ipr_erp_done - Process completion of ERP for a device
6032  * @ipr_cmd:            ipr command struct
6033  *
6034  * This function copies the sense buffer into the scsi_cmd
6035  * struct and pushes the scsi_done function.
6036  *
6037  * Return value:
6038  *      nothing
6039  **/
6040 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6041 {
6042         struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6043         unsigned long hrrq_flags;
6044
6045         spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6046         __ipr_erp_done(ipr_cmd);
6047         spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6048 }
6049
6050 /**
6051  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
6052  * @ipr_cmd:    ipr command struct
6053  *
6054  * Return value:
6055  *      none
6056  **/
6057 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
6058 {
6059         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6060         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6061         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6062
6063         memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
6064         ioarcb->data_transfer_length = 0;
6065         ioarcb->read_data_transfer_length = 0;
6066         ioarcb->ioadl_len = 0;
6067         ioarcb->read_ioadl_len = 0;
6068         ioasa->hdr.ioasc = 0;
6069         ioasa->hdr.residual_data_len = 0;
6070
6071         if (ipr_cmd->ioa_cfg->sis64)
6072                 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6073                         cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
6074         else {
6075                 ioarcb->write_ioadl_addr =
6076                         cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
6077                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6078         }
6079 }
6080
6081 /**
6082  * __ipr_erp_request_sense - Send request sense to a device
6083  * @ipr_cmd:    ipr command struct
6084  *
6085  * This function sends a request sense to a device as a result
6086  * of a check condition.
6087  *
6088  * Return value:
6089  *      nothing
6090  **/
6091 static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6092 {
6093         struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6094         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6095
6096         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6097                 __ipr_erp_done(ipr_cmd);
6098                 return;
6099         }
6100
6101         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6102
6103         cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
6104         cmd_pkt->cdb[0] = REQUEST_SENSE;
6105         cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
6106         cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
6107         cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6108         cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
6109
6110         ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
6111                        SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
6112
6113         ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
6114                    IPR_REQUEST_SENSE_TIMEOUT * 2);
6115 }
6116
6117 /**
6118  * ipr_erp_request_sense - Send request sense to a device
6119  * @ipr_cmd:    ipr command struct
6120  *
6121  * This function sends a request sense to a device as a result
6122  * of a check condition.
6123  *
6124  * Return value:
6125  *      nothing
6126  **/
6127 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6128 {
6129         struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6130         unsigned long hrrq_flags;
6131
6132         spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6133         __ipr_erp_request_sense(ipr_cmd);
6134         spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6135 }
6136
6137 /**
6138  * ipr_erp_cancel_all - Send cancel all to a device
6139  * @ipr_cmd:    ipr command struct
6140  *
6141  * This function sends a cancel all to a device to clear the
6142  * queue. If we are running TCQ on the device, QERR is set to 1,
6143  * which means all outstanding ops have been dropped on the floor.
6144  * Cancel all will return them to us.
6145  *
6146  * Return value:
6147  *      nothing
6148  **/
6149 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
6150 {
6151         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6152         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6153         struct ipr_cmd_pkt *cmd_pkt;
6154
6155         res->in_erp = 1;
6156
6157         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6158
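        /* Without tagged queuing there are no queued ops to cancel, so
         * go straight to the request sense.
         */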
6159         if (!scsi_cmd->device->simple_tags) {
6160                 __ipr_erp_request_sense(ipr_cmd);
6161                 return;
6162         }
6163
6164         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6165         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
6166         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
6167
6168         ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
6169                    IPR_CANCEL_ALL_TIMEOUT);
6170 }
6171
6172 /**
6173  * ipr_dump_ioasa - Dump contents of IOASA
6174  * @ioa_cfg:    ioa config struct
6175  * @ipr_cmd:    ipr command struct
6176  * @res:                resource entry struct
6177  *
6178  * This function is invoked by the interrupt handler when ops
6179  * fail. It will log the IOASA if appropriate. Only called
6180  * for GPDD ops.
6181  *
6182  * Return value:
6183  *      none
6184  **/
6185 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
6186                            struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
6187 {
6188         int i;
6189         u16 data_len;
6190         u32 ioasc, fd_ioasc;
6191         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6192         __be32 *ioasa_data = (__be32 *)ioasa;
6193         int error_index;
6194
6195         ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
6196         fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
6197
6198         if (0 == ioasc)
6199                 return;
6200
6201         if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
6202                 return;
6203
6204         if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
6205                 error_index = ipr_get_error(fd_ioasc);
6206         else
6207                 error_index = ipr_get_error(ioasc);
6208
6209         if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6210                 /* Don't log an error if the IOA already logged one */
6211                 if (ioasa->hdr.ilid != 0)
6212                         return;
6213
6214                 if (!ipr_is_gscsi(res))
6215                         return;
6216
6217                 if (ipr_error_table[error_index].log_ioasa == 0)
6218                         return;
6219         }
6220
6221         ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
6222
6223         data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6224         if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6225                 data_len = sizeof(struct ipr_ioasa64);
6226         else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
6227                 data_len = sizeof(struct ipr_ioasa);
6228
6229         ipr_err("IOASA Dump:\n");
6230
6231         for (i = 0; i < data_len / 4; i += 4) {
6232                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6233                         be32_to_cpu(ioasa_data[i]),
6234                         be32_to_cpu(ioasa_data[i+1]),
6235                         be32_to_cpu(ioasa_data[i+2]),
6236                         be32_to_cpu(ioasa_data[i+3]));
6237         }
6238 }
6239
6240 /**
6241  * ipr_gen_sense - Generate SCSI sense data from an IOASA
6242  * @ipr_cmd:    ipr command struct
6244  *
6245  * Return value:
6246  *      none
6247  **/
6248 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6249 {
6250         u32 failing_lba;
6251         u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6252         struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
6253         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6254         u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
6255
6256         memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
6257
6258         if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6259                 return;
6260
6261         ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
6262
6263         if (ipr_is_vset_device(res) &&
6264             ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6265             ioasa->u.vset.failing_lba_hi != 0) {
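                /* A 64-bit failing LBA will not fit in fixed-format
                 * sense data, so build descriptor-format sense (0x72)
                 * with an information descriptor instead.
                 */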
6266                 sense_buf[0] = 0x72;
6267                 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6268                 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6269                 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6270
6271                 sense_buf[7] = 12;
6272                 sense_buf[8] = 0;
6273                 sense_buf[9] = 0x0A;
6274                 sense_buf[10] = 0x80;
6275
6276                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6277
6278                 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6279                 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6280                 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6281                 sense_buf[15] = failing_lba & 0x000000ff;
6282
6283                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6284
6285                 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6286                 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6287                 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6288                 sense_buf[19] = failing_lba & 0x000000ff;
6289         } else {
6290                 sense_buf[0] = 0x70;
6291                 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6292                 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6293                 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6294
6295                 /* Illegal request */
6296                 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
6297                     (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
6298                         sense_buf[7] = 10;      /* additional length */
6299
6300                         /* IOARCB was in error */
6301                         if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6302                                 sense_buf[15] = 0xC0;
6303                         else    /* Parameter data was invalid */
6304                                 sense_buf[15] = 0x80;
6305
6306                         sense_buf[16] =
6307                             ((IPR_FIELD_POINTER_MASK &
6308                               be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
6309                         sense_buf[17] =
6310                             (IPR_FIELD_POINTER_MASK &
6311                              be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
6312                 } else {
6313                         if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6314                                 if (ipr_is_vset_device(res))
6315                                         failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6316                                 else
6317                                         failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6318
6319                                 sense_buf[0] |= 0x80;   /* Or in the Valid bit */
6320                                 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6321                                 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6322                                 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6323                                 sense_buf[6] = failing_lba & 0x000000ff;
6324                         }
6325
6326                         sense_buf[7] = 6;       /* additional length */
6327                 }
6328         }
6329 }
6330
6331 /**
6332  * ipr_get_autosense - Copy autosense data to sense buffer
6333  * @ipr_cmd:    ipr command struct
6334  *
6335  * This function copies the autosense buffer to the buffer
6336  * in the scsi_cmd, if there is autosense available.
6337  *
6338  * Return value:
6339  *      1 if autosense was available / 0 if not
6340  **/
6341 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6342 {
6343         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6344         struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6345
6346         if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6347                 return 0;
6348
6349         if (ipr_cmd->ioa_cfg->sis64)
6350                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6351                        min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6352                            SCSI_SENSE_BUFFERSIZE));
6353         else
6354                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6355                        min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6356                            SCSI_SENSE_BUFFERSIZE));
6357         return 1;
6358 }
6359
6360 /**
6361  * ipr_erp_start - Process an error response for a SCSI op
6362  * @ioa_cfg:    ioa config struct
6363  * @ipr_cmd:    ipr command struct
6364  *
6365  * This function determines whether or not to initiate ERP
6366  * on the affected device.
6367  *
6368  * Return value:
6369  *      nothing
6370  **/
6371 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6372                               struct ipr_cmnd *ipr_cmd)
6373 {
6374         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6375         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6376         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6377         u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6378
6379         if (!res) {
6380                 __ipr_scsi_eh_done(ipr_cmd);
6381                 return;
6382         }
6383
6384         if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6385                 ipr_gen_sense(ipr_cmd);
6386
6387         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6388
6389         switch (masked_ioasc) {
6390         case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6391                 if (ipr_is_naca_model(res))
6392                         scsi_cmd->result |= (DID_ABORT << 16);
6393                 else
6394                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6395                 break;
6396         case IPR_IOASC_IR_RESOURCE_HANDLE:
6397         case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6398                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6399                 break;
6400         case IPR_IOASC_HW_SEL_TIMEOUT:
6401                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6402                 if (!ipr_is_naca_model(res))
6403                         res->needs_sync_complete = 1;
6404                 break;
6405         case IPR_IOASC_SYNC_REQUIRED:
6406                 if (!res->in_erp)
6407                         res->needs_sync_complete = 1;
6408                 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6409                 break;
6410         case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6411         case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6412                 /*
6413                  * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
6414                  * so SCSI mid-layer and upper layers handle it accordingly.
6415                  */
6416                 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
6417                         scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6418                 break;
6419         case IPR_IOASC_BUS_WAS_RESET:
6420         case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6421                 /*
6422                  * Report the bus reset and ask for a retry. The device
6423                  * will return a CC/UA on the next command.
6424                  */
6425                 if (!res->resetting_device)
6426                         scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6427                 scsi_cmd->result |= (DID_ERROR << 16);
6428                 if (!ipr_is_naca_model(res))
6429                         res->needs_sync_complete = 1;
6430                 break;
6431         case IPR_IOASC_HW_DEV_BUS_STATUS:
6432                 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6433                 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6434                         if (!ipr_get_autosense(ipr_cmd)) {
6435                                 if (!ipr_is_naca_model(res)) {
6436                                         ipr_erp_cancel_all(ipr_cmd);
6437                                         return;
6438                                 }
6439                         }
6440                 }
6441                 if (!ipr_is_naca_model(res))
6442                         res->needs_sync_complete = 1;
6443                 break;
6444         case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6445                 break;
6446         case IPR_IOASC_IR_NON_OPTIMIZED:
6447                 if (res->raw_mode) {
6448                         res->raw_mode = 0;
6449                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6450                 } else
6451                         scsi_cmd->result |= (DID_ERROR << 16);
6452                 break;
6453         default:
6454                 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6455                         scsi_cmd->result |= (DID_ERROR << 16);
6456                 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6457                         res->needs_sync_complete = 1;
6458                 break;
6459         }
6460
6461         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6462         scsi_cmd->scsi_done(scsi_cmd);
6463         if (ipr_cmd->eh_comp)
6464                 complete(ipr_cmd->eh_comp);
6465         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6466 }
6467
6468 /**
6469  * ipr_scsi_done - mid-layer done function
6470  * @ipr_cmd:    ipr command struct
6471  *
6472  * This function is invoked by the interrupt handler for
6473  * ops generated by the SCSI mid-layer
6474  *
6475  * Return value:
6476  *      none
6477  **/
6478 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6479 {
6480         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6481         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6482         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6483         unsigned long lock_flags;
6484
6485         scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6486
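        /* Fast path: the op succeeded, so complete it under the hrrq
         * lock alone. Errors go through ERP, which also needs the host
         * lock.
         */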
6487         if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6488                 scsi_dma_unmap(scsi_cmd);
6489
6490                 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6491                 scsi_cmd->scsi_done(scsi_cmd);
6492                 if (ipr_cmd->eh_comp)
6493                         complete(ipr_cmd->eh_comp);
6494                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6495                 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6496         } else {
6497                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6498                 spin_lock(&ipr_cmd->hrrq->_lock);
6499                 ipr_erp_start(ioa_cfg, ipr_cmd);
6500                 spin_unlock(&ipr_cmd->hrrq->_lock);
6501                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6502         }
6503 }
6504
6505 /**
6506  * ipr_queuecommand - Queue a mid-layer request
6507  * @shost:              scsi host struct
6508  * @scsi_cmd:   scsi command struct
6509  *
6510  * This function queues a request generated by the mid-layer.
6511  *
6512  * Return value:
6513  *      0 on success
6514  *      SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6515  *      SCSI_MLQUEUE_HOST_BUSY if host is busy
6516  **/
6517 static int ipr_queuecommand(struct Scsi_Host *shost,
6518                             struct scsi_cmnd *scsi_cmd)
6519 {
6520         struct ipr_ioa_cfg *ioa_cfg;
6521         struct ipr_resource_entry *res;
6522         struct ipr_ioarcb *ioarcb;
6523         struct ipr_cmnd *ipr_cmd;
6524         unsigned long hrrq_flags, lock_flags;
6525         int rc;
6526         struct ipr_hrr_queue *hrrq;
6527         int hrrq_id;
6528
6529         ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6530
6531         scsi_cmd->result = (DID_OK << 16);
6532         res = scsi_cmd->device->hostdata;
6533
6534         if (ipr_is_gata(res) && res->sata_port) {
6535                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6536                 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6537                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6538                 return rc;
6539         }
6540
6541         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6542         hrrq = &ioa_cfg->hrrq[hrrq_id];
6543
6544         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6545         /*
6546          * We are currently blocking all devices due to a host reset.
6547          * We have told the host to stop giving us new requests, but
6548          * ERP ops don't count. FIXME
6549          */
6550         if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6551                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6552                 return SCSI_MLQUEUE_HOST_BUSY;
6553         }
6554
6555         /*
6556          * FIXME - Create scsi_set_host_offline interface
6557          *  and the ioa_is_dead check can be removed
6558          */
6559         if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6560                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6561                 goto err_nodev;
6562         }
6563
6564         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6565         if (ipr_cmd == NULL) {
6566                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6567                 return SCSI_MLQUEUE_HOST_BUSY;
6568         }
6569         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6570
6571         ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6572         ioarcb = &ipr_cmd->ioarcb;
6573
6574         memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6575         ipr_cmd->scsi_cmd = scsi_cmd;
6576         ipr_cmd->done = ipr_scsi_eh_done;
6577
6578         if (ipr_is_gscsi(res)) {
6579                 if (scsi_cmd->underflow == 0)
6580                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6581
6582                 if (res->reset_occurred) {
6583                         res->reset_occurred = 0;
6584                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6585                 }
6586         }
6587
6588         if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6589                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6590
6591                 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6592                 if (scsi_cmd->flags & SCMD_TAGGED)
6593                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6594                 else
6595                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
6596         }
6597
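        /* Vendor-unique CDBs (opcode 0xC0 and above) are sent to the
         * IOA itself; on generic SCSI devices only Query Resource State
         * is routed that way.
         */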
6598         if (scsi_cmd->cmnd[0] >= 0xC0 &&
6599             (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6600                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6601         }
6602         if (res->raw_mode && ipr_is_af_dasd_device(res)) {
6603                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
6604
6605                 if (scsi_cmd->underflow == 0)
6606                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6607         }
6608
6609         if (ioa_cfg->sis64)
6610                 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6611         else
6612                 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6613
6614         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6615         if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6616                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6617                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6618                 if (!rc)
6619                         scsi_dma_unmap(scsi_cmd);
6620                 return SCSI_MLQUEUE_HOST_BUSY;
6621         }
6622
6623         if (unlikely(hrrq->ioa_is_dead)) {
6624                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6625                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6626                 scsi_dma_unmap(scsi_cmd);
6627                 goto err_nodev;
6628         }
6629
6630         ioarcb->res_handle = res->res_handle;
6631         if (res->needs_sync_complete) {
6632                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6633                 res->needs_sync_complete = 0;
6634         }
6635         list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6636         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6637         ipr_send_command(ipr_cmd);
6638         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6639         return 0;
6640
6641 err_nodev:
6642         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6643         memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6644         scsi_cmd->result = (DID_NO_CONNECT << 16);
6645         scsi_cmd->scsi_done(scsi_cmd);
6646         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6647         return 0;
6648 }
6649
6650 /**
6651  * ipr_ioctl - IOCTL handler
6652  * @sdev:       scsi device struct
6653  * @cmd:        IOCTL cmd
6654  * @arg:        IOCTL arg
6655  *
6656  * Return value:
6657  *      0 on success / other on failure
6658  **/
6659 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6660 {
6661         struct ipr_resource_entry *res;
6662
6663         res = (struct ipr_resource_entry *)sdev->hostdata;
6664         if (res && ipr_is_gata(res)) {
6665                 if (cmd == HDIO_GET_IDENTITY)
6666                         return -ENOTTY;
6667                 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6668         }
6669
6670         return -EINVAL;
6671 }
6672
6673 /**
6674  * ipr_ioa_info - Get information about the card/driver
6675  * @host:       scsi host struct
6676  *
6677  * Return value:
6678  *      pointer to buffer with description string
6679  **/
6680 static const char *ipr_ioa_info(struct Scsi_Host *host)
6681 {
6682         static char buffer[512];
6683         struct ipr_ioa_cfg *ioa_cfg;
6684         unsigned long lock_flags = 0;
6685
6686         ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6687
6688         spin_lock_irqsave(host->host_lock, lock_flags);
6689         sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6690         spin_unlock_irqrestore(host->host_lock, lock_flags);
6691
6692         return buffer;
6693 }
6694
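/* SCSI host template describing this driver's midlayer entry points */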
6695 static struct scsi_host_template driver_template = {
6696         .module = THIS_MODULE,
6697         .name = "IPR",
6698         .info = ipr_ioa_info,
6699         .ioctl = ipr_ioctl,
6700         .queuecommand = ipr_queuecommand,
6701         .eh_abort_handler = ipr_eh_abort,
6702         .eh_device_reset_handler = ipr_eh_dev_reset,
6703         .eh_host_reset_handler = ipr_eh_host_reset,
6704         .slave_alloc = ipr_slave_alloc,
6705         .slave_configure = ipr_slave_configure,
6706         .slave_destroy = ipr_slave_destroy,
6707         .scan_finished = ipr_scan_finished,
6708         .target_alloc = ipr_target_alloc,
6709         .target_destroy = ipr_target_destroy,
6710         .change_queue_depth = ipr_change_queue_depth,
6711         .bios_param = ipr_biosparam,
6712         .can_queue = IPR_MAX_COMMANDS,
6713         .this_id = -1,
6714         .sg_tablesize = IPR_MAX_SGLIST,
6715         .max_sectors = IPR_IOA_MAX_SECTORS,
6716         .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6717         .use_clustering = ENABLE_CLUSTERING,
6718         .shost_attrs = ipr_ioa_attrs,
6719         .sdev_attrs = ipr_dev_attrs,
6720         .proc_name = IPR_NAME,
6721 };
6722
6723 /**
6724  * ipr_ata_phy_reset - libata phy_reset handler
6725  * @ap:         ata port to reset
6726  *
6727  **/
6728 static void ipr_ata_phy_reset(struct ata_port *ap)
6729 {
6730         unsigned long flags;
6731         struct ipr_sata_port *sata_port = ap->private_data;
6732         struct ipr_resource_entry *res = sata_port->res;
6733         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6734         int rc;
6735
6736         ENTER;
6737         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6738         while (ioa_cfg->in_reset_reload) {
6739                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6740                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6741                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6742         }
6743
6744         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6745                 goto out_unlock;
6746
6747         rc = ipr_device_reset(ioa_cfg, res);
6748
6749         if (rc) {
6750                 ap->link.device[0].class = ATA_DEV_NONE;
6751                 goto out_unlock;
6752         }
6753
6754         ap->link.device[0].class = res->ata_class;
6755         if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6756                 ap->link.device[0].class = ATA_DEV_NONE;
6757
6758 out_unlock:
6759         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6760         LEAVE;
6761 }
6762
6763 /**
6764  * ipr_ata_post_internal - Cleanup after an internal command
6765  * @qc: ATA queued command
6766  *
6767  * Return value:
6768  *      none
6769  **/
6770 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6771 {
6772         struct ipr_sata_port *sata_port = qc->ap->private_data;
6773         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6774         struct ipr_cmnd *ipr_cmd;
6775         struct ipr_hrr_queue *hrrq;
6776         unsigned long flags;
6777
6778         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6779         while (ioa_cfg->in_reset_reload) {
6780                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6781                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6782                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6783         }
6784
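        /* If the internal command is still pending, reset the device
         * so the outstanding op is returned to us.
         */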
6785         for_each_hrrq(hrrq, ioa_cfg) {
6786                 spin_lock(&hrrq->_lock);
6787                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6788                         if (ipr_cmd->qc == qc) {
6789                                 ipr_device_reset(ioa_cfg, sata_port->res);
6790                                 break;
6791                         }
6792                 }
6793                 spin_unlock(&hrrq->_lock);
6794         }
6795         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6796 }
6797
6798 /**
6799  * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6800  * @regs:       destination
6801  * @tf: source ATA taskfile
6802  *
6803  * Return value:
6804  *      none
6805  **/
6806 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6807                              struct ata_taskfile *tf)
6808 {
6809         regs->feature = tf->feature;
6810         regs->nsect = tf->nsect;
6811         regs->lbal = tf->lbal;
6812         regs->lbam = tf->lbam;
6813         regs->lbah = tf->lbah;
6814         regs->device = tf->device;
6815         regs->command = tf->command;
6816         regs->hob_feature = tf->hob_feature;
6817         regs->hob_nsect = tf->hob_nsect;
6818         regs->hob_lbal = tf->hob_lbal;
6819         regs->hob_lbam = tf->hob_lbam;
6820         regs->hob_lbah = tf->hob_lbah;
6821         regs->ctl = tf->ctl;
6822 }
6823
6824 /**
6825  * ipr_sata_done - done function for SATA commands
6826  * @ipr_cmd:    ipr command struct
6827  *
6828  * This function is invoked by the interrupt handler for
6829  * ops generated by the SCSI mid-layer to SATA devices
6830  *
6831  * Return value:
6832  *      none
6833  **/
6834 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6835 {
6836         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6837         struct ata_queued_cmd *qc = ipr_cmd->qc;
6838         struct ipr_sata_port *sata_port = qc->ap->private_data;
6839         struct ipr_resource_entry *res = sata_port->res;
6840         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6841
6842         spin_lock(&ipr_cmd->hrrq->_lock);
6843         if (ipr_cmd->ioa_cfg->sis64)
6844                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6845                        sizeof(struct ipr_ioasa_gata));
6846         else
6847                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6848                        sizeof(struct ipr_ioasa_gata));
6849         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6850
6851         if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6852                 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6853
6854         if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6855                 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6856         else
6857                 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6858         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6859         spin_unlock(&ipr_cmd->hrrq->_lock);
6860         ata_qc_complete(qc);
6861 }
6862
6863 /**
6864  * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6865  * @ipr_cmd:    ipr command struct
6866  * @qc:         ATA queued command
6867  *
6868  **/
6869 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6870                                   struct ata_queued_cmd *qc)
6871 {
6872         u32 ioadl_flags = 0;
6873         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6874         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6875         struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6876         int len = qc->nbytes;
6877         struct scatterlist *sg;
6878         unsigned int si;
6879         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6880
6881         if (len == 0)
6882                 return;
6883
6884         if (qc->dma_dir == DMA_TO_DEVICE) {
6885                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6886                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6887         } else if (qc->dma_dir == DMA_FROM_DEVICE)
6888                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6889
6890         ioarcb->data_transfer_length = cpu_to_be32(len);
6891         ioarcb->ioadl_len =
6892                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6893         ioarcb->u.sis64_addr_data.data_ioadl_addr =
6894                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6895
6896         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6897                 ioadl64->flags = cpu_to_be32(ioadl_flags);
6898                 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6899                 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6900
6901                 last_ioadl64 = ioadl64;
6902                 ioadl64++;
6903         }
6904
6905         if (likely(last_ioadl64))
6906                 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6907 }
6908
6909 /**
6910  * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6911  * @ipr_cmd:    ipr command struct
6912  * @qc:         ATA queued command
6913  *
6914  **/
6915 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6916                                 struct ata_queued_cmd *qc)
6917 {
6918         u32 ioadl_flags = 0;
6919         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6920         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6921         struct ipr_ioadl_desc *last_ioadl = NULL;
6922         int len = qc->nbytes;
6923         struct scatterlist *sg;
6924         unsigned int si;
6925
6926         if (len == 0)
6927                 return;
6928
6929         if (qc->dma_dir == DMA_TO_DEVICE) {
6930                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6931                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6932                 ioarcb->data_transfer_length = cpu_to_be32(len);
6933                 ioarcb->ioadl_len =
6934                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6935         } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6936                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6937                 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6938                 ioarcb->read_ioadl_len =
6939                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6940         }
6941
6942         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6943                 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6944                 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6945
6946                 last_ioadl = ioadl;
6947                 ioadl++;
6948         }
6949
6950         if (likely(last_ioadl))
6951                 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6952 }
6953
6954 /**
6955  * ipr_qc_defer - Get a free ipr_cmd
6956  * @qc: queued command
6957  *
6958  * Return value:
6959  *      0 on success / ATA_DEFER_LINK if no command blocks are free
6960  **/
6961 static int ipr_qc_defer(struct ata_queued_cmd *qc)
6962 {
6963         struct ata_port *ap = qc->ap;
6964         struct ipr_sata_port *sata_port = ap->private_data;
6965         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6966         struct ipr_cmnd *ipr_cmd;
6967         struct ipr_hrr_queue *hrrq;
6968         int hrrq_id;
6969
6970         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6971         hrrq = &ioa_cfg->hrrq[hrrq_id];
6972
6973         qc->lldd_task = NULL;
6974         spin_lock(&hrrq->_lock);
6975         if (unlikely(hrrq->ioa_is_dead)) {
6976                 spin_unlock(&hrrq->_lock);
6977                 return 0;
6978         }
6979
6980         if (unlikely(!hrrq->allow_cmds)) {
6981                 spin_unlock(&hrrq->_lock);
6982                 return ATA_DEFER_LINK;
6983         }
6984
6985         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6986         if (ipr_cmd == NULL) {
6987                 spin_unlock(&hrrq->_lock);
6988                 return ATA_DEFER_LINK;
6989         }
6990
6991         qc->lldd_task = ipr_cmd;
6992         spin_unlock(&hrrq->_lock);
6993         return 0;
6994 }
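     /*
      * Returning 0 with qc->lldd_task still NULL (the dead-IOA case above)
      * lets the command reach ipr_qc_issue(), which then fails it with
      * AC_ERR_SYSTEM; returning ATA_DEFER_LINK instead asks libata to
      * retry the command later, once command blocks free up.
      */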
6995
6996 /**
6997  * ipr_qc_issue - Issue a SATA qc to a device
6998  * @qc: queued command
6999  *
7000  * Return value:
7001  *      0 on success / AC_ERR_SYSTEM or AC_ERR_INVALID on failure
7002  **/
7003 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
7004 {
7005         struct ata_port *ap = qc->ap;
7006         struct ipr_sata_port *sata_port = ap->private_data;
7007         struct ipr_resource_entry *res = sata_port->res;
7008         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7009         struct ipr_cmnd *ipr_cmd;
7010         struct ipr_ioarcb *ioarcb;
7011         struct ipr_ioarcb_ata_regs *regs;
7012
7013         if (qc->lldd_task == NULL)
7014                 ipr_qc_defer(qc);
7015
7016         ipr_cmd = qc->lldd_task;
7017         if (ipr_cmd == NULL)
7018                 return AC_ERR_SYSTEM;
7019
7020         qc->lldd_task = NULL;
7021         spin_lock(&ipr_cmd->hrrq->_lock);
7022         if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
7023                         ipr_cmd->hrrq->ioa_is_dead)) {
7024                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7025                 spin_unlock(&ipr_cmd->hrrq->_lock);
7026                 return AC_ERR_SYSTEM;
7027         }
7028
7029         ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
7030         ioarcb = &ipr_cmd->ioarcb;
7031
7032         if (ioa_cfg->sis64) {
7033                 regs = &ipr_cmd->i.ata_ioadl.regs;
7034                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
7035         } else
7036                 regs = &ioarcb->u.add_data.u.regs;
7037
7038         memset(regs, 0, sizeof(*regs));
7039         ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
7040
7041         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7042         ipr_cmd->qc = qc;
7043         ipr_cmd->done = ipr_sata_done;
7044         ipr_cmd->ioarcb.res_handle = res->res_handle;
7045         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
7046         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
7047         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
7048         ipr_cmd->dma_use_sg = qc->n_elem;
7049
7050         if (ioa_cfg->sis64)
7051                 ipr_build_ata_ioadl64(ipr_cmd, qc);
7052         else
7053                 ipr_build_ata_ioadl(ipr_cmd, qc);
7054
7055         regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
7056         ipr_copy_sata_tf(regs, &qc->tf);
7057         memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
7058         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
7059
7060         switch (qc->tf.protocol) {
7061         case ATA_PROT_NODATA:
7062         case ATA_PROT_PIO:
7063                 break;
7064
7065         case ATA_PROT_DMA:
7066                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7067                 break;
7068
7069         case ATAPI_PROT_PIO:
7070         case ATAPI_PROT_NODATA:
7071                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7072                 break;
7073
7074         case ATAPI_PROT_DMA:
7075                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7076                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7077                 break;
7078
7079         default:
7080                 WARN_ON(1);
7081                 spin_unlock(&ipr_cmd->hrrq->_lock);
7082                 return AC_ERR_INVALID;
7083         }
7084
7085         ipr_send_command(ipr_cmd);
7086         spin_unlock(&ipr_cmd->hrrq->_lock);
7087
7088         return 0;
7089 }
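     /*
      * The protocol switch above maps libata taskfile protocols onto the
      * adapter's ATA flags: DMA protocols set IPR_ATA_FLAG_XFER_TYPE_DMA,
      * ATAPI protocols add IPR_ATA_FLAG_PACKET_CMD, and PIO and non-data
      * commands need no extra flags.
      */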
7090
7091 /**
7092  * ipr_qc_fill_rtf - Read result TF
7093  * @qc: ATA queued command
7094  *
7095  * Return value:
7096  *      true
7097  **/
7098 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
7099 {
7100         struct ipr_sata_port *sata_port = qc->ap->private_data;
7101         struct ipr_ioasa_gata *g = &sata_port->ioasa;
7102         struct ata_taskfile *tf = &qc->result_tf;
7103
7104         tf->feature = g->error;
7105         tf->nsect = g->nsect;
7106         tf->lbal = g->lbal;
7107         tf->lbam = g->lbam;
7108         tf->lbah = g->lbah;
7109         tf->device = g->device;
7110         tf->command = g->status;
7111         tf->hob_nsect = g->hob_nsect;
7112         tf->hob_lbal = g->hob_lbal;
7113         tf->hob_lbam = g->hob_lbam;
7114         tf->hob_lbah = g->hob_lbah;
7115
7116         return true;
7117 }
7118
7119 static struct ata_port_operations ipr_sata_ops = {
7120         .phy_reset = ipr_ata_phy_reset,
7121         .hardreset = ipr_sata_reset,
7122         .post_internal_cmd = ipr_ata_post_internal,
7123         .qc_prep = ata_noop_qc_prep,
7124         .qc_defer = ipr_qc_defer,
7125         .qc_issue = ipr_qc_issue,
7126         .qc_fill_rtf = ipr_qc_fill_rtf,
7127         .port_start = ata_sas_port_start,
7128         .port_stop = ata_sas_port_stop
7129 };
7130
7131 static struct ata_port_info sata_port_info = {
7132         .flags          = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
7133                           ATA_FLAG_SAS_HOST,
7134         .pio_mask       = ATA_PIO4_ONLY,
7135         .mwdma_mask     = ATA_MWDMA2,
7136         .udma_mask      = ATA_UDMA6,
7137         .port_ops       = &ipr_sata_ops
7138 };
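     /*
      * The masks above advertise up to PIO mode 4, MWDMA mode 2 and UDMA
      * mode 6 to libata, and ATA_FLAG_SAS_HOST marks the port as sitting
      * behind a SAS/SCSI host adapter rather than a native SATA controller.
      */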
7139
7140 #ifdef CONFIG_PPC_PSERIES
7141 static const u16 ipr_blocked_processors[] = {
7142         PVR_NORTHSTAR,
7143         PVR_PULSAR,
7144         PVR_POWER4,
7145         PVR_ICESTAR,
7146         PVR_SSTAR,
7147         PVR_POWER4p,
7148         PVR_630,
7149         PVR_630p
7150 };
7151
7152 /**
7153  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7154  * @ioa_cfg:    ioa cfg struct
7155  *
7156  * Adapters that use Gemstone revision < 3.1 do not work reliably on
7157  * certain pSeries hardware. This function determines if the given
7158  * adapter is in one of these configurations or not.
7159  *
7160  * Return value:
7161  *      1 if adapter is not supported / 0 if adapter is supported
7162  **/
7163 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
7164 {
7165         int i;
7166
7167         if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
7168                 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
7169                         if (pvr_version_is(ipr_blocked_processors[i]))
7170                                 return 1;
7171                 }
7172         }
7173         return 0;
7174 }
7175 #else
7176 #define ipr_invalid_adapter(ioa_cfg) 0
7177 #endif
7178
7179 /**
7180  * ipr_ioa_bringdown_done - IOA bring down completion.
7181  * @ipr_cmd:    ipr command struct
7182  *
7183  * This function processes the completion of an adapter bring down.
7184  * It wakes any reset sleepers.
7185  *
7186  * Return value:
7187  *      IPR_RC_JOB_RETURN
7188  **/
7189 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
7190 {
7191         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7192         int i;
7193
7194         ENTER;
7195         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
7196                 ipr_trace;
7197                 ioa_cfg->scsi_unblock = 1;
7198                 schedule_work(&ioa_cfg->work_q);
7199         }
7200
7201         ioa_cfg->in_reset_reload = 0;
7202         ioa_cfg->reset_retries = 0;
7203         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7204                 spin_lock(&ioa_cfg->hrrq[i]._lock);
7205                 ioa_cfg->hrrq[i].ioa_is_dead = 1;
7206                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7207         }
7208         wmb();
7209
7210         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7211         wake_up_all(&ioa_cfg->reset_wait_q);
7212         LEAVE;
7213
7214         return IPR_RC_JOB_RETURN;
7215 }
7216
7217 /**
7218  * ipr_ioa_reset_done - IOA reset completion.
7219  * @ipr_cmd:    ipr command struct
7220  *
7221  * This function processes the completion of an adapter reset.
7222  * It schedules any necessary mid-layer add/removes and
7223  * wakes any reset sleepers.
7224  *
7225  * Return value:
7226  *      IPR_RC_JOB_RETURN
7227  **/
7228 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7229 {
7230         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7231         struct ipr_resource_entry *res;
7232         int j;
7233
7234         ENTER;
7235         ioa_cfg->in_reset_reload = 0;
7236         for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7237                 spin_lock(&ioa_cfg->hrrq[j]._lock);
7238                 ioa_cfg->hrrq[j].allow_cmds = 1;
7239                 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7240         }
7241         wmb();
7242         ioa_cfg->reset_cmd = NULL;
7243         ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
7244
7245         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
7246                 if (res->add_to_ml || res->del_from_ml) {
7247                         ipr_trace;
7248                         break;
7249                 }
7250         }
7251         schedule_work(&ioa_cfg->work_q);
7252
7253         for (j = 0; j < IPR_NUM_HCAMS; j++) {
7254                 list_del_init(&ioa_cfg->hostrcb[j]->queue);
7255                 if (j < IPR_NUM_LOG_HCAMS)
7256                         ipr_send_hcam(ioa_cfg,
7257                                 IPR_HCAM_CDB_OP_CODE_LOG_DATA,
7258                                 ioa_cfg->hostrcb[j]);
7259                 else
7260                         ipr_send_hcam(ioa_cfg,
7261                                 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
7262                                 ioa_cfg->hostrcb[j]);
7263         }
7264
7265         scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
7266         dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7267
7268         ioa_cfg->reset_retries = 0;
7269         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7270         wake_up_all(&ioa_cfg->reset_wait_q);
7271
7272         ioa_cfg->scsi_unblock = 1;
7273         schedule_work(&ioa_cfg->work_q);
7274         LEAVE;
7275         return IPR_RC_JOB_RETURN;
7276 }
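     /*
      * ipr_ioa_reset_done() is the final job_step in the bring-up chain,
      * which advances one step per adapter response: identify_hrrq ->
      * std_inquiry -> page 0/3/0xD0/0xC4 inquiries -> set caching
      * parameters -> query_ioa_cfg -> init_res_table -> mode sense/select
      * page 24 (dual IOA RAID only) and page 28 -> set_supported_devs ->
      * here.
      */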
7277
7278 /**
7279  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7280  * @supported_dev:      supported device struct
7281  * @vpids:              vendor product id struct
7282  *
7283  * Return value:
7284  *      none
7285  **/
7286 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7287                                  struct ipr_std_inq_vpids *vpids)
7288 {
7289         memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7290         memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7291         supported_dev->num_records = 1;
7292         supported_dev->data_length =
7293                 cpu_to_be16(sizeof(struct ipr_supported_device));
7294         supported_dev->reserved = 0;
7295 }
7296
7297 /**
7298  * ipr_set_supported_devs - Send Set Supported Devices for a device
7299  * @ipr_cmd:    ipr command struct
7300  *
7301  * This function sends a Set Supported Devices to the adapter
7302  *
7303  * Return value:
7304  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7305  **/
7306 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7307 {
7308         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7309         struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
7310         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7311         struct ipr_resource_entry *res = ipr_cmd->u.res;
7312
7313         ipr_cmd->job_step = ipr_ioa_reset_done;
7314
7315         list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
7316                 if (!ipr_is_scsi_disk(res))
7317                         continue;
7318
7319                 ipr_cmd->u.res = res;
7320                 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
7321
7322                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7323                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7324                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7325
7326                 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
7327                 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
7328                 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7329                 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7330
7331                 ipr_init_ioadl(ipr_cmd,
7332                                ioa_cfg->vpd_cbs_dma +
7333                                  offsetof(struct ipr_misc_cbs, supp_dev),
7334                                sizeof(struct ipr_supported_device),
7335                                IPR_IOADL_FLAGS_WRITE_LAST);
7336
7337                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7338                            IPR_SET_SUP_DEVICE_TIMEOUT);
7339
7340                 if (!ioa_cfg->sis64)
7341                         ipr_cmd->job_step = ipr_set_supported_devs;
7342                 LEAVE;
7343                 return IPR_RC_JOB_RETURN;
7344         }
7345
7346         LEAVE;
7347         return IPR_RC_JOB_CONTINUE;
7348 }
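     /*
      * On legacy (non-SIS-64) adapters this step re-queues itself once per
      * SCSI disk: ipr_cmd->u.res acts as a cursor into used_res_q, and
      * list_for_each_entry_continue() resumes after the last device sent.
      * On SIS-64 adapters job_step is left at ipr_ioa_reset_done, so a
      * single set-all-supported command is issued before moving on.
      */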
7349
7350 /**
7351  * ipr_get_mode_page - Locate specified mode page
7352  * @mode_pages: mode page buffer
7353  * @page_code:  page code to find
7354  * @len:                minimum required length for mode page
7355  *
7356  * Return value:
7357  *      pointer to mode page / NULL on failure
7358  **/
7359 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7360                                u32 page_code, u32 len)
7361 {
7362         struct ipr_mode_page_hdr *mode_hdr;
7363         u32 page_length;
7364         u32 length;
7365
7366         if (!mode_pages || (mode_pages->hdr.length == 0))
7367                 return NULL;
7368
7369         length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7370         mode_hdr = (struct ipr_mode_page_hdr *)
7371                 (mode_pages->data + mode_pages->hdr.block_desc_len);
7372
7373         while (length) {
7374                 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7375                         if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7376                                 return mode_hdr;
7377                         break;
7378                 } else {
7379                         page_length = (sizeof(struct ipr_mode_page_hdr) +
7380                                        mode_hdr->page_length);
7381                         length -= page_length;
7382                         mode_hdr = (struct ipr_mode_page_hdr *)
7383                                 ((unsigned long)mode_hdr + page_length);
7384                 }
7385         }
7386         return NULL;
7387 }
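     /*
      * The length arithmetic above follows MODE SENSE(6) conventions: the
      * mode data length byte excludes itself (hence the +1), the mode
      * parameter header is 4 bytes, and any block descriptors precede the
      * pages. For example, hdr.length = 0x20 with an 8-byte block
      * descriptor leaves 0x21 - 4 - 8 = 0x15 bytes of mode page data.
      */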
7388
7389 /**
7390  * ipr_check_term_power - Check for term power errors
7391  * @ioa_cfg:    ioa config struct
7392  * @mode_pages: IOAFP mode pages buffer
7393  *
7394  * Check the IOAFP's mode page 28 for term power errors
7395  *
7396  * Return value:
7397  *      nothing
7398  **/
7399 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7400                                  struct ipr_mode_pages *mode_pages)
7401 {
7402         int i;
7403         int entry_length;
7404         struct ipr_dev_bus_entry *bus;
7405         struct ipr_mode_page28 *mode_page;
7406
7407         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7408                                       sizeof(struct ipr_mode_page28));
7409         if (!mode_page)         /* page 28 missing; nothing to check */
7410                 return;
7411         entry_length = mode_page->entry_length;
7412         bus = mode_page->bus;
7413
7414         for (i = 0; i < mode_page->num_entries; i++) {
7415                 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7416                         dev_err(&ioa_cfg->pdev->dev,
7417                                 "Term power is absent on scsi bus %d\n",
7418                                 bus->res_addr.bus);
7419                 }
7420
7421                 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7422         }
7423 }
7424
7425 /**
7426  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7427  * @ioa_cfg:    ioa config struct
7428  *
7429  * Looks through the config table checking for SES devices. If
7430  * the SES device is in the SES table indicating a maximum SCSI
7431  * bus speed, the speed is limited for the bus.
7432  *
7433  * Return value:
7434  *      none
7435  **/
7436 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7437 {
7438         u32 max_xfer_rate;
7439         int i;
7440
7441         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7442                 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7443                                                        ioa_cfg->bus_attr[i].bus_width);
7444
7445                 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7446                         ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7447         }
7448 }
7449
7450 /**
7451  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7452  * @ioa_cfg:    ioa config struct
7453  * @mode_pages: mode page 28 buffer
7454  *
7455  * Updates mode page 28 based on driver configuration
7456  *
7457  * Return value:
7458  *      none
7459  **/
7460 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7461                                           struct ipr_mode_pages *mode_pages)
7462 {
7463         int i, entry_length;
7464         struct ipr_dev_bus_entry *bus;
7465         struct ipr_bus_attributes *bus_attr;
7466         struct ipr_mode_page28 *mode_page;
7467
7468         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7469                                       sizeof(struct ipr_mode_page28));
7470         if (!mode_page)         /* page 28 missing; nothing to update */
7471                 return;
7472         entry_length = mode_page->entry_length;
7473         /* Loop for each device bus entry */
7474         for (i = 0, bus = mode_page->bus;
7475              i < mode_page->num_entries;
7476              i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7477                 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7478                         dev_err(&ioa_cfg->pdev->dev,
7479                                 "Invalid resource address reported: 0x%08X\n",
7480                                 IPR_GET_PHYS_LOC(bus->res_addr));
7481                         continue;
7482                 }
7483
7484                 bus_attr = &ioa_cfg->bus_attr[i];
7485                 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7486                 bus->bus_width = bus_attr->bus_width;
7487                 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7488                 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7489                 if (bus_attr->qas_enabled)
7490                         bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7491                 else
7492                         bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7493         }
7494 }
7495
7496 /**
7497  * ipr_build_mode_select - Build a mode select command
7498  * @ipr_cmd:    ipr command struct
7499  * @res_handle: resource handle to send command to
7500  * @parm:               Byte 1 of the Mode Select CDB (PF/SP bits)
7501  * @dma_addr:   DMA buffer address
7502  * @xfer_len:   data transfer length
7503  *
7504  * Return value:
7505  *      none
7506  **/
7507 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7508                                   __be32 res_handle, u8 parm,
7509                                   dma_addr_t dma_addr, u8 xfer_len)
7510 {
7511         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7512
7513         ioarcb->res_handle = res_handle;
7514         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7515         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7516         ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7517         ioarcb->cmd_pkt.cdb[1] = parm;
7518         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7519
7520         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7521 }
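     /*
      * Callers below pass parm = 0x11, which in byte 1 of a MODE SELECT(6)
      * CDB is PF (page format, 0x10) plus SP (save pages, 0x01), asking
      * the IOA to both apply and persist the page data.
      */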
7522
7523 /**
7524  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7525  * @ipr_cmd:    ipr command struct
7526  *
7527  * This function sets up the SCSI bus attributes and sends
7528  * a Mode Select for Page 28 to activate them.
7529  *
7530  * Return value:
7531  *      IPR_RC_JOB_RETURN
7532  **/
7533 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7534 {
7535         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7536         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7537         int length;
7538
7539         ENTER;
7540         ipr_scsi_bus_speed_limit(ioa_cfg);
7541         ipr_check_term_power(ioa_cfg, mode_pages);
7542         ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7543         length = mode_pages->hdr.length + 1;
7544         mode_pages->hdr.length = 0;
7545
7546         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7547                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7548                               length);
7549
7550         ipr_cmd->job_step = ipr_set_supported_devs;
7551         ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7552                                     struct ipr_resource_entry, queue);
7553         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7554
7555         LEAVE;
7556         return IPR_RC_JOB_RETURN;
7557 }
7558
7559 /**
7560  * ipr_build_mode_sense - Builds a mode sense command
7561  * @ipr_cmd:    ipr command struct
7562  * @res_handle: resource handle to send command to
7563  * @parm:               Byte 2 of mode sense command
7564  * @dma_addr:   DMA address of mode sense buffer
7565  * @xfer_len:   Size of DMA buffer
7566  *
7567  * Return value:
7568  *      none
7569  **/
7570 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7571                                  __be32 res_handle,
7572                                  u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7573 {
7574         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7575
7576         ioarcb->res_handle = res_handle;
7577         ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7578         ioarcb->cmd_pkt.cdb[2] = parm;
7579         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7580         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7581
7582         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7583 }
7584
7585 /**
7586  * ipr_reset_cmd_failed - Handle failure of IOA reset command
7587  * @ipr_cmd:    ipr command struct
7588  *
7589  * This function handles the failure of an IOA bringup command.
7590  *
7591  * Return value:
7592  *      IPR_RC_JOB_RETURN
7593  **/
7594 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7595 {
7596         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7597         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7598
7599         dev_err(&ioa_cfg->pdev->dev,
7600                 "0x%02X failed with IOASC: 0x%08X\n",
7601                 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7602
7603         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7604         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7605         return IPR_RC_JOB_RETURN;
7606 }
7607
7608 /**
7609  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7610  * @ipr_cmd:    ipr command struct
7611  *
7612  * This function handles the failure of a Mode Sense to the IOAFP.
7613  * Some adapters do not handle all mode pages.
7614  *
7615  * Return value:
7616  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7617  **/
7618 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7619 {
7620         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7621         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7622
7623         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7624                 ipr_cmd->job_step = ipr_set_supported_devs;
7625                 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7626                                             struct ipr_resource_entry, queue);
7627                 return IPR_RC_JOB_CONTINUE;
7628         }
7629
7630         return ipr_reset_cmd_failed(ipr_cmd);
7631 }
7632
7633 /**
7634  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7635  * @ipr_cmd:    ipr command struct
7636  *
7637  * This function sends a Page 28 mode sense to the IOA to
7638  * retrieve SCSI bus attributes.
7639  *
7640  * Return value:
7641  *      IPR_RC_JOB_RETURN
7642  **/
7643 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7644 {
7645         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7646
7647         ENTER;
7648         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7649                              0x28, ioa_cfg->vpd_cbs_dma +
7650                              offsetof(struct ipr_misc_cbs, mode_pages),
7651                              sizeof(struct ipr_mode_pages));
7652
7653         ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7654         ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7655
7656         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7657
7658         LEAVE;
7659         return IPR_RC_JOB_RETURN;
7660 }
7661
7662 /**
7663  * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7664  * @ipr_cmd:    ipr command struct
7665  *
7666  * This function enables dual IOA RAID support if possible.
7667  *
7668  * Return value:
7669  *      IPR_RC_JOB_RETURN
7670  **/
7671 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7672 {
7673         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7674         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7675         struct ipr_mode_page24 *mode_page;
7676         int length;
7677
7678         ENTER;
7679         mode_page = ipr_get_mode_page(mode_pages, 0x24,
7680                                       sizeof(struct ipr_mode_page24));
7681
7682         if (mode_page)
7683                 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7684
7685         length = mode_pages->hdr.length + 1;
7686         mode_pages->hdr.length = 0;
7687
7688         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7689                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7690                               length);
7691
7692         ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7693         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7694
7695         LEAVE;
7696         return IPR_RC_JOB_RETURN;
7697 }
7698
7699 /**
7700  * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7701  * @ipr_cmd:    ipr command struct
7702  *
7703  * This function handles the failure of a Mode Sense to the IOAFP.
7704  * Some adapters do not handle all mode pages.
7705  *
7706  * Return value:
7707  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7708  **/
7709 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7710 {
7711         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7712
7713         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7714                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7715                 return IPR_RC_JOB_CONTINUE;
7716         }
7717
7718         return ipr_reset_cmd_failed(ipr_cmd);
7719 }
7720
7721 /**
7722  * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7723  * @ipr_cmd:    ipr command struct
7724  *
7725  * This function sends a mode sense to the IOA to retrieve
7726  * the IOA Advanced Function Control mode page.
7727  *
7728  * Return value:
7729  *      IPR_RC_JOB_RETURN
7730  **/
7731 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7732 {
7733         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7734
7735         ENTER;
7736         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7737                              0x24, ioa_cfg->vpd_cbs_dma +
7738                              offsetof(struct ipr_misc_cbs, mode_pages),
7739                              sizeof(struct ipr_mode_pages));
7740
7741         ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7742         ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7743
7744         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7745
7746         LEAVE;
7747         return IPR_RC_JOB_RETURN;
7748 }
7749
7750 /**
7751  * ipr_init_res_table - Initialize the resource table
7752  * @ipr_cmd:    ipr command struct
7753  *
7754  * This function looks through the existing resource table, comparing
7755  * it with the config table. This function will take care of old/new
7756  * devices and schedule adding/removing them from the mid-layer
7757  * as appropriate.
7758  *
7759  * Return value:
7760  *      IPR_RC_JOB_CONTINUE
7761  **/
7762 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7763 {
7764         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7765         struct ipr_resource_entry *res, *temp;
7766         struct ipr_config_table_entry_wrapper cfgtew;
7767         int entries, found, flag, i;
7768         LIST_HEAD(old_res);
7769
7770         ENTER;
7771         if (ioa_cfg->sis64)
7772                 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7773         else
7774                 flag = ioa_cfg->u.cfg_table->hdr.flags;
7775
7776         if (flag & IPR_UCODE_DOWNLOAD_REQ)
7777                 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7778
7779         list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7780                 list_move_tail(&res->queue, &old_res);
7781
7782         if (ioa_cfg->sis64)
7783                 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7784         else
7785                 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7786
7787         for (i = 0; i < entries; i++) {
7788                 if (ioa_cfg->sis64)
7789                         cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7790                 else
7791                         cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7792                 found = 0;
7793
7794                 list_for_each_entry_safe(res, temp, &old_res, queue) {
7795                         if (ipr_is_same_device(res, &cfgtew)) {
7796                                 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7797                                 found = 1;
7798                                 break;
7799                         }
7800                 }
7801
7802                 if (!found) {
7803                         if (list_empty(&ioa_cfg->free_res_q)) {
7804                                 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7805                                 break;
7806                         }
7807
7808                         found = 1;
7809                         res = list_entry(ioa_cfg->free_res_q.next,
7810                                          struct ipr_resource_entry, queue);
7811                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7812                         ipr_init_res_entry(res, &cfgtew);
7813                         res->add_to_ml = 1;
7814                 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7815                         res->sdev->allow_restart = 1;
7816
7817                 if (found)
7818                         ipr_update_res_entry(res, &cfgtew);
7819         }
7820
7821         list_for_each_entry_safe(res, temp, &old_res, queue) {
7822                 if (res->sdev) {
7823                         res->del_from_ml = 1;
7824                         res->res_handle = IPR_INVALID_RES_HANDLE;
7825                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7826                 }
7827         }
7828
7829         list_for_each_entry_safe(res, temp, &old_res, queue) {
7830                 ipr_clear_res_target(res);
7831                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7832         }
7833
7834         if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7835                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7836         else
7837                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7838
7839         LEAVE;
7840         return IPR_RC_JOB_CONTINUE;
7841 }
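     /*
      * Reconciliation works by emptying used_res_q into old_res, matching
      * each config table entry back out of old_res (or allocating from
      * free_res_q and flagging add_to_ml for a new device), then flagging
      * del_from_ml for leftovers that still have a scsi_device attached;
      * any remaining entries go straight back to the free list.
      */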
7842
7843 /**
7844  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7845  * @ipr_cmd:    ipr command struct
7846  *
7847  * This function sends a Query IOA Configuration command
7848  * to the adapter to retrieve the IOA configuration table.
7849  *
7850  * Return value:
7851  *      IPR_RC_JOB_RETURN
7852  **/
7853 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7854 {
7855         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7856         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7857         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7858         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7859
7860         ENTER;
7861         if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7862                 ioa_cfg->dual_raid = 1;
7863         dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7864                  ucode_vpd->major_release, ucode_vpd->card_type,
7865                  ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7866         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7867         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7868
7869         ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7870         ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7871         ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7872         ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7873
7874         ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7875                        IPR_IOADL_FLAGS_READ_LAST);
7876
7877         ipr_cmd->job_step = ipr_init_res_table;
7878
7879         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7880
7881         LEAVE;
7882         return IPR_RC_JOB_RETURN;
7883 }
7884
7885 static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7886 {
7887         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7888
7889         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7890                 return IPR_RC_JOB_CONTINUE;
7891
7892         return ipr_reset_cmd_failed(ipr_cmd);
7893 }
7894
7895 static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7896                                          __be32 res_handle, u8 sa_code)
7897 {
7898         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7899
7900         ioarcb->res_handle = res_handle;
7901         ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7902         ioarcb->cmd_pkt.cdb[1] = sa_code;
7903         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7904 }
7905
7906 /**
7907  * ipr_ioafp_set_caching_parameters - Issue Set Cache Parameters service action
7908  * @ipr_cmd:    ipr command struct
7909  *
7910  * Return value:
7911  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7912  **/
7913 static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7914 {
7915         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7916         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7917         struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7918
7919         ENTER;
7920
7921         ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7922
7923         if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7924                 ipr_build_ioa_service_action(ipr_cmd,
7925                                              cpu_to_be32(IPR_IOA_RES_HANDLE),
7926                                              IPR_IOA_SA_CHANGE_CACHE_PARAMS);
7927
7928                 ioarcb->cmd_pkt.cdb[2] = 0x40;
7929
7930                 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7931                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7932                            IPR_SET_SUP_DEVICE_TIMEOUT);
7933
7934                 LEAVE;
7935                 return IPR_RC_JOB_RETURN;
7936         }
7937
7938         LEAVE;
7939         return IPR_RC_JOB_CONTINUE;
7940 }
7941
7942 /**
7943  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7944  * @ipr_cmd:    ipr command struct
7945  *
7946  * This utility function sends an inquiry to the adapter.
7947  *
7948  * Return value:
7949  *      none
7950  **/
7951 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7952                               dma_addr_t dma_addr, u8 xfer_len)
7953 {
7954         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7955
7956         ENTER;
7957         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7958         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7959
7960         ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7961         ioarcb->cmd_pkt.cdb[1] = flags;
7962         ioarcb->cmd_pkt.cdb[2] = page;
7963         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7964
7965         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7966
7967         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7968         LEAVE;
7969 }
7970
7971 /**
7972  * ipr_inquiry_page_supported - Is the given inquiry page supported
7973  * @page0:              inquiry page 0 buffer
7974  * @page:               page code.
7975  *
7976  * This function determines if the specified inquiry page is supported.
7977  *
7978  * Return value:
7979  *      1 if page is supported / 0 if not
7980  **/
7981 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7982 {
7983         int i;
7984
7985         for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7986                 if (page0->page[i] == page)
7987                         return 1;
7988
7989         return 0;
7990 }
7991
7992 /**
7993  * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
7994  * @ipr_cmd:    ipr command struct
7995  *
7996  * This function sends a Page 0xC4 inquiry to the adapter
7997  * to retrieve software VPD information.
7998  *
7999  * Return value:
8000  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8001  **/
8002 static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
8003 {
8004         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8005         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8006         struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
8007
8008         ENTER;
8009         ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
8010         memset(pageC4, 0, sizeof(*pageC4));
8011
8012         if (ipr_inquiry_page_supported(page0, 0xC4)) {
8013                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
8014                                   (ioa_cfg->vpd_cbs_dma
8015                                    + offsetof(struct ipr_misc_cbs,
8016                                               pageC4_data)),
8017                                   sizeof(struct ipr_inquiry_pageC4));
8018                 return IPR_RC_JOB_RETURN;
8019         }
8020
8021         LEAVE;
8022         return IPR_RC_JOB_CONTINUE;
8023 }
8024
8025 /**
8026  * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
8027  * @ipr_cmd:    ipr command struct
8028  *
8029  * This function sends a Page 0xD0 inquiry to the adapter
8030  * to retrieve adapter capabilities.
8031  *
8032  * Return value:
8033  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8034  **/
8035 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
8036 {
8037         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8038         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8039         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
8040
8041         ENTER;
8042         ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
8043         memset(cap, 0, sizeof(*cap));
8044
8045         if (ipr_inquiry_page_supported(page0, 0xD0)) {
8046                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
8047                                   ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
8048                                   sizeof(struct ipr_inquiry_cap));
8049                 return IPR_RC_JOB_RETURN;
8050         }
8051
8052         LEAVE;
8053         return IPR_RC_JOB_CONTINUE;
8054 }
8055
8056 /**
8057  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
8058  * @ipr_cmd:    ipr command struct
8059  *
8060  * This function sends a Page 3 inquiry to the adapter
8061  * to retrieve software VPD information.
8062  *
8063  * Return value:
8064  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8065  **/
8066 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
8067 {
8068         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8069
8070         ENTER;
8071
8072         ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
8073
8074         ipr_ioafp_inquiry(ipr_cmd, 1, 3,
8075                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
8076                           sizeof(struct ipr_inquiry_page3));
8077
8078         LEAVE;
8079         return IPR_RC_JOB_RETURN;
8080 }
8081
8082 /**
8083  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
8084  * @ipr_cmd:    ipr command struct
8085  *
8086  * This function sends a Page 0 inquiry to the adapter
8087  * to retrieve supported inquiry pages.
8088  *
8089  * Return value:
8090  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8091  **/
8092 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
8093 {
8094         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8095         char type[5];
8096
8097         ENTER;
8098
8099         /* Grab the type out of the VPD and store it away */
8100         memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
8101         type[4] = '\0';
8102         ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
8103
8104         if (ipr_invalid_adapter(ioa_cfg)) {
8105                 dev_err(&ioa_cfg->pdev->dev,
8106                         "Adapter not supported in this hardware configuration.\n");
8107
8108                 if (!ipr_testmode) {
8109                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
8110                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8111                         list_add_tail(&ipr_cmd->queue,
8112                                         &ioa_cfg->hrrq->hrrq_free_q);
8113                         return IPR_RC_JOB_RETURN;
8114                 }
8115         }
8116
8117         ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
8118
8119         ipr_ioafp_inquiry(ipr_cmd, 1, 0,
8120                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
8121                           sizeof(struct ipr_inquiry_page0));
8122
8123         LEAVE;
8124         return IPR_RC_JOB_RETURN;
8125 }
8126
8127 /**
8128  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
8129  * @ipr_cmd:    ipr command struct
8130  *
8131  * This function sends a standard inquiry to the adapter.
8132  *
8133  * Return value:
8134  *      IPR_RC_JOB_RETURN
8135  **/
8136 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
8137 {
8138         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8139
8140         ENTER;
8141         ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
8142
8143         ipr_ioafp_inquiry(ipr_cmd, 0, 0,
8144                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
8145                           sizeof(struct ipr_ioa_vpd));
8146
8147         LEAVE;
8148         return IPR_RC_JOB_RETURN;
8149 }
8150
8151 /**
8152  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
8153  * @ipr_cmd:    ipr command struct
8154  *
8155  * This function sends an Identify Host Request Response Queue
8156  * command to establish the HRRQ with the adapter.
8157  *
8158  * Return value:
8159  *      IPR_RC_JOB_RETURN
8160  **/
8161 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
8162 {
8163         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8164         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
8165         struct ipr_hrr_queue *hrrq;
8166
8167         ENTER;
8168         ipr_cmd->job_step = ipr_ioafp_std_inquiry;
8169         if (ioa_cfg->identify_hrrq_index == 0)
8170                 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
8171
8172         if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
8173                 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
8174
8175                 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
8176                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8177
8178                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8179                 if (ioa_cfg->sis64)
8180                         ioarcb->cmd_pkt.cdb[1] = 0x1;
8181
8182                 if (ioa_cfg->nvectors == 1)
8183                         ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
8184                 else
8185                         ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
8186
8187                 ioarcb->cmd_pkt.cdb[2] =
8188                         ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
8189                 ioarcb->cmd_pkt.cdb[3] =
8190                         ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
8191                 ioarcb->cmd_pkt.cdb[4] =
8192                         ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
8193                 ioarcb->cmd_pkt.cdb[5] =
8194                         ((u64) hrrq->host_rrq_dma) & 0xff;
8195                 ioarcb->cmd_pkt.cdb[7] =
8196                         ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
8197                 ioarcb->cmd_pkt.cdb[8] =
8198                         (sizeof(u32) * hrrq->size) & 0xff;
8199
8200                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8201                         ioarcb->cmd_pkt.cdb[9] =
8202                                         ioa_cfg->identify_hrrq_index;
8203
8204                 if (ioa_cfg->sis64) {
8205                         ioarcb->cmd_pkt.cdb[10] =
8206                                 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
8207                         ioarcb->cmd_pkt.cdb[11] =
8208                                 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
8209                         ioarcb->cmd_pkt.cdb[12] =
8210                                 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
8211                         ioarcb->cmd_pkt.cdb[13] =
8212                                 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
8213                 }
8214
8215                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8216                         ioarcb->cmd_pkt.cdb[14] =
8217                                         ioa_cfg->identify_hrrq_index;
8218
8219                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8220                            IPR_INTERNAL_TIMEOUT);
8221
8222                 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
8223                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8224
8225                 LEAVE;
8226                 return IPR_RC_JOB_RETURN;
8227         }
8228
8229         LEAVE;
8230         return IPR_RC_JOB_CONTINUE;
8231 }
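     /*
      * CDB layout built above for IPR_ID_HOST_RR_Q: bytes 2-5 hold the low
      * 32 bits of the HRRQ DMA address, bytes 10-13 hold the high 32 bits
      * on SIS-64, bytes 7-8 carry the queue size in bytes, and bytes 9 and
      * 14 select the queue index when multiple HRRQs are enabled.
      */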
8232
8233 /**
8234  * ipr_reset_timer_done - Adapter reset timer function
8235  * @t: Timer context used to fetch ipr command struct
8236  *
8237  * Description: This function is used in adapter reset processing
8238  * for timing events. If the reset_cmd pointer in the IOA
8239  * config struct no longer points at this command, we are doing
8240  * nested resets and fail_all_ops will take care of freeing the
8241  * command block.
8242  *
8243  * Return value:
8244  *      none
8245  **/
8246 static void ipr_reset_timer_done(struct timer_list *t)
8247 {
8248         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
8249         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8250         unsigned long lock_flags = 0;
8251
8252         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8253
8254         if (ioa_cfg->reset_cmd == ipr_cmd) {
8255                 list_del(&ipr_cmd->queue);
8256                 ipr_cmd->done(ipr_cmd);
8257         }
8258
8259         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8260 }
8261
8262 /**
8263  * ipr_reset_start_timer - Start a timer for adapter reset job
8264  * @ipr_cmd:    ipr command struct
8265  * @timeout:    timeout value
8266  *
8267  * Description: This function is used in adapter reset processing
8268  * for timing events. If the reset_cmd pointer in the IOA
8269  * config struct no longer points at this command, we are doing
8270  * nested resets and fail_all_ops will take care of freeing the
8271  * command block.
8272  *
8273  * Return value:
8274  *      none
8275  **/
8276 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
8277                                   unsigned long timeout)
8278 {
8279
8280         ENTER;
8281         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8282         ipr_cmd->done = ipr_reset_ioa_job;
8283
8284         ipr_cmd->timer.expires = jiffies + timeout;
8285         ipr_cmd->timer.function = ipr_reset_timer_done;
8286         add_timer(&ipr_cmd->timer);
8287 }
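     /*
      * The command is parked on hrrq_pending_q while the timer runs; when
      * it expires, ipr_reset_timer_done() only dequeues and completes the
      * command if ioa_cfg->reset_cmd still points at it, guarding against
      * the nested-reset case described above.
      */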
8288
8289 /**
8290  * ipr_init_ioa_mem - Initialize ioa_cfg control block
8291  * @ioa_cfg:    ioa cfg struct
8292  *
8293  * Return value:
8294  *      nothing
8295  **/
8296 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
8297 {
8298         struct ipr_hrr_queue *hrrq;
8299
8300         for_each_hrrq(hrrq, ioa_cfg) {
8301                 spin_lock(&hrrq->_lock);
8302                 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
8303
8304                 /* Initialize Host RRQ pointers */
8305                 hrrq->hrrq_start = hrrq->host_rrq;
8306                 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8307                 hrrq->hrrq_curr = hrrq->hrrq_start;
8308                 hrrq->toggle_bit = 1;
8309                 spin_unlock(&hrrq->_lock);
8310         }
8311         wmb();
8312
8313         ioa_cfg->identify_hrrq_index = 0;
8314         if (ioa_cfg->hrrq_num == 1)
8315                 atomic_set(&ioa_cfg->hrrq_index, 0);
8316         else
8317                 atomic_set(&ioa_cfg->hrrq_index, 1);
8318
8319         /* Zero out config table */
8320         memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
8321 }
8322
8323 /**
8324  * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8325  * @ipr_cmd:    ipr command struct
8326  *
8327  * Return value:
8328  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8329  **/
8330 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8331 {
8332         unsigned long stage, stage_time;
8333         u32 feedback;
8334         volatile u32 int_reg;
8335         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8336         u64 maskval = 0;
8337
8338         feedback = readl(ioa_cfg->regs.init_feedback_reg);
8339         stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8340         stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8341
8342         ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8343
8344         /* sanity check the stage_time value */
8345         if (stage_time == 0)
8346                 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8347         else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
8348                 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8349         else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8350                 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8351
8352         if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8353                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8354                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8355                 stage_time = ioa_cfg->transop_timeout;
8356                 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8357         } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
8358                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8359                 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8360                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8361                         maskval = IPR_PCII_IPL_STAGE_CHANGE;
8362                         maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8363                         writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8364                         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8365                         return IPR_RC_JOB_CONTINUE;
8366                 }
8367         }
8368
8369         ipr_cmd->timer.expires = jiffies + stage_time * HZ;
8370         ipr_cmd->timer.function = ipr_oper_timeout;
8371         ipr_cmd->done = ipr_reset_ioa_job;
8372         add_timer(&ipr_cmd->timer);
8373
8374         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8375
8376         return IPR_RC_JOB_RETURN;
8377 }
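     /*
      * stage_time comes straight from the adapter's init feedback register
      * and is clamped to sane bounds before being used as the timeout for
      * the next IPL stage. Once the transition-to-operational interrupt is
      * seen, both it and the stage-change interrupt are masked and the job
      * continues directly into HRRQ identification.
      */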
8378
8379 /**
8380  * ipr_reset_enable_ioa - Enable the IOA following a reset.
8381  * @ipr_cmd:    ipr command struct
8382  *
8383  * This function reinitializes some control blocks and
8384  * enables destructive diagnostics on the adapter.
8385  *
8386  * Return value:
8387  *      IPR_RC_JOB_RETURN
8388  **/
8389 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8390 {
8391         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8392         volatile u32 int_reg;
8393         volatile u64 maskval;
8394         int i;
8395
8396         ENTER;
8397         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8398         ipr_init_ioa_mem(ioa_cfg);
8399
8400         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8401                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8402                 ioa_cfg->hrrq[i].allow_interrupts = 1;
8403                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8404         }
8405         if (ioa_cfg->sis64) {
8406                 /* Set the adapter to the correct endian mode. */
8407                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8408                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8409         }
8410
8411         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8412
8413         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8414                 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
8415                        ioa_cfg->regs.clr_interrupt_mask_reg32);
8416                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8417                 return IPR_RC_JOB_CONTINUE;
8418         }
8419
8420         /* Enable destructive diagnostics on IOA */
8421         writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8422
8423         if (ioa_cfg->sis64) {
8424                 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8425                 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8426                 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8427         } else
8428                 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
8429
8430         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8431
8432         dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8433
8434         if (ioa_cfg->sis64) {
8435                 ipr_cmd->job_step = ipr_reset_next_stage;
8436                 return IPR_RC_JOB_CONTINUE;
8437         }
8438
8439         ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
8440         ipr_cmd->timer.function = ipr_oper_timeout;
8441         ipr_cmd->done = ipr_reset_ioa_job;
8442         add_timer(&ipr_cmd->timer);
8443         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8444
8445         LEAVE;
8446         return IPR_RC_JOB_RETURN;
8447 }
8448
8449 /**
8450  * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8451  * @ipr_cmd:    ipr command struct
8452  *
8453  * This function is invoked when an adapter dump has run out
8454  * of processing time.
8455  *
8456  * Return value:
8457  *      IPR_RC_JOB_CONTINUE
8458  **/
8459 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8460 {
8461         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8462
8463         if (ioa_cfg->sdt_state == GET_DUMP)
8464                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8465         else if (ioa_cfg->sdt_state == READ_DUMP)
8466                 ioa_cfg->sdt_state = ABORT_DUMP;
8467
8468         ioa_cfg->dump_timeout = 1;
8469         ipr_cmd->job_step = ipr_reset_alert;
8470
8471         return IPR_RC_JOB_CONTINUE;
8472 }
8473
8474 /**
8475  * ipr_unit_check_no_data - Log a unit check/no data error log
8476  * @ioa_cfg:            ioa config struct
8477  *
8478  * Logs an error indicating the adapter unit checked, but for some
8479  * reason, we were unable to fetch the unit check buffer.
8480  *
8481  * Return value:
8482  *      nothing
8483  **/
8484 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8485 {
8486         ioa_cfg->errors_logged++;
8487         dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8488 }
8489
8490 /**
8491  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8492  * @ioa_cfg:            ioa config struct
8493  *
8494  * Fetches the unit check buffer from the adapter by clocking the data
8495  * through the mailbox register.
8496  *
8497  * Return value:
8498  *      nothing
8499  **/
8500 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8501 {
8502         unsigned long mailbox;
8503         struct ipr_hostrcb *hostrcb;
8504         struct ipr_uc_sdt sdt;
8505         int rc, length;
8506         u32 ioasc;
8507
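             /* The mailbox register holds the adapter-memory address of the unit check SDT. */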
8508         mailbox = readl(ioa_cfg->ioa_mailbox);
8509
8510         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8511                 ipr_unit_check_no_data(ioa_cfg);
8512                 return;
8513         }
8514
8515         memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8516         rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8517                                         (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8518
8519         if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8520             ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8521             (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8522                 ipr_unit_check_no_data(ioa_cfg);
8523                 return;
8524         }
8525
8526         /* Find length of the first sdt entry (UC buffer) */
8527         if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8528                 length = be32_to_cpu(sdt.entry[0].end_token);
8529         else
8530                 length = (be32_to_cpu(sdt.entry[0].end_token) -
8531                           be32_to_cpu(sdt.entry[0].start_token)) &
8532                           IPR_FMT2_MBX_ADDR_MASK;
8533
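             /*
              * Borrow a host RCB from the free queue to hold the unit check
              * buffer while it is clocked in from adapter memory.
              */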
8534         hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8535                              struct ipr_hostrcb, queue);
8536         list_del_init(&hostrcb->queue);
8537         memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8538
8539         rc = ipr_get_ldump_data_section(ioa_cfg,
8540                                         be32_to_cpu(sdt.entry[0].start_token),
8541                                         (__be32 *)&hostrcb->hcam,
8542                                         min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8543
8544         if (!rc) {
8545                 ipr_handle_log_data(ioa_cfg, hostrcb);
8546                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8547                 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8548                     ioa_cfg->sdt_state == GET_DUMP)
8549                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8550         } else
8551                 ipr_unit_check_no_data(ioa_cfg);
8552
8553         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8554 }
8555
8556 /**
8557  * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8558  * @ipr_cmd:    ipr command struct
8559  *
8560  * Description: This function will call to get the unit check buffer.
8561  *
8562  * Return value:
8563  *      IPR_RC_JOB_RETURN
8564  **/
8565 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8566 {
8567         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8568
8569         ENTER;
8570         ioa_cfg->ioa_unit_checked = 0;
8571         ipr_get_unit_check_buffer(ioa_cfg);
8572         ipr_cmd->job_step = ipr_reset_alert;
8573         ipr_reset_start_timer(ipr_cmd, 0);
8574
8575         LEAVE;
8576         return IPR_RC_JOB_RETURN;
8577 }
8578
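     /**
      * ipr_dump_mailbox_wait - Wait for the mailbox register to stabilize
      * @ipr_cmd:    ipr command struct
      *
      * Description: On SIS64 adapters this polls, via the reset job timer,
      * until the mailbox register reports stable (or the wait times out),
      * then marks the dump ready to read and schedules the dump worker.
      *
      * Return value:
      *      IPR_RC_JOB_RETURN
      **/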
8579 static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
8580 {
8581         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8582
8583         ENTER;
8584
8585         if (ioa_cfg->sdt_state != GET_DUMP)
8586                 return IPR_RC_JOB_RETURN;
8587
8588         if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
8589             (readl(ioa_cfg->regs.sense_interrupt_reg) &
8590              IPR_PCII_MAILBOX_STABLE)) {
8591
8592                 if (!ipr_cmd->u.time_left)
8593                         dev_err(&ioa_cfg->pdev->dev,
8594                                 "Timed out waiting for Mailbox register.\n");
8595
8596                 ioa_cfg->sdt_state = READ_DUMP;
8597                 ioa_cfg->dump_timeout = 0;
8598                 if (ioa_cfg->sis64)
8599                         ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8600                 else
8601                         ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8602                 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8603                 schedule_work(&ioa_cfg->work_q);
8604
8605         } else {
8606                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8607                 ipr_reset_start_timer(ipr_cmd,
8608                                       IPR_CHECK_FOR_RESET_TIMEOUT);
8609         }
8610
8611         LEAVE;
8612         return IPR_RC_JOB_RETURN;
8613 }
8614
8615 /**
8616  * ipr_reset_restore_cfg_space - Restore PCI config space.
8617  * @ipr_cmd:    ipr command struct
8618  *
8619  * Description: This function restores the saved PCI config space of
8620  * the adapter, fails all outstanding ops back to the callers, and
8621  * fetches the dump/unit check if applicable to this reset.
8622  *
8623  * Return value:
8624  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8625  **/
8626 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8627 {
8628         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8629         u32 int_reg;
8630
8631         ENTER;
8632         ioa_cfg->pdev->state_saved = true;
8633         pci_restore_state(ioa_cfg->pdev);
8634
8635         if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8636                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8637                 return IPR_RC_JOB_CONTINUE;
8638         }
8639
8640         ipr_fail_all_ops(ioa_cfg);
8641
8642         if (ioa_cfg->sis64) {
8643                 /* Set the adapter to the correct endian mode. */
8644                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8645                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8646         }
8647
8648         if (ioa_cfg->ioa_unit_checked) {
8649                 if (ioa_cfg->sis64) {
8650                         ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8651                         ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8652                         return IPR_RC_JOB_RETURN;
8653                 } else {
8654                         ioa_cfg->ioa_unit_checked = 0;
8655                         ipr_get_unit_check_buffer(ioa_cfg);
8656                         ipr_cmd->job_step = ipr_reset_alert;
8657                         ipr_reset_start_timer(ipr_cmd, 0);
8658                         return IPR_RC_JOB_RETURN;
8659                 }
8660         }
8661
8662         if (ioa_cfg->in_ioa_bringdown) {
8663                 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8664         } else if (ioa_cfg->sdt_state == GET_DUMP) {
8665                 ipr_cmd->job_step = ipr_dump_mailbox_wait;
8666                 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
8667         } else {
8668                 ipr_cmd->job_step = ipr_reset_enable_ioa;
8669         }
8670
8671         LEAVE;
8672         return IPR_RC_JOB_CONTINUE;
8673 }
8674
8675 /**
8676  * ipr_reset_bist_done - BIST has completed on the adapter.
8677  * @ipr_cmd:    ipr command struct
8678  *
8679  * Description: Unblock config space and resume the reset process.
8680  *
8681  * Return value:
8682  *      IPR_RC_JOB_CONTINUE
8683  **/
8684 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8685 {
8686         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8687
8688         ENTER;
8689         if (ioa_cfg->cfg_locked)
8690                 pci_cfg_access_unlock(ioa_cfg->pdev);
8691         ioa_cfg->cfg_locked = 0;
8692         ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8693         LEAVE;
8694         return IPR_RC_JOB_CONTINUE;
8695 }
8696
8697 /**
8698  * ipr_reset_start_bist - Run BIST on the adapter.
8699  * @ipr_cmd:    ipr command struct
8700  *
8701  * Description: This function runs BIST on the adapter, then delays 2 seconds.
8702  *
8703  * Return value:
8704  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8705  **/
8706 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8707 {
8708         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8709         int rc = PCIBIOS_SUCCESSFUL;
8710
8711         ENTER;
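             /* Chips flagged IPR_MMIO start BIST through the uproc doorbell; others use the PCI BIST config byte. */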
8712         if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8713                 writel(IPR_UPROCI_SIS64_START_BIST,
8714                        ioa_cfg->regs.set_uproc_interrupt_reg32);
8715         else
8716                 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8717
8718         if (rc == PCIBIOS_SUCCESSFUL) {
8719                 ipr_cmd->job_step = ipr_reset_bist_done;
8720                 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8721                 rc = IPR_RC_JOB_RETURN;
8722         } else {
8723                 if (ioa_cfg->cfg_locked)
8724                         pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8725                 ioa_cfg->cfg_locked = 0;
8726                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8727                 rc = IPR_RC_JOB_CONTINUE;
8728         }
8729
8730         LEAVE;
8731         return rc;
8732 }
8733
8734 /**
8735  * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8736  * @ipr_cmd:    ipr command struct
8737  *
8738  * Description: This clears PCI reset to the adapter and delays two seconds.
8739  *
8740  * Return value:
8741  *      IPR_RC_JOB_RETURN
8742  **/
8743 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8744 {
8745         ENTER;
8746         ipr_cmd->job_step = ipr_reset_bist_done;
8747         ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8748         LEAVE;
8749         return IPR_RC_JOB_RETURN;
8750 }
8751
8752 /**
8753  * ipr_reset_reset_work - Pulse a PCIe warm reset
8754  * @work:       work struct
8755  *
8756  * Description: This pulses a warm reset to the slot, holds it for the PCI reset timeout, then deasserts it.
8757  *
8758  **/
8759 static void ipr_reset_reset_work(struct work_struct *work)
8760 {
8761         struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8762         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8763         struct pci_dev *pdev = ioa_cfg->pdev;
8764         unsigned long lock_flags = 0;
8765
8766         ENTER;
8767         pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8768         msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8769         pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8770
8771         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8772         if (ioa_cfg->reset_cmd == ipr_cmd)
8773                 ipr_reset_ioa_job(ipr_cmd);
8774         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8775         LEAVE;
8776 }
8777
8778 /**
8779  * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8780  * @ipr_cmd:    ipr command struct
8781  *
8782  * Description: This asserts PCI reset to the adapter.
8783  *
8784  * Return value:
8785  *      IPR_RC_JOB_RETURN
8786  **/
8787 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8788 {
8789         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8790
8791         ENTER;
8792         INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8793         queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
8794         ipr_cmd->job_step = ipr_reset_slot_reset_done;
8795         LEAVE;
8796         return IPR_RC_JOB_RETURN;
8797 }
8798
8799 /**
8800  * ipr_reset_block_config_access_wait - Wait for permission to block config access
8801  * @ipr_cmd:    ipr command struct
8802  *
8803  * Description: This attempts to block config access to the IOA.
8804  *
8805  * Return value:
8806  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8807  **/
8808 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8809 {
8810         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8811         int rc = IPR_RC_JOB_CONTINUE;
8812
8813         if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8814                 ioa_cfg->cfg_locked = 1;
8815                 ipr_cmd->job_step = ioa_cfg->reset;
8816         } else {
8817                 if (ipr_cmd->u.time_left) {
8818                         rc = IPR_RC_JOB_RETURN;
8819                         ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8820                         ipr_reset_start_timer(ipr_cmd,
8821                                               IPR_CHECK_FOR_RESET_TIMEOUT);
8822                 } else {
8823                         ipr_cmd->job_step = ioa_cfg->reset;
8824                         dev_err(&ioa_cfg->pdev->dev,
8825                                 "Timed out waiting to lock config access. Resetting anyway.\n");
8826                 }
8827         }
8828
8829         return rc;
8830 }
8831
8832 /**
8833  * ipr_reset_block_config_access - Block config access to the IOA
8834  * @ipr_cmd:    ipr command struct
8835  *
8836  * Description: This attempts to block config access to the IOA
8837  *
8838  * Return value:
8839  *      IPR_RC_JOB_CONTINUE
8840  **/
8841 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8842 {
8843         ipr_cmd->ioa_cfg->cfg_locked = 0;
8844         ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8845         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8846         return IPR_RC_JOB_CONTINUE;
8847 }
8848
8849 /**
8850  * ipr_reset_allowed - Query whether or not IOA can be reset
8851  * @ioa_cfg:    ioa config struct
8852  *
8853  * Return value:
8854  *      0 if reset not allowed / non-zero if reset is allowed
8855  **/
8856 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8857 {
8858         volatile u32 temp_reg;
8859
8860         temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8861         return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8862 }
8863
8864 /**
8865  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8866  * @ipr_cmd:    ipr command struct
8867  *
8868  * Description: This function waits for adapter permission to run BIST,
8869  * then runs BIST. If the adapter does not give permission after a
8870  * reasonable time, we will reset the adapter anyway. The impact of
8871  * resetting the adapter without warning the adapter is the risk of
8872  * losing the persistent error log on the adapter. If the adapter is
8873  * reset while it is writing to the flash on the adapter, the flash
8874  * segment will have bad ECC and be zeroed.
8875  *
8876  * Return value:
8877  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8878  **/
8879 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8880 {
8881         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8882         int rc = IPR_RC_JOB_RETURN;
8883
8884         if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8885                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8886                 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8887         } else {
8888                 ipr_cmd->job_step = ipr_reset_block_config_access;
8889                 rc = IPR_RC_JOB_CONTINUE;
8890         }
8891
8892         return rc;
8893 }
8894
8895 /**
8896  * ipr_reset_alert - Alert the adapter of a pending reset
8897  * @ipr_cmd:    ipr command struct
8898  *
8899  * Description: This function alerts the adapter that it will be reset.
8900  * If memory space is not currently enabled, proceed directly
8901  * to running BIST on the adapter. The timer must always be started
8902  * so we guarantee we do not run BIST from ipr_isr.
8903  *
8904  * Return value:
8905  *      IPR_RC_JOB_RETURN
8906  **/
8907 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8908 {
8909         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8910         u16 cmd_reg;
8911         int rc;
8912
8913         ENTER;
8914         rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8915
8916         if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8917                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8918                 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8919                 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8920         } else {
8921                 ipr_cmd->job_step = ipr_reset_block_config_access;
8922         }
8923
8924         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8925         ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8926
8927         LEAVE;
8928         return IPR_RC_JOB_RETURN;
8929 }
8930
8931 /**
8932  * ipr_reset_quiesce_done - Complete IOA disconnect
8933  * @ipr_cmd:    ipr command struct
8934  *
8935  * Description: Freeze the adapter to complete quiesce processing
8936  *
8937  * Return value:
8938  *      IPR_RC_JOB_CONTINUE
8939  **/
8940 static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8941 {
8942         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8943
8944         ENTER;
8945         ipr_cmd->job_step = ipr_ioa_bringdown_done;
8946         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8947         LEAVE;
8948         return IPR_RC_JOB_CONTINUE;
8949 }
8950
8951 /**
8952  * ipr_reset_cancel_hcam_done - Check for outstanding commands
8953  * @ipr_cmd:    ipr command struct
8954  *
8955  * Description: Ensure nothing is outstanding to the IOA and
8956  * proceed with IOA disconnect. Otherwise reset the IOA.
8957  *
8958  * Return value:
8959  *      IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
8960  **/
8961 static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
8962 {
8963         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8964         struct ipr_cmnd *loop_cmd;
8965         struct ipr_hrr_queue *hrrq;
8966         int rc = IPR_RC_JOB_CONTINUE;
8967         int count = 0;
8968
8969         ENTER;
8970         ipr_cmd->job_step = ipr_reset_quiesce_done;
8971
8972         for_each_hrrq(hrrq, ioa_cfg) {
8973                 spin_lock(&hrrq->_lock);
8974                 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
8975                         count++;
8976                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8977                         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
8978                         rc = IPR_RC_JOB_RETURN;
8979                         break;
8980                 }
8981                 spin_unlock(&hrrq->_lock);
8982
8983                 if (count)
8984                         break;
8985         }
8986
8987         LEAVE;
8988         return rc;
8989 }
8990
8991 /**
8992  * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
8993  * @ipr_cmd:    ipr command struct
8994  *
8995  * Description: Cancel any outstanding HCAMs to the IOA.
8996  *
8997  * Return value:
8998  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8999  **/
9000 static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
9001 {
9002         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9003         int rc = IPR_RC_JOB_CONTINUE;
9004         struct ipr_cmd_pkt *cmd_pkt;
9005         struct ipr_cmnd *hcam_cmd;
9006         struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
9007
9008         ENTER;
9009         ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
9010
9011         if (!hrrq->ioa_is_dead) {
9012                 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
9013                         list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
9014                                 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
9015                                         continue;
9016
9017                                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9019                                 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
9020                                 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
9021                                 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
9022                                 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
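                                     /*
                                      * Scatter the 64-bit IOARCB address of
                                      * the HCAM through the CDB big-endian:
                                      * bytes 10-13 carry bits 63:32 and bytes
                                      * 2-5 carry bits 31:0. For example (an
                                      * illustrative address only),
                                      * 0x0123456789abcdef becomes
                                      * cdb[10..13] = 01 23 45 67 and
                                      * cdb[2..5] = 89 ab cd ef.
                                      */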
9023                                 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
9024                                 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
9025                                 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
9026                                 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
9027                                 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
9028                                 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
9029                                 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
9030                                 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
9031
9032                                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9033                                            IPR_CANCEL_TIMEOUT);
9034
9035                                 rc = IPR_RC_JOB_RETURN;
9036                                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9037                                 break;
9038                         }
9039                 }
9040         } else
9041                 ipr_cmd->job_step = ipr_reset_alert;
9042
9043         LEAVE;
9044         return rc;
9045 }
9046
9047 /**
9048  * ipr_reset_ucode_download_done - Microcode download completion
9049  * @ipr_cmd:    ipr command struct
9050  *
9051  * Description: This function unmaps the microcode download buffer.
9052  *
9053  * Return value:
9054  *      IPR_RC_JOB_CONTINUE
9055  **/
9056 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
9057 {
9058         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9059         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9060
9061         dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
9062                      sglist->num_sg, DMA_TO_DEVICE);
9063
9064         ipr_cmd->job_step = ipr_reset_alert;
9065         return IPR_RC_JOB_CONTINUE;
9066 }
9067
9068 /**
9069  * ipr_reset_ucode_download - Download microcode to the adapter
9070  * @ipr_cmd:    ipr command struct
9071  *
9072  * Description: This function checks to see if there is microcode
9073  * to download to the adapter. If there is, a download is performed.
9074  *
9075  * Return value:
9076  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9077  **/
9078 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
9079 {
9080         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9081         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9082
9083         ENTER;
9084         ipr_cmd->job_step = ipr_reset_alert;
9085
9086         if (!sglist)
9087                 return IPR_RC_JOB_CONTINUE;
9088
9089         ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9090         ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
9091         ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
9092         ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
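             /*
              * WRITE BUFFER takes a 24-bit transfer length in CDB bytes 6-8,
              * MSB first; e.g. an (illustrative) image length of 0x012345
              * yields cdb[6] = 0x01, cdb[7] = 0x23, cdb[8] = 0x45.
              */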
9093         ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
9094         ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
9095         ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
9096
9097         if (ioa_cfg->sis64)
9098                 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
9099         else
9100                 ipr_build_ucode_ioadl(ipr_cmd, sglist);
9101         ipr_cmd->job_step = ipr_reset_ucode_download_done;
9102
9103         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9104                    IPR_WRITE_BUFFER_TIMEOUT);
9105
9106         LEAVE;
9107         return IPR_RC_JOB_RETURN;
9108 }
9109
9110 /**
9111  * ipr_reset_shutdown_ioa - Shutdown the adapter
9112  * @ipr_cmd:    ipr command struct
9113  *
9114  * Description: This function issues an adapter shutdown of the
9115  * specified type to the specified adapter as part of the
9116  * adapter reset job.
9117  *
9118  * Return value:
9119  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9120  **/
9121 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
9122 {
9123         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9124         enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
9125         unsigned long timeout;
9126         int rc = IPR_RC_JOB_CONTINUE;
9127
9128         ENTER;
9129         if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
9130                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9131         else if (shutdown_type != IPR_SHUTDOWN_NONE &&
9132                         !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
9133                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9134                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9135                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9136                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
9137
9138                 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
9139                         timeout = IPR_SHUTDOWN_TIMEOUT;
9140                 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
9141                         timeout = IPR_INTERNAL_TIMEOUT;
9142                 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
9143                         timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
9144                 else
9145                         timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
9146
9147                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
9148
9149                 rc = IPR_RC_JOB_RETURN;
9150                 ipr_cmd->job_step = ipr_reset_ucode_download;
9151         } else
9152                 ipr_cmd->job_step = ipr_reset_alert;
9153
9154         LEAVE;
9155         return rc;
9156 }
9157
9158 /**
9159  * ipr_reset_ioa_job - Adapter reset job
9160  * @ipr_cmd:    ipr command struct
9161  *
9162  * Description: This function is the job router for the adapter reset job.
9163  *
9164  * Return value:
9165  *      none
9166  **/
9167 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
9168 {
9169         u32 rc, ioasc;
9170         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9171
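             /*
              * Job steps that return IPR_RC_JOB_CONTINUE are run back to back
              * here; a step that must sleep (on a timer or a command it issued)
              * returns IPR_RC_JOB_RETURN and re-enters this router through
              * ipr_cmd->done when it completes.
              */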
9172         do {
9173                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
9174
9175                 if (ioa_cfg->reset_cmd != ipr_cmd) {
9176                         /*
9177                          * We are doing nested adapter resets and this is
9178                          * not the current reset job.
9179                          */
9180                         list_add_tail(&ipr_cmd->queue,
9181                                         &ipr_cmd->hrrq->hrrq_free_q);
9182                         return;
9183                 }
9184
9185                 if (IPR_IOASC_SENSE_KEY(ioasc)) {
9186                         rc = ipr_cmd->job_step_failed(ipr_cmd);
9187                         if (rc == IPR_RC_JOB_RETURN)
9188                                 return;
9189                 }
9190
9191                 ipr_reinit_ipr_cmnd(ipr_cmd);
9192                 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
9193                 rc = ipr_cmd->job_step(ipr_cmd);
9194         } while (rc == IPR_RC_JOB_CONTINUE);
9195 }
9196
9197 /**
9198  * _ipr_initiate_ioa_reset - Initiate an adapter reset
9199  * @ioa_cfg:            ioa config struct
9200  * @job_step:           first job step of reset job
9201  * @shutdown_type:      shutdown type
9202  *
9203  * Description: This function will initiate the reset of the given adapter
9204  * starting at the selected job step.
9205  * If the caller needs to wait on the completion of the reset,
9206  * the caller must sleep on the reset_wait_q.
9207  *
9208  * Return value:
9209  *      none
9210  **/
9211 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9212                                     int (*job_step) (struct ipr_cmnd *),
9213                                     enum ipr_shutdown_type shutdown_type)
9214 {
9215         struct ipr_cmnd *ipr_cmd;
9216         int i;
9217
9218         ioa_cfg->in_reset_reload = 1;
9219         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9220                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9221                 ioa_cfg->hrrq[i].allow_cmds = 0;
9222                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9223         }
9224         wmb();
9225         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9226                 ioa_cfg->scsi_unblock = 0;
9227                 ioa_cfg->scsi_blocked = 1;
9228                 scsi_block_requests(ioa_cfg->host);
9229         }
9230
9231         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9232         ioa_cfg->reset_cmd = ipr_cmd;
9233         ipr_cmd->job_step = job_step;
9234         ipr_cmd->u.shutdown_type = shutdown_type;
9235
9236         ipr_reset_ioa_job(ipr_cmd);
9237 }
9238
9239 /**
9240  * ipr_initiate_ioa_reset - Initiate an adapter reset
9241  * @ioa_cfg:            ioa config struct
9242  * @shutdown_type:      shutdown type
9243  *
9244  * Description: This function will initiate the reset of the given adapter.
9245  * If the caller needs to wait on the completion of the reset,
9246  * the caller must sleep on the reset_wait_q.
9247  *
9248  * Return value:
9249  *      none
9250  **/
9251 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9252                                    enum ipr_shutdown_type shutdown_type)
9253 {
9254         int i;
9255
9256         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
9257                 return;
9258
9259         if (ioa_cfg->in_reset_reload) {
9260                 if (ioa_cfg->sdt_state == GET_DUMP)
9261                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9262                 else if (ioa_cfg->sdt_state == READ_DUMP)
9263                         ioa_cfg->sdt_state = ABORT_DUMP;
9264         }
9265
9266         if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
9267                 dev_err(&ioa_cfg->pdev->dev,
9268                         "IOA taken offline - error recovery failed\n");
9269
9270                 ioa_cfg->reset_retries = 0;
9271                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9272                         spin_lock(&ioa_cfg->hrrq[i]._lock);
9273                         ioa_cfg->hrrq[i].ioa_is_dead = 1;
9274                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
9275                 }
9276                 wmb();
9277
9278                 if (ioa_cfg->in_ioa_bringdown) {
9279                         ioa_cfg->reset_cmd = NULL;
9280                         ioa_cfg->in_reset_reload = 0;
9281                         ipr_fail_all_ops(ioa_cfg);
9282                         wake_up_all(&ioa_cfg->reset_wait_q);
9283
9284                         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9285                                 ioa_cfg->scsi_unblock = 1;
9286                                 schedule_work(&ioa_cfg->work_q);
9287                         }
9288                         return;
9289                 } else {
9290                         ioa_cfg->in_ioa_bringdown = 1;
9291                         shutdown_type = IPR_SHUTDOWN_NONE;
9292                 }
9293         }
9294
9295         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
9296                                 shutdown_type);
9297 }
9298
9299 /**
9300  * ipr_reset_freeze - Hold off all I/O activity
9301  * @ipr_cmd:    ipr command struct
9302  *
9303  * Description: If the PCI slot is frozen, hold off all I/O
9304  * activity; then, as soon as the slot is available again,
9305  * initiate an adapter reset.
9306  *
      * Return value:
      *      IPR_RC_JOB_RETURN
      **/
9307 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
9308 {
9309         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9310         int i;
9311
9312         /* Disallow new interrupts, avoid loop */
9313         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9314                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9315                 ioa_cfg->hrrq[i].allow_interrupts = 0;
9316                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9317         }
9318         wmb();
9319         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
9320         ipr_cmd->done = ipr_reset_ioa_job;
9321         return IPR_RC_JOB_RETURN;
9322 }
9323
9324 /**
9325  * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9326  * @pdev:       PCI device struct
9327  *
9328  * Description: This routine is called to tell us that the MMIO
9329  * access to the IOA has been restored
9330  *
      * Return value:
      *      PCI_ERS_RESULT_NEED_RESET
      */
9331 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
9332 {
9333         unsigned long flags = 0;
9334         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9335
9336         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9337         if (!ioa_cfg->probe_done)
9338                 pci_save_state(pdev);
9339         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9340         return PCI_ERS_RESULT_NEED_RESET;
9341 }
9342
9343 /**
9344  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9345  * @pdev:       PCI device struct
9346  *
9347  * Description: This routine is called to tell us that the PCI bus
9348  * is down. Can't do anything here, except put the device driver
9349  * into a holding pattern, waiting for the PCI bus to come back.
9350  */
9351 static void ipr_pci_frozen(struct pci_dev *pdev)
9352 {
9353         unsigned long flags = 0;
9354         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9355
9356         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9357         if (ioa_cfg->probe_done)
9358                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
9359         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9360 }
9361
9362 /**
9363  * ipr_pci_slot_reset - Called when PCI slot has been reset.
9364  * @pdev:       PCI device struct
9365  *
9366  * Description: This routine is called by the pci error recovery
9367  * code after the PCI slot has been reset, just before we
9368  * should resume normal operations.
9369  *
      * Return value:
      *      PCI_ERS_RESULT_RECOVERED
      */
9370 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
9371 {
9372         unsigned long flags = 0;
9373         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9374
9375         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9376         if (ioa_cfg->probe_done) {
9377                 if (ioa_cfg->needs_warm_reset)
9378                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9379                 else
9380                         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9381                                                 IPR_SHUTDOWN_NONE);
9382         } else
9383                 wake_up_all(&ioa_cfg->eeh_wait_q);
9384         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9385         return PCI_ERS_RESULT_RECOVERED;
9386 }
9387
9388 /**
9389  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9390  * @pdev:       PCI device struct
9391  *
9392  * Description: This routine is called when the PCI bus has
9393  * permanently failed.
9394  */
9395 static void ipr_pci_perm_failure(struct pci_dev *pdev)
9396 {
9397         unsigned long flags = 0;
9398         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9399         int i;
9400
9401         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9402         if (ioa_cfg->probe_done) {
9403                 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9404                         ioa_cfg->sdt_state = ABORT_DUMP;
9405                 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9406                 ioa_cfg->in_ioa_bringdown = 1;
9407                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9408                         spin_lock(&ioa_cfg->hrrq[i]._lock);
9409                         ioa_cfg->hrrq[i].allow_cmds = 0;
9410                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
9411                 }
9412                 wmb();
9413                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9414         } else
9415                 wake_up_all(&ioa_cfg->eeh_wait_q);
9416         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9417 }
9418
9419 /**
9420  * ipr_pci_error_detected - Called when a PCI error is detected.
9421  * @pdev:       PCI device struct
9422  * @state:      PCI channel state
9423  *
9424  * Description: Called when a PCI error is detected.
9425  *
9426  * Return value:
9427  *      PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9428  */
9429 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9430                                                pci_channel_state_t state)
9431 {
9432         switch (state) {
9433         case pci_channel_io_frozen:
9434                 ipr_pci_frozen(pdev);
9435                 return PCI_ERS_RESULT_CAN_RECOVER;
9436         case pci_channel_io_perm_failure:
9437                 ipr_pci_perm_failure(pdev);
9438                 return PCI_ERS_RESULT_DISCONNECT;
9439                 break;
9440         default:
9441                 break;
9442         }
9443         return PCI_ERS_RESULT_NEED_RESET;
9444 }
9445
9446 /**
9447  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9448  * @ioa_cfg:    ioa cfg struct
9449  *
9450  * Description: This is the second phase of adapter initialization.
9451  * This function takes care of initializing the adapter to the point
9452  * where it can accept new commands.
9453  *
9454  * Return value:
9455  *      0 on success / -EIO on failure
9456  **/
9457 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
9458 {
9459         int rc = 0;
9460         unsigned long host_lock_flags = 0;
9461
9462         ENTER;
9463         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9464         dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
9465         ioa_cfg->probe_done = 1;
9466         if (ioa_cfg->needs_hard_reset) {
9467                 ioa_cfg->needs_hard_reset = 0;
9468                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9469         } else
9470                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9471                                         IPR_SHUTDOWN_NONE);
9472         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9473
9474         LEAVE;
9475         return rc;
9476 }
9477
9478 /**
9479  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9480  * @ioa_cfg:    ioa config struct
9481  *
9482  * Return value:
9483  *      none
9484  **/
9485 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9486 {
9487         int i;
9488
9489         if (ioa_cfg->ipr_cmnd_list) {
9490                 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9491                         if (ioa_cfg->ipr_cmnd_list[i])
9492                                 dma_pool_free(ioa_cfg->ipr_cmd_pool,
9493                                               ioa_cfg->ipr_cmnd_list[i],
9494                                               ioa_cfg->ipr_cmnd_list_dma[i]);
9495
9496                         ioa_cfg->ipr_cmnd_list[i] = NULL;
9497                 }
9498         }
9499
9500         if (ioa_cfg->ipr_cmd_pool)
9501                 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
9502
9503         kfree(ioa_cfg->ipr_cmnd_list);
9504         kfree(ioa_cfg->ipr_cmnd_list_dma);
9505         ioa_cfg->ipr_cmnd_list = NULL;
9506         ioa_cfg->ipr_cmnd_list_dma = NULL;
9507         ioa_cfg->ipr_cmd_pool = NULL;
9508 }
9509
9510 /**
9511  * ipr_free_mem - Frees memory allocated for an adapter
9512  * @ioa_cfg:    ioa cfg struct
9513  *
9514  * Return value:
9515  *      nothing
9516  **/
9517 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9518 {
9519         int i;
9520
9521         kfree(ioa_cfg->res_entries);
9522         dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9523                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9524         ipr_free_cmd_blks(ioa_cfg);
9525
9526         for (i = 0; i < ioa_cfg->hrrq_num; i++)
9527                 dma_free_coherent(&ioa_cfg->pdev->dev,
9528                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9529                                   ioa_cfg->hrrq[i].host_rrq,
9530                                   ioa_cfg->hrrq[i].host_rrq_dma);
9531
9532         dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9533                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9534
9535         for (i = 0; i < IPR_MAX_HCAMS; i++) {
9536                 dma_free_coherent(&ioa_cfg->pdev->dev,
9537                                   sizeof(struct ipr_hostrcb),
9538                                   ioa_cfg->hostrcb[i],
9539                                   ioa_cfg->hostrcb_dma[i]);
9540         }
9541
9542         ipr_free_dump(ioa_cfg);
9543         kfree(ioa_cfg->trace);
9544 }
9545
9546 /**
9547  * ipr_free_irqs - Free all allocated IRQs for the adapter.
9548  * @ioa_cfg:    ipr cfg struct
9549  *
9550  * This function frees all allocated IRQs for the
9551  * specified adapter.
9552  *
9553  * Return value:
9554  *      none
9555  **/
9556 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9557 {
9558         struct pci_dev *pdev = ioa_cfg->pdev;
9559         int i;
9560
9561         for (i = 0; i < ioa_cfg->nvectors; i++)
9562                 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
9563         pci_free_irq_vectors(pdev);
9564 }
9565
9566 /**
9567  * ipr_free_all_resources - Free all allocated resources for an adapter.
9568  * @ipr_cmd:    ipr command struct
9569  *
9570  * This function frees all allocated resources for the
9571  * specified adapter.
9572  *
9573  * Return value:
9574  *      none
9575  **/
9576 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9577 {
9578         struct pci_dev *pdev = ioa_cfg->pdev;
9579
9580         ENTER;
9581         ipr_free_irqs(ioa_cfg);
9582         if (ioa_cfg->reset_work_q)
9583                 destroy_workqueue(ioa_cfg->reset_work_q);
9584         iounmap(ioa_cfg->hdw_dma_regs);
9585         pci_release_regions(pdev);
9586         ipr_free_mem(ioa_cfg);
9587         scsi_host_put(ioa_cfg->host);
9588         pci_disable_device(pdev);
9589         LEAVE;
9590 }
9591
9592 /**
9593  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9594  * @ioa_cfg:    ioa config struct
9595  *
9596  * Return value:
9597  *      0 on success / -ENOMEM on allocation failure
9598  **/
9599 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9600 {
9601         struct ipr_cmnd *ipr_cmd;
9602         struct ipr_ioarcb *ioarcb;
9603         dma_addr_t dma_addr;
9604         int i, entries_each_hrrq, hrrq_id = 0;
9605
9606         ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
9607                                                 sizeof(struct ipr_cmnd), 512, 0);
9608
9609         if (!ioa_cfg->ipr_cmd_pool)
9610                 return -ENOMEM;
9611
9612         ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9613         ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9614
9615         if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9616                 ipr_free_cmd_blks(ioa_cfg);
9617                 return -ENOMEM;
9618         }
9619
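             /*
              * Partition the command blocks among the HRR queues: with more
              * than one queue, queue 0 is reserved for internal commands and
              * the base blocks are divided evenly across the remaining queues.
              * Illustrative numbers only: with 5 queues and 100 base blocks,
              * queues 1-4 get 25 command IDs each after the internal range.
              * Any remainder from the division is folded into the last queue
              * further down.
              */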
9620         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9621                 if (ioa_cfg->hrrq_num > 1) {
9622                         if (i == 0) {
9623                                 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9624                                 ioa_cfg->hrrq[i].min_cmd_id = 0;
9625                                 ioa_cfg->hrrq[i].max_cmd_id =
9626                                         (entries_each_hrrq - 1);
9627                         } else {
9628                                 entries_each_hrrq =
9629                                         IPR_NUM_BASE_CMD_BLKS/
9630                                         (ioa_cfg->hrrq_num - 1);
9631                                 ioa_cfg->hrrq[i].min_cmd_id =
9632                                         IPR_NUM_INTERNAL_CMD_BLKS +
9633                                         (i - 1) * entries_each_hrrq;
9634                                 ioa_cfg->hrrq[i].max_cmd_id =
9635                                         (IPR_NUM_INTERNAL_CMD_BLKS +
9636                                         i * entries_each_hrrq - 1);
9637                         }
9638                 } else {
9639                         entries_each_hrrq = IPR_NUM_CMD_BLKS;
9640                         ioa_cfg->hrrq[i].min_cmd_id = 0;
9641                         ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9642                 }
9643                 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9644         }
9645
9646         BUG_ON(ioa_cfg->hrrq_num == 0);
9647
9648         i = IPR_NUM_CMD_BLKS -
9649                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9650         if (i > 0) {
9651                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9652                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9653         }
9654
9655         for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9656                 ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool,
9657                                 GFP_KERNEL, &dma_addr);
9658
9659                 if (!ipr_cmd) {
9660                         ipr_free_cmd_blks(ioa_cfg);
9661                         return -ENOMEM;
9662                 }
9663
9664                 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9665                 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9666
9667                 ioarcb = &ipr_cmd->ioarcb;
9668                 ipr_cmd->dma_addr = dma_addr;
9669                 if (ioa_cfg->sis64)
9670                         ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9671                 else
9672                         ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9673
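                     /*
                      * Keep the command index in the upper bits of the response
                      * handle; the low two bits of handles coming back in the
                      * HRRQ are used by the hardware as toggle/valid flags, so
                      * the index is recovered with a shift right of two.
                      */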
9674                 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9675                 if (ioa_cfg->sis64) {
9676                         ioarcb->u.sis64_addr_data.data_ioadl_addr =
9677                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9678                         ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9679                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9680                 } else {
9681                         ioarcb->write_ioadl_addr =
9682                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9683                         ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9684                         ioarcb->ioasa_host_pci_addr =
9685                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9686                 }
9687                 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9688                 ipr_cmd->cmd_index = i;
9689                 ipr_cmd->ioa_cfg = ioa_cfg;
9690                 ipr_cmd->sense_buffer_dma = dma_addr +
9691                         offsetof(struct ipr_cmnd, sense_buffer);
9692
9693                 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9694                 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9695                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9696                 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9697                         hrrq_id++;
9698         }
9699
9700         return 0;
9701 }
9702
9703 /**
9704  * ipr_alloc_mem - Allocate memory for an adapter
9705  * @ioa_cfg:    ioa config struct
9706  *
9707  * Return value:
9708  *      0 on success / non-zero for error
9709  **/
9710 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9711 {
9712         struct pci_dev *pdev = ioa_cfg->pdev;
9713         int i, rc = -ENOMEM;
9714
9715         ENTER;
9716         ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported,
9717                                        sizeof(struct ipr_resource_entry),
9718                                        GFP_KERNEL);
9719
9720         if (!ioa_cfg->res_entries)
9721                 goto out;
9722
9723         for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9724                 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9725                 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9726         }
9727
9728         ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9729                                               sizeof(struct ipr_misc_cbs),
9730                                               &ioa_cfg->vpd_cbs_dma,
9731                                               GFP_KERNEL);
9732
9733         if (!ioa_cfg->vpd_cbs)
9734                 goto out_free_res_entries;
9735
9736         if (ipr_alloc_cmd_blks(ioa_cfg))
9737                 goto out_free_vpd_cbs;
9738
9739         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9740                 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9741                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9742                                         &ioa_cfg->hrrq[i].host_rrq_dma,
9743                                         GFP_KERNEL);
9744
9745                 if (!ioa_cfg->hrrq[i].host_rrq)  {
9746                                 while (--i >= 0)
9747                                 dma_free_coherent(&pdev->dev,
9748                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9749                                         ioa_cfg->hrrq[i].host_rrq,
9750                                         ioa_cfg->hrrq[i].host_rrq_dma);
9751                         goto out_ipr_free_cmd_blocks;
9752                 }
9753                 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9754         }
9755
9756         ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9757                                                   ioa_cfg->cfg_table_size,
9758                                                   &ioa_cfg->cfg_table_dma,
9759                                                   GFP_KERNEL);
9760
9761         if (!ioa_cfg->u.cfg_table)
9762                 goto out_free_host_rrq;
9763
9764         for (i = 0; i < IPR_MAX_HCAMS; i++) {
9765                 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9766                                                          sizeof(struct ipr_hostrcb),
9767                                                          &ioa_cfg->hostrcb_dma[i],
9768                                                          GFP_KERNEL);
9769
9770                 if (!ioa_cfg->hostrcb[i])
9771                         goto out_free_hostrcb_dma;
9772
9773                 ioa_cfg->hostrcb[i]->hostrcb_dma =
9774                         ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9775                 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9776                 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9777         }
9778
9779         ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES,
9780                                  sizeof(struct ipr_trace_entry),
9781                                  GFP_KERNEL);
9782
9783         if (!ioa_cfg->trace)
9784                 goto out_free_hostrcb_dma;
9785
9786         rc = 0;
9787 out:
9788         LEAVE;
9789         return rc;
9790
9791 out_free_hostrcb_dma:
9792         while (i-- > 0) {
9793                 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9794                                   ioa_cfg->hostrcb[i],
9795                                   ioa_cfg->hostrcb_dma[i]);
9796         }
9797         dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9798                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9799 out_free_host_rrq:
9800         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9801                 dma_free_coherent(&pdev->dev,
9802                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9803                                   ioa_cfg->hrrq[i].host_rrq,
9804                                   ioa_cfg->hrrq[i].host_rrq_dma);
9805         }
9806 out_ipr_free_cmd_blocks:
9807         ipr_free_cmd_blks(ioa_cfg);
9808 out_free_vpd_cbs:
9809         dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9810                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9811 out_free_res_entries:
9812         kfree(ioa_cfg->res_entries);
9813         goto out;
9814 }
9815
9816 /**
9817  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9818  * @ioa_cfg:    ioa config struct
9819  *
9820  * Return value:
9821  *      none
9822  **/
9823 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9824 {
9825         int i;
9826
9827         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9828                 ioa_cfg->bus_attr[i].bus = i;
9829                 ioa_cfg->bus_attr[i].qas_enabled = 0;
9830                 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9831                 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9832                         ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9833                 else
9834                         ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9835         }
9836 }
9837
9838 /**
9839  * ipr_init_regs - Initialize IOA registers
9840  * @ioa_cfg:    ioa config struct
9841  *
9842  * Return value:
9843  *      none
9844  **/
9845 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9846 {
9847         const struct ipr_interrupt_offsets *p;
9848         struct ipr_interrupts *t;
9849         void __iomem *base;
9850
9851         p = &ioa_cfg->chip_cfg->regs;
9852         t = &ioa_cfg->regs;
9853         base = ioa_cfg->hdw_dma_regs;
9854
9855         t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9856         t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9857         t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9858         t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9859         t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9860         t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9861         t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9862         t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9863         t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9864         t->ioarrin_reg = base + p->ioarrin_reg;
9865         t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9866         t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9867         t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9868         t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9869         t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9870         t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9871
9872         if (ioa_cfg->sis64) {
9873                 t->init_feedback_reg = base + p->init_feedback_reg;
9874                 t->dump_addr_reg = base + p->dump_addr_reg;
9875                 t->dump_data_reg = base + p->dump_data_reg;
9876                 t->endian_swap_reg = base + p->endian_swap_reg;
9877         }
9878 }
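
/*
 * Illustration: with the addresses computed above, register access in
 * the rest of the driver is plain MMIO through the cached pointers, e.g.
 *
 *	u32 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
 *	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
 *	       ioa_cfg->regs.clr_interrupt_mask_reg32);
 *
 * Because the per-chip offsets live in ipr_interrupt_offsets, the same
 * accessors work unchanged across adapter generations.
 */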
9879
9880 /**
9881  * ipr_init_ioa_cfg - Initialize IOA config struct
9882  * @ioa_cfg:    ioa config struct
9883  * @host:               scsi host struct
9884  * @pdev:               PCI dev struct
9885  *
9886  * Return value:
9887  *      none
9888  **/
9889 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9890                              struct Scsi_Host *host, struct pci_dev *pdev)
9891 {
9892         int i;
9893
9894         ioa_cfg->host = host;
9895         ioa_cfg->pdev = pdev;
9896         ioa_cfg->log_level = ipr_log_level;
9897         ioa_cfg->doorbell = IPR_DOORBELL;
9898         sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9899         sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9900         sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9901         sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9902         sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9903         sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9904
9905         INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9906         INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9907         INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
9908         INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9909         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9910         INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9911         init_waitqueue_head(&ioa_cfg->reset_wait_q);
9912         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9913         init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9914         ioa_cfg->sdt_state = INACTIVE;
9915
9916         ipr_initialize_bus_attr(ioa_cfg);
9917         ioa_cfg->max_devs_supported = ipr_max_devs;
9918
9919         if (ioa_cfg->sis64) {
9920                 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9921                 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9922                 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9923                         ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9924                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9925                                            + ((sizeof(struct ipr_config_table_entry64)
9926                                                * ioa_cfg->max_devs_supported)));
9927         } else {
9928                 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9929                 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9930                 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9931                         ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9932                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9933                                            + ((sizeof(struct ipr_config_table_entry)
9934                                                * ioa_cfg->max_devs_supported)));
9935         }
9936
9937         host->max_channel = IPR_VSET_BUS;
9938         host->unique_id = host->host_no;
9939         host->max_cmd_len = IPR_MAX_CDB_LEN;
9940         host->can_queue = ioa_cfg->max_cmds;
9941         pci_set_drvdata(pdev, ioa_cfg);
9942
9943         for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9944                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9945                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9946                 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9947                 if (i == 0)
9948                         ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9949                 else
9950                         ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9951         }
9952 }
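
/*
 * Locking sketch: HRRQ 0 aliases the SCSI host lock so legacy
 * single-queue paths keep their existing serialization, while the
 * remaining queues get private locks for multi-vector operation. Fast
 * paths therefore lock through the indirection:
 *
 *	spin_lock_irqsave(hrrq->lock, flags);
 *	...
 *	spin_unlock_irqrestore(hrrq->lock, flags);
 *
 * Code that must touch a specific queue's private state, such as
 * __ipr_remove() below, takes _lock directly.
 */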
9953
9954 /**
9955  * ipr_get_chip_info - Find adapter chip information
9956  * @dev_id:             PCI device id struct
9957  *
9958  * Return value:
9959  *      ptr to chip information on success / NULL on failure
9960  **/
9961 static const struct ipr_chip_t *
9962 ipr_get_chip_info(const struct pci_device_id *dev_id)
9963 {
9964         int i;
9965
9966         for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9967                 if (ipr_chip[i].vendor == dev_id->vendor &&
9968                     ipr_chip[i].device == dev_id->device)
9969                         return &ipr_chip[i];
9970         return NULL;
9971 }
9972
9973 /**
9974  * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9975  *                                              during probe time
9976  * @ioa_cfg:    ioa config struct
9977  *
9978  * Return value:
9979  *      None
9980  **/
9981 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9982 {
9983         struct pci_dev *pdev = ioa_cfg->pdev;
9984
9985         if (pci_channel_offline(pdev)) {
9986                 wait_event_timeout(ioa_cfg->eeh_wait_q,
9987                                    !pci_channel_offline(pdev),
9988                                    IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9989                 pci_restore_state(pdev);
9990         }
9991 }
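
/*
 * Note (sketch): wait_event_timeout() returns 0 when the condition is
 * still false at expiry, so a caller wanting to log the outcome could
 * write (warning text is hypothetical):
 *
 *	if (!wait_event_timeout(ioa_cfg->eeh_wait_q,
 *				!pci_channel_offline(pdev),
 *				IPR_PCI_ERROR_RECOVERY_TIMEOUT))
 *		dev_warn(&pdev->dev, "EEH recovery timed out\n");
 */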
9992
9993 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9994 {
9995         int vec_idx;
9996
9997         /* snprintf() always NUL-terminates, so no manual termination is needed */
9998         for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++)
9999                 snprintf(ioa_cfg->vectors_info[vec_idx].desc,
10000                          sizeof(ioa_cfg->vectors_info[vec_idx].desc),
10001                          "host%d-%d", ioa_cfg->host->host_no, vec_idx);
10003 }
10004
10005 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
10006                 struct pci_dev *pdev)
10007 {
10008         int i, rc;
10009
10010         for (i = 1; i < ioa_cfg->nvectors; i++) {
10011                 rc = request_irq(pci_irq_vector(pdev, i),
10012                         ipr_isr_mhrrq,
10013                         0,
10014                         ioa_cfg->vectors_info[i].desc,
10015                         &ioa_cfg->hrrq[i]);
10016                 if (rc) {
10017                         while (--i >= 0)
10018                                 free_irq(pci_irq_vector(pdev, i),
10019                                         &ioa_cfg->hrrq[i]);
10020                         return rc;
10021                 }
10022         }
10023         return 0;
10024 }
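
/*
 * Unwind note (sketch): on a failure at vector i, the loop above releases
 * vectors i-1 down to 0, including vector 0, which ipr_probe_ioa()
 * requested before calling in, so the caller's error path does not need
 * to free any IRQs itself. The generic reverse-unwind shape, with
 * hypothetical acquire()/release() helpers:
 *
 *	for (i = 0; i < n; i++) {
 *		if (acquire(i)) {
 *			while (--i >= 0)
 *				release(i);
 *			return -EIO;
 *		}
 *	}
 */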
10025
10026 /**
10027  * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
10028  * @irq:                interrupt number
10029  * @devp:               pointer to the ioa config struct
10030  * Description: Simply set the msi_received flag to 1 indicating that
10031  * Message Signaled Interrupts are supported.
10032  *
10033  * Return value:
10034  *      IRQ_HANDLED
10035  **/
10036 static irqreturn_t ipr_test_intr(int irq, void *devp)
10037 {
10038         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
10039         unsigned long lock_flags = 0;
10040         irqreturn_t rc = IRQ_HANDLED;
10041
10042         dev_info(&ioa_cfg->pdev->dev, "Received IRQ: %d\n", irq);
10043         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10044
10045         ioa_cfg->msi_received = 1;
10046         wake_up(&ioa_cfg->msi_wait_q);
10047
10048         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10049         return rc;
10050 }
10051
10052 /**
10053  * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
10054  * @ioa_cfg:            ioa config struct
10055  * @pdev:               PCI device struct
10056  * Description: This routine sets up and initiates a test interrupt to determine
10057  * if the interrupt is received via the ipr_test_intr() service routine.
10058  * If the test fails, the driver will fall back to LSI.
10059  *
10060  * Return value:
10061  *      0 on success / non-zero on failure
10062  **/
10063 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
10064 {
10065         int rc;
10066         volatile u32 int_reg;
10067         unsigned long lock_flags = 0;
10068         int irq = pci_irq_vector(pdev, 0);
10069
10070         ENTER;
10071
10072         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10073         init_waitqueue_head(&ioa_cfg->msi_wait_q);
10074         ioa_cfg->msi_received = 0;
10075         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10076         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
10077         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
10078         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10079
10080         rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
10081         if (rc) {
10082                 dev_err(&pdev->dev, "Cannot assign irq %d\n", irq);
10083                 return rc;
10084         } else if (ipr_debug)
10085                 dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
10086
10087         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
10088         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
10089         wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
10090         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10091         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10092
10093         if (!ioa_cfg->msi_received) {
10094                 /* MSI test failed */
10095                 dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
10096                 rc = -EOPNOTSUPP;
10097         } else if (ipr_debug)
10098                 dev_info(&pdev->dev, "MSI test succeeded.\n");
10099
10100         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10101
10102         free_irq(irq, ioa_cfg);
10103
10104         LEAVE;
10105
10106         return rc;
10107 }
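
/*
 * Pattern sketch: the routine above is an instance of the generic "fire
 * one interrupt and see whether it arrives" self-test. With illustrative
 * placeholders (wq, received and poke_device_to_raise_irq are not driver
 * symbols):
 *
 *	init_waitqueue_head(&wq);
 *	received = 0;
 *	rc = request_irq(irq, test_handler, 0, "test", dev);
 *	if (rc)
 *		return rc;
 *	poke_device_to_raise_irq(dev);		device-specific MMIO write
 *	wait_event_timeout(wq, received, HZ);	handler sets 'received'
 *	free_irq(irq, dev);
 *	return received ? 0 : -EOPNOTSUPP;
 */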
10108
10109 /**
10110  * ipr_probe_ioa - Allocates memory and does first stage of initialization
10111  * @pdev:               PCI device struct
10112  * @dev_id:             PCI device id struct
10113  *
10114  * Return value:
10115  *      0 on success / non-zero on failure
10116  **/
10116 static int ipr_probe_ioa(struct pci_dev *pdev,
10117                          const struct pci_device_id *dev_id)
10118 {
10119         struct ipr_ioa_cfg *ioa_cfg;
10120         struct Scsi_Host *host;
10121         unsigned long ipr_regs_pci;
10122         void __iomem *ipr_regs;
10123         int rc = PCIBIOS_SUCCESSFUL;
10124         volatile u32 mask, uproc, interrupts;
10125         unsigned long lock_flags, driver_lock_flags;
10126         unsigned int irq_flag;
10127
10128         ENTER;
10129
10130         dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
10131         host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
10132
10133         if (!host) {
10134                 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
10135                 rc = -ENOMEM;
10136                 goto out;
10137         }
10138
10139         ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
10140         memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
10141         ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
10142
10143         ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
10144
10145         if (!ioa_cfg->ipr_chip) {
10146                 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
10147                         dev_id->vendor, dev_id->device);
10148                 goto out_scsi_host_put;
10149         }
10150
10151         /* set SIS 32 or SIS 64 */
10152         ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
10153         ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
10154         ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
10155         ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
10156
10157         if (ipr_transop_timeout)
10158                 ioa_cfg->transop_timeout = ipr_transop_timeout;
10159         else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
10160                 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
10161         else
10162                 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
10163
10164         ioa_cfg->revid = pdev->revision;
10165
10166         ipr_init_ioa_cfg(ioa_cfg, host, pdev);
10167
10168         ipr_regs_pci = pci_resource_start(pdev, 0);
10169
10170         rc = pci_request_regions(pdev, IPR_NAME);
10171         if (rc < 0) {
10172                 dev_err(&pdev->dev,
10173                         "Couldn't register memory range of registers\n");
10174                 goto out_scsi_host_put;
10175         }
10176
10177         rc = pci_enable_device(pdev);
10178
10179         if (rc || pci_channel_offline(pdev)) {
10180                 if (pci_channel_offline(pdev)) {
10181                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10182                         rc = pci_enable_device(pdev);
10183                 }
10184
10185                 if (rc) {
10186                         dev_err(&pdev->dev, "Cannot enable adapter\n");
10187                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10188                         goto out_release_regions;
10189                 }
10190         }
10191
10192         ipr_regs = pci_ioremap_bar(pdev, 0);
10193
10194         if (!ipr_regs) {
10195                 dev_err(&pdev->dev,
10196                         "Couldn't map memory range of registers\n");
10197                 rc = -ENOMEM;
10198                 goto out_disable;
10199         }
10200
10201         ioa_cfg->hdw_dma_regs = ipr_regs;
10202         ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
10203         ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
10204
10205         ipr_init_regs(ioa_cfg);
10206
10207         if (ioa_cfg->sis64) {
10208                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10209                 if (rc < 0) {
10210                         dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
10211                         rc = dma_set_mask_and_coherent(&pdev->dev,
10212                                                        DMA_BIT_MASK(32));
10213                 }
10214         } else
10215                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10216
10217         if (rc < 0) {
10218                 dev_err(&pdev->dev, "Failed to set DMA mask\n");
10219                 goto cleanup_nomem;
10220         }
10221
10222         rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
10223                                    ioa_cfg->chip_cfg->cache_line_size);
10224
10225         if (rc != PCIBIOS_SUCCESSFUL) {
10226                 dev_err(&pdev->dev, "Write of cache line size failed\n");
10227                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10228                 rc = -EIO;
10229                 goto cleanup_nomem;
10230         }
10231
10232         /* Issue MMIO read to ensure card is not in EEH */
10233         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
10234         ipr_wait_for_pci_err_recovery(ioa_cfg);
10235
10236         if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
10237                 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
10238                         IPR_MAX_MSIX_VECTORS);
10239                 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
10240         }
10241
10242         irq_flag = PCI_IRQ_LEGACY;
10243         if (ioa_cfg->ipr_chip->has_msi)
10244                 irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
10245         rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
10246         if (rc < 0) {
10247                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10248                 goto cleanup_nomem;
10249         }
10250         ioa_cfg->nvectors = rc;
10251
10252         if (!pdev->msi_enabled && !pdev->msix_enabled)
10253                 ioa_cfg->clear_isr = 1;
10254
10255         pci_set_master(pdev);
10256
10257         if (pci_channel_offline(pdev)) {
10258                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10259                 pci_set_master(pdev);
10260                 if (pci_channel_offline(pdev)) {
10261                         rc = -EIO;
10262                         goto out_msi_disable;
10263                 }
10264         }
10265
10266         if (pdev->msi_enabled || pdev->msix_enabled) {
10267                 rc = ipr_test_msi(ioa_cfg, pdev);
10268                 switch (rc) {
10269                 case 0:
10270                         dev_info(&pdev->dev,
10271                                 "Request for %d MSI%ss succeeded.\n", ioa_cfg->nvectors,
10272                                 pdev->msix_enabled ? "-X" : "");
10273                         break;
10274                 case -EOPNOTSUPP:
10275                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10276                         pci_free_irq_vectors(pdev);
10277
10278                         ioa_cfg->nvectors = 1;
10279                         ioa_cfg->clear_isr = 1;
10280                         break;
10281                 default:
10282                         goto out_msi_disable;
10283                 }
10284         }
10285
10286         ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
10287                                 (unsigned int)num_online_cpus(),
10288                                 (unsigned int)IPR_MAX_HRRQ_NUM);
10289
10290         if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
10291                 goto out_msi_disable;
10292
10293         if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
10294                 goto out_msi_disable;
10295
10296         rc = ipr_alloc_mem(ioa_cfg);
10297         if (rc < 0) {
10298                 dev_err(&pdev->dev,
10299                         "Couldn't allocate enough memory for device driver!\n");
10300                 goto out_msi_disable;
10301         }
10302
10303         /* Save away PCI config space for use following IOA reset */
10304         rc = pci_save_state(pdev);
10305
10306         if (rc != PCIBIOS_SUCCESSFUL) {
10307                 dev_err(&pdev->dev, "Failed to save PCI config space\n");
10308                 rc = -EIO;
10309                 goto cleanup_nolog;
10310         }
10311
10312         /*
10313          * If HRRQ updated interrupt is not masked, or reset alert is set,
10314          * the card is in an unknown state and needs a hard reset
10315          */
10316         mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
10317         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
10318         uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
10319         if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
10320                 ioa_cfg->needs_hard_reset = 1;
10321         if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
10322                 ioa_cfg->needs_hard_reset = 1;
10323         if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
10324                 ioa_cfg->ioa_unit_checked = 1;
10325
10326         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10327         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10328         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10329
10330         if (pdev->msi_enabled || pdev->msix_enabled) {
10331                 name_msi_vectors(ioa_cfg);
10332                 rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
10333                         ioa_cfg->vectors_info[0].desc,
10334                         &ioa_cfg->hrrq[0]);
10335                 if (!rc)
10336                         rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
10337         } else {
10338                 rc = request_irq(pdev->irq, ipr_isr,
10339                          IRQF_SHARED,
10340                          IPR_NAME, &ioa_cfg->hrrq[0]);
10341         }
10342         if (rc) {
10343                 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
10344                         pdev->irq, rc);
10345                 goto cleanup_nolog;
10346         }
10347
10348         if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
10349             (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
10350                 ioa_cfg->needs_warm_reset = 1;
10351                 ioa_cfg->reset = ipr_reset_slot_reset;
10352
10353                 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
10354                                                                 WQ_MEM_RECLAIM, host->host_no);
10355
10356                 if (!ioa_cfg->reset_work_q) {
10357                         dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
10358                         rc = -ENOMEM;
10359                         goto out_free_irq;
10360                 }
10361         } else
10362                 ioa_cfg->reset = ipr_reset_start_bist;
10363
10364         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10365         list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
10366         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10367
10368         LEAVE;
10369 out:
10370         return rc;
10371
10372 out_free_irq:
10373         ipr_free_irqs(ioa_cfg);
10374 cleanup_nolog:
10375         ipr_free_mem(ioa_cfg);
10376 out_msi_disable:
10377         ipr_wait_for_pci_err_recovery(ioa_cfg);
10378         pci_free_irq_vectors(pdev);
10379 cleanup_nomem:
10380         iounmap(ipr_regs);
10381 out_disable:
10382         pci_disable_device(pdev);
10383 out_release_regions:
10384         pci_release_regions(pdev);
10385 out_scsi_host_put:
10386         scsi_host_put(host);
10387         goto out;
10388 }
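
/*
 * Error-handling shape (sketch): ipr_probe_ioa() uses the canonical
 * kernel "goto ladder": each label undoes exactly the steps that had
 * succeeded before the failure, in reverse order of acquisition. With
 * hypothetical step_a()/step_b() helpers:
 *
 *	rc = step_a();
 *	if (rc)
 *		goto out;
 *	rc = step_b();
 *	if (rc)
 *		goto undo_a;
 *	return 0;
 * undo_a:
 *	undo_step_a();
 * out:
 *	return rc;
 *
 * Keeping each new acquisition paired with a label on the right rung
 * keeps the unwind correct as the probe sequence grows.
 */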
10389
10390 /**
10391  * ipr_initiate_ioa_bringdown - Bring down an adapter
10392  * @ioa_cfg:            ioa config struct
10393  * @shutdown_type:      shutdown type
10394  *
10395  * Description: This function will initiate bringing down the adapter.
10396  * This consists of issuing an IOA shutdown to the adapter
10397  * to flush the cache, and running BIST.
10398  * If the caller needs to wait on the completion of the reset,
10399  * the caller must sleep on the reset_wait_q.
10400  *
10401  * Return value:
10402  *      none
10403  **/
10404 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
10405                                        enum ipr_shutdown_type shutdown_type)
10406 {
10407         ENTER;
10408         if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
10409                 ioa_cfg->sdt_state = ABORT_DUMP;
10410         ioa_cfg->reset_retries = 0;
10411         ioa_cfg->in_ioa_bringdown = 1;
10412         ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
10413         LEAVE;
10414 }
10415
10416 /**
10417  * __ipr_remove - Remove a single adapter
10418  * @pdev:       pci device struct
10419  *
10420  * Adapter hot plug remove entry point.
10421  *
10422  * Return value:
10423  *      none
10424  **/
10425 static void __ipr_remove(struct pci_dev *pdev)
10426 {
10427         unsigned long host_lock_flags = 0;
10428         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10429         int i;
10430         unsigned long driver_lock_flags;
10431         ENTER;
10432
10433         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10434         while (ioa_cfg->in_reset_reload) {
10435                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10436                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10437                 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10438         }
10439
10440         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
10441                 spin_lock(&ioa_cfg->hrrq[i]._lock);
10442                 ioa_cfg->hrrq[i].removing_ioa = 1;
10443                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
10444         }
10445         wmb();
10446         ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10447
10448         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10449         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10450         flush_work(&ioa_cfg->work_q);
10451         if (ioa_cfg->reset_work_q)
10452                 flush_workqueue(ioa_cfg->reset_work_q);
10453         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
10454         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10455
10456         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10457         list_del(&ioa_cfg->queue);
10458         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10459
10460         if (ioa_cfg->sdt_state == ABORT_DUMP)
10461                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
10462         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10463
10464         ipr_free_all_resources(ioa_cfg);
10465
10466         LEAVE;
10467 }
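
/*
 * Quiesce idiom (sketch): the host lock cannot be held across
 * wait_event(), so the in_reset_reload loop above drops the lock, sleeps
 * until the reset completes, and retakes the lock before rechecking:
 *
 *	spin_lock_irqsave(lock, flags);
 *	while (busy) {
 *		spin_unlock_irqrestore(lock, flags);
 *		wait_event(wq, !busy);
 *		spin_lock_irqsave(lock, flags);
 *	}
 */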
10468
10469 /**
10470  * ipr_remove - IOA hot plug remove entry point
10471  * @pdev:       pci device struct
10472  *
10473  * Adapter hot plug remove entry point.
10474  *
10475  * Return value:
10476  *      none
10477  **/
10478 static void ipr_remove(struct pci_dev *pdev)
10479 {
10480         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10481
10482         ENTER;
10483
10484         ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10485                               &ipr_trace_attr);
10486         ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10487                              &ipr_dump_attr);
10488         sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10489                         &ipr_ioa_async_err_log);
10490         scsi_remove_host(ioa_cfg->host);
10491
10492         __ipr_remove(pdev);
10493
10494         LEAVE;
10495 }
10496
10497 /**
10498  * ipr_probe - Adapter hot plug add entry point
10499  * @pdev:               PCI device struct
10500  * @dev_id:             PCI device id struct
10500  * Return value:
10501  *      0 on success / non-zero on failure
10502  **/
10503 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
10504 {
10505         struct ipr_ioa_cfg *ioa_cfg;
10506         unsigned long flags;
10507         int rc, i;
10508
10509         rc = ipr_probe_ioa(pdev, dev_id);
10510
10511         if (rc)
10512                 return rc;
10513
10514         ioa_cfg = pci_get_drvdata(pdev);
10515         rc = ipr_probe_ioa_part2(ioa_cfg);
10516
10517         if (rc) {
10518                 __ipr_remove(pdev);
10519                 return rc;
10520         }
10521
10522         rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
10523
10524         if (rc) {
10525                 __ipr_remove(pdev);
10526                 return rc;
10527         }
10528
10529         rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
10530                                    &ipr_trace_attr);
10531
10532         if (rc) {
10533                 scsi_remove_host(ioa_cfg->host);
10534                 __ipr_remove(pdev);
10535                 return rc;
10536         }
10537
10538         rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
10539                         &ipr_ioa_async_err_log);
10540
10541         if (rc) {
10542                 /* the dump file has not been created yet at this
10543                  * point, so remove only the trace file */
10544                 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10545                                 &ipr_trace_attr);
10546                 scsi_remove_host(ioa_cfg->host);
10547                 __ipr_remove(pdev);
10548                 return rc;
10549         }
10550
10551         rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
10552                                    &ipr_dump_attr);
10553
10554         if (rc) {
10555                 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10556                                       &ipr_ioa_async_err_log);
10557                 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10558                                       &ipr_trace_attr);
10559                 scsi_remove_host(ioa_cfg->host);
10560                 __ipr_remove(pdev);
10561                 return rc;
10562         }
10563         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10564         ioa_cfg->scan_enabled = 1;
10565         schedule_work(&ioa_cfg->work_q);
10566         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10567
10568         ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
10569
10570         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10571                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
10572                         irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
10573                                         ioa_cfg->iopoll_weight, ipr_iopoll);
10574                 }
10575         }
10576
10577         scsi_scan_host(ioa_cfg->host);
10578
10579         return 0;
10580 }
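
/*
 * Rollback note (sketch): each sysfs file created above is paired with a
 * failure path that removes, in reverse order, only the files created so
 * far before tearing down the host. With hypothetical helpers:
 *
 *	rc = create_attr_b();
 *	if (rc) {
 *		remove_attr_a();	attr_a was created earlier
 *		scsi_remove_host(host);
 *		__ipr_remove(pdev);
 *		return rc;
 *	}
 */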
10581
10582 /**
10583  * ipr_shutdown - Shutdown handler.
10584  * @pdev:       pci device struct
10585  *
10586  * This function is invoked upon system shutdown/reboot. It will issue
10587  * an adapter shutdown to the adapter to flush the write cache.
10588  *
10589  * Return value:
10590  *      none
10591  **/
10592 static void ipr_shutdown(struct pci_dev *pdev)
10593 {
10594         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10595         unsigned long lock_flags = 0;
10596         enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
10597         int i;
10598
10599         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10600         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10601                 ioa_cfg->iopoll_weight = 0;
10602                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
10603                         irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
10604         }
10605
10606         while (ioa_cfg->in_reset_reload) {
10607                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10608                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10609                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10610         }
10611
10612         if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
10613                 shutdown_type = IPR_SHUTDOWN_QUIESCE;
10614
10615         ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
10616         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10617         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10618         if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
10619                 ipr_free_irqs(ioa_cfg);
10620                 pci_disable_device(ioa_cfg->pdev);
10621         }
10622 }
10623
10624 static const struct pci_device_id ipr_pci_table[] = {
10625         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10626                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
10627         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10628                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
10629         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10630                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
10631         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10632                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
10633         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10634                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
10635         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10636                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
10637         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10638                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
10639         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10640                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10641                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10642         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10643               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10644         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10645               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10646               IPR_USE_LONG_TRANSOP_TIMEOUT },
10647         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10648               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10649               IPR_USE_LONG_TRANSOP_TIMEOUT },
10650         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10651               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10652         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10653               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10654               IPR_USE_LONG_TRANSOP_TIMEOUT},
10655         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10656               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10657               IPR_USE_LONG_TRANSOP_TIMEOUT },
10658         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10659               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10660               IPR_USE_LONG_TRANSOP_TIMEOUT },
10661         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10662               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10663         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10664               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10665         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10666               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
10667               IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
10668         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
10669                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
10670         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10671                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
10672         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10673                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10674                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10675         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10676                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10677                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10678         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10679                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10680         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10681                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10682         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10683                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
10684         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10685                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10686         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10687                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10688         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10689                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
10690         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10691                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
10692         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10693                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
10694         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10695                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
10696         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10697                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10698         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10699                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
10700         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10701                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10702         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10703                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10704         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10705                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10706         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10707                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
10708         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10709                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10710         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10711                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10712         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10713                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10714         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10715                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10716         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10717                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10718         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10719                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10720         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10721                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10722         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10723                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
10724         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10725                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10726         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10727                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10728         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10729                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
10730         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10731                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
10732         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10733                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
10734         { }
10735 };
10736 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
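
/*
 * Table layout (illustration): each row above expands to
 *
 *	{ vendor, device, subvendor, subdevice, class, class_mask, driver_data }
 *
 * The PCI core matches a discovered function against these rows and hands
 * the winning row to ipr_probe() as dev_id; dev_id->driver_data then
 * carries per-board flags such as IPR_USE_LONG_TRANSOP_TIMEOUT or
 * IPR_USE_PCI_WARM_RESET. MODULE_DEVICE_TABLE() exports the same table
 * for module autoloading.
 */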
10737
10738 static const struct pci_error_handlers ipr_err_handler = {
10739         .error_detected = ipr_pci_error_detected,
10740         .mmio_enabled = ipr_pci_mmio_enabled,
10741         .slot_reset = ipr_pci_slot_reset,
10742 };
10743
10744 static struct pci_driver ipr_driver = {
10745         .name = IPR_NAME,
10746         .id_table = ipr_pci_table,
10747         .probe = ipr_probe,
10748         .remove = ipr_remove,
10749         .shutdown = ipr_shutdown,
10750         .err_handler = &ipr_err_handler,
10751 };
10752
10753 /**
10754  * ipr_halt_done - Shutdown prepare completion
10755  * @ipr_cmd:    ipr command struct
10756  * Return value:
10757  *      none
10758  **/
10759 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10760 {
10761         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
10762 }
10763
10764 /**
10765  * ipr_halt - Issue shutdown prepare to all adapters
10766  * @nb:         notifier block
10767  * @event:      system event (SYS_RESTART, SYS_HALT, or SYS_POWER_OFF)
10768  * @buf:        unused
10769  * Return value:
10770  *      NOTIFY_OK / NOTIFY_DONE if the event is not of interest
10769  **/
10770 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10771 {
10772         struct ipr_cmnd *ipr_cmd;
10773         struct ipr_ioa_cfg *ioa_cfg;
10774         unsigned long flags = 0, driver_lock_flags;
10775
10776         if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10777                 return NOTIFY_DONE;
10778
10779         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10780
10781         list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10782                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10783                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
10784                     (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
10785                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10786                         continue;
10787                 }
10788
10789                 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10790                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10791                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10792                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10793                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10794
10795                 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10796                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10797         }
10798         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10799
10800         return NOTIFY_OK;
10801 }
10802
10803 static struct notifier_block ipr_notifier = {
10804         .notifier_call = ipr_halt,
10805 };
10806
10807 /**
10808  * ipr_init - Module entry point
10809  *
10810  * Return value:
10811  *      0 on success / negative value on failure
10812  **/
10813 static int __init ipr_init(void)
10814 {
10815         ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10816                  IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10817
10818         register_reboot_notifier(&ipr_notifier);
10819         return pci_register_driver(&ipr_driver);
10820 }
10821
10822 /**
10823  * ipr_exit - Module unload
10824  *
10825  * Module unload entry point.
10826  *
10827  * Return value:
10828  *      none
10829  **/
10830 static void __exit ipr_exit(void)
10831 {
10832         unregister_reboot_notifier(&ipr_notifier);
10833         pci_unregister_driver(&ipr_driver);
10834 }
10835
10836 module_init(ipr_init);
10837 module_exit(ipr_exit);