/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *      - Ultra 320 SCSI controller
 *      - PCI-X host interface
 *      - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *      - Non-Volatile Write Cache
 *      - Supports attachment of non-RAID disks, tape, and optical devices
 *      - RAID Levels 0, 5, 10
 *      - Hot spare
 *      - Background Parity Checking
 *      - Background Data Scrubbing
 *      - Ability to increase the capacity of an existing RAID 5 disk array
 *              by adding disks
 *
 * Driver Features:
 *      - Tagged command queuing
 *      - Adapter microcode download
 *      - PCI hot plug
 *      - SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 16;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
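/*
 * Note: each entry below ends with a brace-initialized block that fills
 * in the chip's interrupt register offsets (the structure's final
 * member) by positional initialization.
 */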
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
        { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
                .mailbox = 0x0042C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x0022C,
                        .clr_interrupt_mask_reg = 0x00230,
                        .clr_interrupt_mask_reg32 = 0x00230,
                        .sense_interrupt_mask_reg = 0x0022C,
                        .sense_interrupt_mask_reg32 = 0x0022C,
                        .clr_interrupt_reg = 0x00228,
                        .clr_interrupt_reg32 = 0x00228,
                        .sense_interrupt_reg = 0x00224,
                        .sense_interrupt_reg32 = 0x00224,
                        .ioarrin_reg = 0x00404,
                        .sense_uproc_interrupt_reg = 0x00214,
                        .sense_uproc_interrupt_reg32 = 0x00214,
                        .set_uproc_interrupt_reg = 0x00214,
                        .set_uproc_interrupt_reg32 = 0x00214,
                        .clr_uproc_interrupt_reg = 0x00218,
                        .clr_uproc_interrupt_reg32 = 0x00218
                }
        },
        { /* Snipe and Scamp */
                .mailbox = 0x0052C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x00288,
                        .clr_interrupt_mask_reg = 0x0028C,
                        .clr_interrupt_mask_reg32 = 0x0028C,
                        .sense_interrupt_mask_reg = 0x00288,
                        .sense_interrupt_mask_reg32 = 0x00288,
                        .clr_interrupt_reg = 0x00284,
                        .clr_interrupt_reg32 = 0x00284,
                        .sense_interrupt_reg = 0x00280,
                        .sense_interrupt_reg32 = 0x00280,
                        .ioarrin_reg = 0x00504,
                        .sense_uproc_interrupt_reg = 0x00290,
                        .sense_uproc_interrupt_reg32 = 0x00290,
                        .set_uproc_interrupt_reg = 0x00290,
                        .set_uproc_interrupt_reg32 = 0x00290,
                        .clr_uproc_interrupt_reg = 0x00294,
                        .clr_uproc_interrupt_reg32 = 0x00294
                }
        },
        { /* CRoC */
                .mailbox = 0x00044,
                .max_cmds = 1000,
                .cache_line_size = 0x20,
                .clear_isr = 0,
                .iopoll_weight = 64,
                {
                        .set_interrupt_mask_reg = 0x00010,
                        .clr_interrupt_mask_reg = 0x00018,
                        .clr_interrupt_mask_reg32 = 0x0001C,
                        .sense_interrupt_mask_reg = 0x00010,
                        .sense_interrupt_mask_reg32 = 0x00014,
                        .clr_interrupt_reg = 0x00008,
                        .clr_interrupt_reg32 = 0x0000C,
                        .sense_interrupt_reg = 0x00000,
                        .sense_interrupt_reg32 = 0x00004,
                        .ioarrin_reg = 0x00070,
                        .sense_uproc_interrupt_reg = 0x00020,
                        .sense_uproc_interrupt_reg32 = 0x00024,
                        .set_uproc_interrupt_reg = 0x00020,
                        .set_uproc_interrupt_reg32 = 0x00024,
                        .clr_uproc_interrupt_reg = 0x00028,
                        .clr_uproc_interrupt_reg32 = 0x0002C,
                        .init_feedback_reg = 0x0005C,
                        .dump_addr_reg = 0x00064,
                        .dump_data_reg = 0x00068,
                        .endian_swap_reg = 0x00084
                }
        },
};

static const struct ipr_chip_t ipr_chip[] = {
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
        IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
                 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default: 16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
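
/*
 * Example (illustrative only) of loading the driver with parameters:
 *   modprobe ipr max_speed=2 fastfail=1 number_of_msix=8
 */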

/* A constant array of IOASCs/URCs/Error Messages */
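/* Each entry: the IOASC, a logging control flag, the log level at which
   the message is reported, and the user-visible message text. */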
static const
struct ipr_error_table_t ipr_error_table[] = {
        {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8155: An unknown error was received"},
        {0x00330000, 0, 0,
        "Soft underlength error"},
        {0x005A0000, 0, 0,
        "Command to be cancelled not found"},
        {0x00808000, 0, 0,
        "Qualified success"},
        {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Soft device bus error recovered by the IOA"},
        {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4101: Soft device bus fabric error"},
        {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block guard error recovered by the device"},
        {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block reference tag error recovered by the device"},
        {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered scatter list tag / sequence number error"},
        {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
        {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered logical block sequence number error on IOA to Host transfer"},
        {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Recovered logical block reference tag error detected by the IOA"},
        {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Logical block guard error recovered by the IOA"},
        {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Device sector reassign successful"},
        {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by device rewrite procedures"},
        {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
        "7001: IOA sector reassignment successful"},
        {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Soft media error. Sector reassignment recommended"},
        {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by IOA rewrite procedures"},
        {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft PCI bus error recovered by the IOA"},
        {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the IOA"},
        {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the device"},
        {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft IOA error recovered by the IOA"},
        {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFA: Undefined device response recovered by the IOA"},
        {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device bus error, message or command phase"},
        {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Task Management Function failed"},
        {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Failure prediction threshold exceeded"},
        {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8009: Impending cache battery pack failure"},
        {0x02040100, 0, 0,
        "Logical Unit in process of becoming ready"},
        {0x02040200, 0, 0,
        "Initializing command required"},
        {0x02040400, 0, 0,
        "34FF: Disk device format in progress"},
        {0x02040C00, 0, 0,
        "Logical unit not accessible, target port in unavailable state"},
        {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9070: IOA requested reset"},
        {0x023F0000, 0, 0,
        "Synchronization required"},
        {0x02408500, 0, 0,
        "IOA microcode download required"},
        {0x02408600, 0, 0,
        "Device bus connection is prohibited by host"},
        {0x024E0000, 0, 0,
        "Not ready, IOA shutdown"},
        {0x025A0000, 0, 0,
        "Not ready, IOA has been shutdown"},
        {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: Storage subsystem configuration error"},
        {0x03110B00, 0, 0,
        "FFF5: Medium error, data unreadable, recommend reassign"},
        {0x03110C00, 0, 0,
        "7000: Medium error, data unreadable, do not reassign"},
        {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF3: Disk media format bad"},
        {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3002: Addressed device failed to respond to selection"},
        {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3100: Device bus error"},
        {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3109: IOA timed out a device command"},
        {0x04088000, 0, 0,
        "3120: SCSI bus is not operational"},
        {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4100: Hard device bus fabric error"},
        {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block guard error detected by the device"},
        {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block reference tag error detected by the device"},
        {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Scatter list tag / sequence number error"},
        {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Logical block CRC error on IOA to Host transfer"},
        {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Logical block sequence number error on IOA to Host transfer"},
        {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block reference tag error detected by the IOA"},
        {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block guard error detected by the IOA"},
        {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9000: IOA reserved area data check"},
        {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9001: IOA reserved area invalid data pattern"},
        {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9002: IOA reserved area LRC error"},
        {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
        "Hardware Error, IOA metadata access error"},
        {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
        "102E: Out of alternate sectors for disk storage"},
        {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer underlength error"},
        {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer overlength error"},
        {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3400: Logical unit failure"},
        {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Device microcode is corrupt"},
        {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: PCI bus error"},
        {0x04430000, 1, 0,
        "Unsupported device bus message received"},
        {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Disk device problem"},
        {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Permanent IOA failure"},
        {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3010: Disk device returned wrong response to IOA"},
        {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
        "8151: IOA microcode error"},
        {0x04448500, 0, 0,
        "Device bus status error"},
        {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
        "8157: IOA error requiring IOA reset to recover"},
        {0x04448700, 0, 0,
        "ATA device status error"},
        {0x04490000, 0, 0,
        "Message reject received from the device"},
        {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8008: A permanent cache battery pack failure occurred"},
        {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9090: Disk unit has been modified after the last known status"},
        {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9081: IOA detected device error"},
        {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
        "9082: IOA detected device error"},
        {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: Device bus error, message or command phase"},
        {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: SAS Command / Task Management Function failed"},
        {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9091: Incorrect hardware configuration change has been detected"},
        {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9073: Invalid multi-adapter configuration"},
        {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4010: Incorrect connection between cascaded expanders"},
        {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4020: Connections exceed IOA design limits"},
        {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4030: Incorrect multipath connection"},
        {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4110: Unsupported enclosure function"},
        {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
        "4120: SAS cable VPD cannot be read"},
        {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Command to logical unit failed"},
        {0x05240000, 1, 0,
        "Illegal request, invalid request type or request packet"},
        {0x05250000, 0, 0,
        "Illegal request, invalid resource handle"},
        {0x05258000, 0, 0,
        "Illegal request, commands not allowed to this device"},
        {0x05258100, 0, 0,
        "Illegal request, command not allowed to a secondary adapter"},
        {0x05258200, 0, 0,
        "Illegal request, command not allowed to a non-optimized resource"},
        {0x05260000, 0, 0,
        "Illegal request, invalid field in parameter list"},
        {0x05260100, 0, 0,
        "Illegal request, parameter not supported"},
        {0x05260200, 0, 0,
        "Illegal request, parameter value invalid"},
        {0x052C0000, 0, 0,
        "Illegal request, command sequence error"},
        {0x052C8000, 1, 0,
        "Illegal request, dual adapter support not enabled"},
        {0x052C8100, 1, 0,
        "Illegal request, another cable connector was physically disabled"},
        {0x054E8000, 1, 0,
        "Illegal request, inconsistent group id/group count"},
        {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9031: Array protection temporarily suspended, protection resuming"},
        {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9040: Array protection temporarily suspended, protection resuming"},
        {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4080: IOA exceeded maximum operating temperature"},
        {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4085: Service required"},
        {0x060B8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4086: SAS Adapter Hardware Configuration Error"},
        {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3140: Device bus not ready to ready transition"},
        {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset"},
        {0x06290500, 0, 0,
        "FFFE: SCSI bus transition to single ended"},
        {0x06290600, 0, 0,
        "FFFE: SCSI bus transition to LVD"},
        {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset by another initiator"},
        {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3029: A device replacement has occurred"},
        {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4102: Device bus fabric performance degradation"},
        {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9051: IOA cache data exists for a missing or failed device"},
        {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
        {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9025: Disk unit is not supported at its physical location"},
        {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: IOA detected a SCSI bus configuration error"},
        {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3150: SCSI bus configuration error"},
        {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9074: Asymmetric advanced function disk configuration"},
        {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4040: Incomplete multipath connection between IOA and enclosure"},
        {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
        "4041: Incomplete multipath connection between enclosure and device"},
        {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9075: Incomplete multipath connection between IOA and remote IOA"},
        {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9076: Configuration error, missing remote IOA"},
        {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4050: Enclosure does not support a required multipath function"},
        {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
        "4121: Configuration error, required cable is missing"},
        {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
        "4122: Cable is not plugged into the correct location on remote IOA"},
        {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "4123: Configuration error, invalid cable vital product data"},
        {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "4124: Configuration error, both cable ends are plugged into the same IOA"},
        {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4070: Logically bad block written on device"},
        {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9041: Array protection temporarily suspended"},
        {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9042: Corrupt array parity detected on specified device"},
        {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9030: Array no longer protected due to missing or failed disk unit"},
        {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9071: Link operational transition"},
        {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9072: Link not operational transition"},
        {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9032: Array exposed but still protected"},
        {0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
        "70DD: Device forced failed by disrupt device command"},
        {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4061: Multipath redundancy level got better"},
        {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4060: Multipath redundancy level got worse"},
        {0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
        "9083: Device raw mode enabled"},
        {0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
        "9084: Device raw mode disabled"},
        {0x07270000, 0, 0,
        "Failure due to other device"},
        {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9008: IOA does not support functions expected by devices"},
        {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9010: Cache data associated with attached devices cannot be found"},
        {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9011: Cache data belongs to devices other than those attached"},
        {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9020: Array missing 2 or more devices with only 1 device present"},
        {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9021: Array missing 2 or more devices with 2 or more devices present"},
        {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9022: Exposed array is missing a required device"},
        {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9023: Array member(s) not at required physical locations"},
        {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9024: Array not functional due to present hardware configuration"},
        {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9026: Array not functional due to present hardware configuration"},
        {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9027: Array is missing a device and parity is out of sync"},
        {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9028: Maximum number of arrays already exist"},
        {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9050: Required cache data cannot be located for a disk unit"},
        {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9052: Cache data exists for a device that has been modified"},
        {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9054: IOA resources not available due to previous problems"},
        {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9092: Disk unit requires initialization before use"},
        {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9029: Incorrect hardware configuration change has been detected"},
        {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9060: One or more disk pairs are missing from an array"},
        {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9061: One or more disks are missing from an array"},
        {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9062: One or more disks are missing from an array"},
        {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9063: Maximum number of functional arrays has been exceeded"},
        {0x07279A00, 0, 0,
        "Data protect, other volume set problem"},
        {0x0B260000, 0, 0,
        "Aborted command, invalid descriptor"},
        {0x0B3F9000, 0, 0,
        "Target operating conditions have changed, dual adapter takeover"},
        {0x0B530200, 0, 0,
        "Aborted command, medium removal prevented"},
        {0x0B5A0000, 0, 0,
        "Command terminated by host"},
        {0x0B5B8000, 0, 0,
        "Aborted command, command terminated by host"}
};

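/*
 * SES product-ID match table. In the match logic (see
 * ipr_find_ses_entry() later in this file), an 'X' in the compare mask
 * means that product-ID byte must match; any other character (e.g. '*')
 * is treated as a wildcard.
 */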
static const struct ipr_ses_table_entry ipr_ses_table[] = {
        { "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
        { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
        { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
        { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
        { "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
                                   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:    ipr command struct
 * @type:               trace type
 * @add_data:   additional data
 *
 * Return value:
 *      none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
                         u8 type, u32 add_data)
{
        struct ipr_trace_entry *trace_entry;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        unsigned int trace_index;

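        /* Atomically claim the next slot; masking the index keeps the
         * trace buffer circular, silently overwriting the oldest entries */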
        trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
        trace_entry = &ioa_cfg->trace[trace_index];
        trace_entry->time = jiffies;
        trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
        trace_entry->type = type;
        if (ipr_cmd->ioa_cfg->sis64)
                trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
        else
                trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
        trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
        trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
        trace_entry->u.add_data = add_data;
        wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
        unsigned long lock_flags;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        ipr_cmd->done(ipr_cmd);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
        struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
        struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
        dma_addr_t dma_addr = ipr_cmd->dma_addr;
        int hrrq_id;

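        /* Preserve the command's assigned HRRQ id across the memset of
         * the command packet below */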
        hrrq_id = ioarcb->cmd_pkt.hrrq_id;
        memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
        ioarcb->cmd_pkt.hrrq_id = hrrq_id;
        ioarcb->data_transfer_length = 0;
        ioarcb->read_data_transfer_length = 0;
        ioarcb->ioadl_len = 0;
        ioarcb->read_ioadl_len = 0;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioarcb->u.sis64_addr_data.data_ioadl_addr =
                        cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
                ioasa64->u.gata.status = 0;
        } else {
                ioarcb->write_ioadl_addr =
                        cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
                ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
                ioasa->u.gata.status = 0;
        }

        ioasa->hdr.ioasc = 0;
        ioasa->hdr.residual_data_len = 0;
        ipr_cmd->scsi_cmd = NULL;
        ipr_cmd->qc = NULL;
        ipr_cmd->sense_buffer[0] = 0;
        ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:    ipr command struct
 * @fast_done:  fast done function call-back
 *
 * Return value:
 *      none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
                              void (*fast_done) (struct ipr_cmnd *))
{
        ipr_reinit_ipr_cmnd(ipr_cmd);
        ipr_cmd->u.scratch = 0;
        ipr_cmd->sibling = NULL;
        ipr_cmd->eh_comp = NULL;
        ipr_cmd->fast_done = fast_done;
        timer_setup(&ipr_cmd->timer, NULL, 0);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:       hrr queue
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
        struct ipr_cmnd *ipr_cmd = NULL;

        if (likely(!list_empty(&hrrq->hrrq_free_q))) {
                ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
                        struct ipr_cmnd, queue);
                list_del(&ipr_cmd->queue);
        }

        return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd =
                __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
        ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
        return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:    ioa config struct
 * @clr_ints:     interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *      none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
                                          u32 clr_ints)
{
        volatile u32 int_reg;
        int i;

        /* Stop new interrupts */
        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
                spin_lock(&ioa_cfg->hrrq[i]._lock);
                ioa_cfg->hrrq[i].allow_interrupts = 0;
                spin_unlock(&ioa_cfg->hrrq[i]._lock);
        }
        wmb();

        /* Set interrupt mask to stop all new interrupts */
        if (ioa_cfg->sis64)
                writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
        else
                writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

        /* Clear any pending interrupts */
        if (ioa_cfg->sis64)
                writel(~0, ioa_cfg->regs.clr_interrupt_reg);
        writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
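        /* Read back a register so the posted MMIO writes above are
         * flushed to the adapter before we return */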
        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

        if (pcix_cmd_reg == 0)
                return 0;

        if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
                dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
                return -EIO;
        }

        ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
        return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

        if (pcix_cmd_reg) {
                if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                          ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
                        dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
                        return -EIO;
                }
        }

        return 0;
}

/**
 * __ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *      none
 **/
static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct ata_queued_cmd *qc = ipr_cmd->qc;
        struct ipr_sata_port *sata_port = qc->ap->private_data;

        qc->err_mask |= AC_ERR_OTHER;
        sata_port->ioasa.status |= ATA_BUSY;
        ata_qc_complete(qc);
        if (ipr_cmd->eh_comp)
                complete(ipr_cmd->eh_comp);
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
        unsigned long hrrq_flags;

        spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
        __ipr_sata_eh_done(ipr_cmd);
        spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *      none
 **/
static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

        scsi_cmd->result |= (DID_ERROR << 16);

        scsi_dma_unmap(ipr_cmd->scsi_cmd);
        scsi_cmd->scsi_done(scsi_cmd);
        if (ipr_cmd->eh_comp)
                complete(ipr_cmd->eh_comp);
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
        unsigned long hrrq_flags;
        struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;

        spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
        __ipr_scsi_eh_done(ipr_cmd);
        spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:    ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *      none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd, *temp;
        struct ipr_hrr_queue *hrrq;

        ENTER;
        for_each_hrrq(hrrq, ioa_cfg) {
                spin_lock(&hrrq->_lock);
                list_for_each_entry_safe(ipr_cmd,
                                        temp, &hrrq->hrrq_pending_q, queue) {
                        list_del(&ipr_cmd->queue);

                        ipr_cmd->s.ioasa.hdr.ioasc =
                                cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
                        ipr_cmd->s.ioasa.hdr.ilid =
                                cpu_to_be32(IPR_DRIVER_ILID);

                        if (ipr_cmd->scsi_cmd)
                                ipr_cmd->done = __ipr_scsi_eh_done;
                        else if (ipr_cmd->qc)
                                ipr_cmd->done = __ipr_sata_eh_done;

                        ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
                                     IPR_IOASC_IOA_WAS_RESET);
                        del_timer(&ipr_cmd->timer);
                        ipr_cmd->done(ipr_cmd);
                }
                spin_unlock(&hrrq->_lock);
        }
        LEAVE;
}

/**
 * ipr_send_command - Send driver initiated requests.
 * @ipr_cmd:            ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 *      none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

        if (ioa_cfg->sis64) {
                /* The default size is 256 bytes */
                send_dma_addr |= 0x1;

                /* If the number of ioadls * size of ioadl > 128 bytes,
                   then use a 512 byte ioarcb */
                if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
                        send_dma_addr |= 0x4;
                writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
        } else
                writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:            ipr command struct
 * @done:                       done function
 * @timeout_func:       timeout function
 * @timeout:            timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *      none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
                       void (*done) (struct ipr_cmnd *),
                       void (*timeout_func) (struct timer_list *), u32 timeout)
{
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

        ipr_cmd->done = done;

        ipr_cmd->timer.expires = jiffies + timeout;
        ipr_cmd->timer.function = timeout_func;

        add_timer(&ipr_cmd->timer);

        ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

        ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:    ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *      none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
        if (ipr_cmd->sibling)
                ipr_cmd->sibling = NULL;
        else
                complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:    ipr command struct
 * @dma_addr:   dma address
 * @len:        transfer length
 * @flags:      ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 *      nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
                           u32 len, int flags)
{
        struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
        struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

        ipr_cmd->dma_use_sg = 1;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioadl64->flags = cpu_to_be32(flags);
                ioadl64->data_len = cpu_to_be32(len);
                ioadl64->address = cpu_to_be64(dma_addr);

                ipr_cmd->ioarcb.ioadl_len =
                        cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
                ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
        } else {
                ioadl->flags_and_data_len = cpu_to_be32(flags | len);
                ioadl->address = cpu_to_be32(dma_addr);

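                /* Read transfers (device to host) are tracked in the
                 * IOARCB's separate read IOADL fields on SIS32 */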
                if (flags == IPR_IOADL_FLAGS_READ_LAST) {
                        ipr_cmd->ioarcb.read_ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
                } else {
                        ipr_cmd->ioarcb.ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
                }
        }
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:    ipr command struct
 * @timeout_func:       function to invoke if command times out
 * @timeout:    timeout
 *
 * Return value:
 *      none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
                                  void (*timeout_func) (struct timer_list *),
                                  u32 timeout)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        init_completion(&ipr_cmd->completion);
        ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

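        /* The host lock is held on entry; drop it while we sleep so the
         * interrupt handler can complete the command and wake us up */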
        spin_unlock_irq(ioa_cfg->host->host_lock);
        wait_for_completion(&ipr_cmd->completion);
        spin_lock_irq(ioa_cfg->host->host_lock);
}

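/**
 * ipr_get_hrrq_index - Pick an HRR queue for a new command
 * @ioa_cfg:    ioa config struct
 *
 * Round-robins across queues 1..(hrrq_num - 1); queue 0 (IPR_INIT_HRRQ)
 * is left for initialization and error recovery commands.
 *
 * Return value:
 *      hrrq index
 **/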
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
        unsigned int hrrq;

        if (ioa_cfg->hrrq_num == 1)
                hrrq = 0;
        else {
                hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
                hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
        }
        return hrrq;
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:    ioa config struct
 * @type:               HCAM type
 * @hostrcb:    hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *      none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
                          struct ipr_hostrcb *hostrcb)
{
        struct ipr_cmnd *ipr_cmd;
        struct ipr_ioarcb *ioarcb;

        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
                ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
                list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

                ipr_cmd->u.hostrcb = hostrcb;
                ioarcb = &ipr_cmd->ioarcb;

                ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
                ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
                ioarcb->cmd_pkt.cdb[1] = type;
                ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
                ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

                ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
                               sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

                if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
                        ipr_cmd->done = ipr_process_ccn;
                else
                        ipr_cmd->done = ipr_process_error;

                ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

                ipr_send_command(ipr_cmd);
        } else {
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
        }
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:        resource entry struct
 * @proto:      cfgte device bus protocol value
 *
 * Return value:
 *      none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
        switch (proto) {
        case IPR_PROTO_SATA:
        case IPR_PROTO_SAS_STP:
                res->ata_class = ATA_DEV_ATA;
                break;
        case IPR_PROTO_SATA_ATAPI:
        case IPR_PROTO_SAS_STP_ATAPI:
                res->ata_class = ATA_DEV_ATAPI;
                break;
        default:
                res->ata_class = ATA_DEV_UNKNOWN;
                break;
        }
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
                               struct ipr_config_table_entry_wrapper *cfgtew)
{
        int found = 0;
        unsigned int proto;
        struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
        struct ipr_resource_entry *gscsi_res = NULL;

        res->needs_sync_complete = 0;
        res->in_erp = 0;
        res->add_to_ml = 0;
        res->del_from_ml = 0;
        res->resetting_device = 0;
        res->reset_occurred = 0;
        res->sdev = NULL;
        res->sata_port = NULL;

        if (ioa_cfg->sis64) {
                proto = cfgtew->u.cfgte64->proto;
                res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
                res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
                res->qmodel = IPR_QUEUEING_MODEL64(res);
                res->type = cfgtew->u.cfgte64->res_type;

                memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
                        sizeof(res->res_path));

                res->bus = 0;
                memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                        sizeof(res->dev_lun.scsi_lun));
                res->lun = scsilun_to_int(&res->dev_lun);

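                /*
                 * Generic SCSI devices that share a dev_id reuse the same
                 * target id; other resource types are placed on synthetic
                 * virtual buses with ids allocated from the bitmaps below.
                 */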
1225                 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1226                         list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1227                                 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1228                                         found = 1;
1229                                         res->target = gscsi_res->target;
1230                                         break;
1231                                 }
1232                         }
1233                         if (!found) {
1234                                 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1235                                                                   ioa_cfg->max_devs_supported);
1236                                 set_bit(res->target, ioa_cfg->target_ids);
1237                         }
1238                 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1239                         res->bus = IPR_IOAFP_VIRTUAL_BUS;
1240                         res->target = 0;
1241                 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1242                         res->bus = IPR_ARRAY_VIRTUAL_BUS;
1243                         res->target = find_first_zero_bit(ioa_cfg->array_ids,
1244                                                           ioa_cfg->max_devs_supported);
1245                         set_bit(res->target, ioa_cfg->array_ids);
1246                 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1247                         res->bus = IPR_VSET_VIRTUAL_BUS;
1248                         res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1249                                                           ioa_cfg->max_devs_supported);
1250                         set_bit(res->target, ioa_cfg->vset_ids);
1251                 } else {
1252                         res->target = find_first_zero_bit(ioa_cfg->target_ids,
1253                                                           ioa_cfg->max_devs_supported);
1254                         set_bit(res->target, ioa_cfg->target_ids);
1255                 }
1256         } else {
1257                 proto = cfgtew->u.cfgte->proto;
1258                 res->qmodel = IPR_QUEUEING_MODEL(res);
1259                 res->flags = cfgtew->u.cfgte->flags;
1260                 if (res->flags & IPR_IS_IOA_RESOURCE)
1261                         res->type = IPR_RES_TYPE_IOAFP;
1262                 else
1263                         res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1264
1265                 res->bus = cfgtew->u.cfgte->res_addr.bus;
1266                 res->target = cfgtew->u.cfgte->res_addr.target;
1267                 res->lun = cfgtew->u.cfgte->res_addr.lun;
1268                 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
1269         }
1270
1271         ipr_update_ata_class(res, proto);
1272 }
1273
1274 /**
1275  * ipr_is_same_device - Determine if two devices are the same.
1276  * @res:        resource entry struct
1277  * @cfgtew:     config table entry wrapper struct
1278  *
1279  * Return value:
1280  *      1 if the devices are the same / 0 otherwise
1281  **/
1282 static int ipr_is_same_device(struct ipr_resource_entry *res,
1283                               struct ipr_config_table_entry_wrapper *cfgtew)
1284 {
1285         if (res->ioa_cfg->sis64) {
1286                 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1287                                         sizeof(cfgtew->u.cfgte64->dev_id)) &&
1288                         !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1289                                         sizeof(cfgtew->u.cfgte64->lun))) {
1290                         return 1;
1291                 }
1292         } else {
1293                 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1294                     res->target == cfgtew->u.cfgte->res_addr.target &&
1295                     res->lun == cfgtew->u.cfgte->res_addr.lun)
1296                         return 1;
1297         }
1298
1299         return 0;
1300 }
1301
1302 /**
1303  * __ipr_format_res_path - Format the resource path for printing.
1304  * @res_path:   resource path
1305  * @buffer:     buffer
1306  * @len:        length of buffer provided
1307  *
1308  * Return value:
1309  *      pointer to buffer
1310  **/
1311 static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
1312 {
1313         int i;
1314         char *p = buffer;
1315
1316         *p = '\0';
1317         p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1318         for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1319                 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
1320
1321         return buffer;
1322 }
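/*
 * For illustration: a res_path of { 0x00, 0x02, 0x04, 0xff, ... } is
 * rendered as "00-02-04".  The first 0xff byte terminates the walk,
 * and the (i * 3) < len test bounds the three-characters-per-element
 * ("-XX") output to the caller's buffer.
 */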
1323
1324 /**
1325  * ipr_format_res_path - Format the resource path for printing.
1326  * @ioa_cfg:    ioa config struct
1327  * @res_path:   resource path
1328  * @buffer:     buffer
1329  * @len:        length of buffer provided
1330  *
1331  * Return value:
1332  *      pointer to buffer
1333  **/
1334 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1335                                  u8 *res_path, char *buffer, int len)
1336 {
1337         char *p = buffer;
1338
1339         *p = '\0';
1340         p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1341                 __ipr_format_res_path(res_path, p, len - (p - buffer));
1342         return buffer;
1343 }
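/*
 * Building on the example above, ipr_format_res_path() prefixes the
 * SCSI host number, so host 2 with the same path logs as "2/00-02-04".
 * The space handed down to __ipr_format_res_path() is len - (p - buffer),
 * i.e. whatever remains after the "2/" prefix was written.
 */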
1344
1345 /**
1346  * ipr_update_res_entry - Update the resource entry.
1347  * @res:        resource entry struct
1348  * @cfgtew:     config table entry wrapper struct
1349  *
1350  * Return value:
1351  *      none
1352  **/
1353 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1354                                  struct ipr_config_table_entry_wrapper *cfgtew)
1355 {
1356         char buffer[IPR_MAX_RES_PATH_LENGTH];
1357         unsigned int proto;
1358         int new_path = 0;
1359
1360         if (res->ioa_cfg->sis64) {
1361                 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1362                 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1363                 res->type = cfgtew->u.cfgte64->res_type;
1364
1365                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1366                         sizeof(struct ipr_std_inq_data));
1367
1368                 res->qmodel = IPR_QUEUEING_MODEL64(res);
1369                 proto = cfgtew->u.cfgte64->proto;
1370                 res->res_handle = cfgtew->u.cfgte64->res_handle;
1371                 res->dev_id = cfgtew->u.cfgte64->dev_id;
1372
1373                 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1374                         sizeof(res->dev_lun.scsi_lun));
1375
1376                 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1377                                         sizeof(res->res_path))) {
1378                         memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1379                                 sizeof(res->res_path));
1380                         new_path = 1;
1381                 }
1382
1383                 if (res->sdev && new_path)
1384                         sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1385                                     ipr_format_res_path(res->ioa_cfg,
1386                                         res->res_path, buffer, sizeof(buffer)));
1387         } else {
1388                 res->flags = cfgtew->u.cfgte->flags;
1389                 if (res->flags & IPR_IS_IOA_RESOURCE)
1390                         res->type = IPR_RES_TYPE_IOAFP;
1391                 else
1392                         res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1393
1394                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1395                         sizeof(struct ipr_std_inq_data));
1396
1397                 res->qmodel = IPR_QUEUEING_MODEL(res);
1398                 proto = cfgtew->u.cfgte->proto;
1399                 res->res_handle = cfgtew->u.cfgte->res_handle;
1400         }
1401
1402         ipr_update_ata_class(res, proto);
1403 }
1404
1405 /**
1406  * ipr_clear_res_target - Clear the bit in the bit map representing the target
1407  *                        for the resource.
1408  * @res:        resource entry struct
1410  *
1411  * Return value:
1412  *      none
1413  **/
1414 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1415 {
1416         struct ipr_resource_entry *gscsi_res = NULL;
1417         struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1418
1419         if (!ioa_cfg->sis64)
1420                 return;
1421
1422         if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1423                 clear_bit(res->target, ioa_cfg->array_ids);
1424         else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1425                 clear_bit(res->target, ioa_cfg->vset_ids);
1426         else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1427                 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1428                         if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1429                                 return;
1430                 clear_bit(res->target, ioa_cfg->target_ids);
1431
1432         } else if (res->bus == 0)
1433                 clear_bit(res->target, ioa_cfg->target_ids);
1434 }
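/*
 * This is the mirror image of the ID allocation in ipr_init_res_entry():
 * array and vset targets release their bitmap bit directly, while a
 * generic SCSI target bit is freed only once no other resource on
 * used_res_q still shares the same dev_id, i.e. the last path to the
 * device is gone.
 */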
1435
1436 /**
1437  * ipr_handle_config_change - Handle a config change from the adapter
1438  * @ioa_cfg:    ioa config struct
1439  * @hostrcb:    hostrcb
1440  *
1441  * Return value:
1442  *      none
1443  **/
1444 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1445                                      struct ipr_hostrcb *hostrcb)
1446 {
1447         struct ipr_resource_entry *res = NULL;
1448         struct ipr_config_table_entry_wrapper cfgtew;
1449         __be32 cc_res_handle;
1451         u32 is_ndn = 1;
1452
1453         if (ioa_cfg->sis64) {
1454                 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1455                 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1456         } else {
1457                 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1458                 cc_res_handle = cfgtew.u.cfgte->res_handle;
1459         }
1460
1461         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1462                 if (res->res_handle == cc_res_handle) {
1463                         is_ndn = 0;
1464                         break;
1465                 }
1466         }
1467
1468         if (is_ndn) {
1469                 if (list_empty(&ioa_cfg->free_res_q)) {
1470                         ipr_send_hcam(ioa_cfg,
1471                                       IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1472                                       hostrcb);
1473                         return;
1474                 }
1475
1476                 res = list_entry(ioa_cfg->free_res_q.next,
1477                                  struct ipr_resource_entry, queue);
1478
1479                 list_del(&res->queue);
1480                 ipr_init_res_entry(res, &cfgtew);
1481                 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1482         }
1483
1484         ipr_update_res_entry(res, &cfgtew);
1485
1486         if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1487                 if (res->sdev) {
1488                         res->del_from_ml = 1;
1489                         res->res_handle = IPR_INVALID_RES_HANDLE;
1490                         schedule_work(&ioa_cfg->work_q);
1491                 } else {
1492                         ipr_clear_res_target(res);
1493                         list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1494                 }
1495         } else if (!res->sdev || res->del_from_ml) {
1496                 res->add_to_ml = 1;
1497                 schedule_work(&ioa_cfg->work_q);
1498         }
1499
1500         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1501 }
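/*
 * In short: an unrecognized handle means a new device, so a resource
 * entry is claimed from free_res_q (or the HCAM is simply handed back
 * to the adapter if none are left).  On a remove notification the
 * entry is either passed to the worker thread for mid-layer removal
 * (if an sdev is attached) or returned to free_res_q immediately.  In
 * every case the hostrcb is sent back to the adapter to re-arm config
 * change notifications.
 */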
1502
1503 /**
1504  * ipr_process_ccn - Op done function for a CCN.
1505  * @ipr_cmd:    ipr command struct
1506  *
1507  * This function is the op done function for a configuration change
1508  * notification HCAM (host controlled async message) from the adapter.
1509  *
1510  * Return value:
1511  *      none
1512  **/
1513 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1514 {
1515         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1516         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1517         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1518
1519         list_del_init(&hostrcb->queue);
1520         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1521
1522         if (ioasc) {
1523                 if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1524                     ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
1525                         dev_err(&ioa_cfg->pdev->dev,
1526                                 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1527
1528                 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1529         } else {
1530                 ipr_handle_config_change(ioa_cfg, hostrcb);
1531         }
1532 }
1533
1534 /**
1535  * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1536  * @i:          index into buffer
1537  * @buf:                string to modify
1538  *
1539  * This function will strip trailing spaces, pad the end of the
1540  * string with a single space, and NUL terminate the string.
1541  *
1542  * Return value:
1543  *      new length of string
1544  **/
1545 static int strip_and_pad_whitespace(int i, char *buf)
1546 {
1547         while (i && buf[i] == ' ')
1548                 i--;
1549         buf[i+1] = ' ';
1550         buf[i+2] = '\0';
1551         return i + 2;
1552 }
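/*
 * Worked example (assuming an 8-byte vendor field): for buf = "IBM     "
 * and i = 7, the loop backs up to the 'M' at index 2, writes a single
 * space at index 3 and a terminator at index 4, and returns 4 - the
 * offset at which ipr_log_vpd_compact() appends the next field.
 */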
1553
1554 /**
1555  * ipr_log_vpd_compact - Log the passed VPD compactly.
1556  * @prefix:             string to print at start of printk
1557  * @hostrcb:    hostrcb pointer
1558  * @vpd:                vendor/product id/sn struct
1559  *
1560  * Return value:
1561  *      none
1562  **/
1563 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1564                                 struct ipr_vpd *vpd)
1565 {
1566         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1567         int i = 0;
1568
1569         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1570         i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1571
1572         memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1573         i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1574
1575         memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1576         buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1577
1578         ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1579 }
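/*
 * Illustrative result (field values hypothetical): with vendor "IBM",
 * product "2780" and serial "12345678", the single log line becomes
 *
 *      Remote IOA VPID/SN: IBM 2780 12345678
 *
 * versus the two separate lines that ipr_log_vpd() emits.
 */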
1580
1581 /**
1582  * ipr_log_vpd - Log the passed VPD to the error log.
1583  * @vpd:                vendor/product id/sn struct
1584  *
1585  * Return value:
1586  *      none
1587  **/
1588 static void ipr_log_vpd(struct ipr_vpd *vpd)
1589 {
1590         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1591                     + IPR_SERIAL_NUM_LEN];
1592
1593         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1594         memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1595                IPR_PROD_ID_LEN);
1596         buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1597         ipr_err("Vendor/Product ID: %s\n", buffer);
1598
1599         memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1600         buffer[IPR_SERIAL_NUM_LEN] = '\0';
1601         ipr_err("    Serial Number: %s\n", buffer);
1602 }
1603
1604 /**
1605  * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1606  * @prefix:             string to print at start of printk
1607  * @hostrcb:    hostrcb pointer
1608  * @vpd:                vendor/product id/sn/wwn struct
1609  *
1610  * Return value:
1611  *      none
1612  **/
1613 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1614                                     struct ipr_ext_vpd *vpd)
1615 {
1616         ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1617         ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1618                      be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1619 }
1620
1621 /**
1622  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1623  * @vpd:                vendor/product id/sn/wwn struct
1624  *
1625  * Return value:
1626  *      none
1627  **/
1628 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1629 {
1630         ipr_log_vpd(&vpd->vpd);
1631         ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1632                 be32_to_cpu(vpd->wwid[1]));
1633 }
1634
1635 /**
1636  * ipr_log_enhanced_cache_error - Log a cache error.
1637  * @ioa_cfg:    ioa config struct
1638  * @hostrcb:    hostrcb struct
1639  *
1640  * Return value:
1641  *      none
1642  **/
1643 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1644                                          struct ipr_hostrcb *hostrcb)
1645 {
1646         struct ipr_hostrcb_type_12_error *error;
1647
1648         if (ioa_cfg->sis64)
1649                 error = &hostrcb->hcam.u.error64.u.type_12_error;
1650         else
1651                 error = &hostrcb->hcam.u.error.u.type_12_error;
1652
1653         ipr_err("-----Current Configuration-----\n");
1654         ipr_err("Cache Directory Card Information:\n");
1655         ipr_log_ext_vpd(&error->ioa_vpd);
1656         ipr_err("Adapter Card Information:\n");
1657         ipr_log_ext_vpd(&error->cfc_vpd);
1658
1659         ipr_err("-----Expected Configuration-----\n");
1660         ipr_err("Cache Directory Card Information:\n");
1661         ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1662         ipr_err("Adapter Card Information:\n");
1663         ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1664
1665         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1666                      be32_to_cpu(error->ioa_data[0]),
1667                      be32_to_cpu(error->ioa_data[1]),
1668                      be32_to_cpu(error->ioa_data[2]));
1669 }
1670
1671 /**
1672  * ipr_log_cache_error - Log a cache error.
1673  * @ioa_cfg:    ioa config struct
1674  * @hostrcb:    hostrcb struct
1675  *
1676  * Return value:
1677  *      none
1678  **/
1679 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1680                                 struct ipr_hostrcb *hostrcb)
1681 {
1682         struct ipr_hostrcb_type_02_error *error =
1683                 &hostrcb->hcam.u.error.u.type_02_error;
1684
1685         ipr_err("-----Current Configuration-----\n");
1686         ipr_err("Cache Directory Card Information:\n");
1687         ipr_log_vpd(&error->ioa_vpd);
1688         ipr_err("Adapter Card Information:\n");
1689         ipr_log_vpd(&error->cfc_vpd);
1690
1691         ipr_err("-----Expected Configuration-----\n");
1692         ipr_err("Cache Directory Card Information:\n");
1693         ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1694         ipr_err("Adapter Card Information:\n");
1695         ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1696
1697         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1698                      be32_to_cpu(error->ioa_data[0]),
1699                      be32_to_cpu(error->ioa_data[1]),
1700                      be32_to_cpu(error->ioa_data[2]));
1701 }
1702
1703 /**
1704  * ipr_log_enhanced_config_error - Log a configuration error.
1705  * @ioa_cfg:    ioa config struct
1706  * @hostrcb:    hostrcb struct
1707  *
1708  * Return value:
1709  *      none
1710  **/
1711 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1712                                           struct ipr_hostrcb *hostrcb)
1713 {
1714         int errors_logged, i;
1715         struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1716         struct ipr_hostrcb_type_13_error *error;
1717
1718         error = &hostrcb->hcam.u.error.u.type_13_error;
1719         errors_logged = be32_to_cpu(error->errors_logged);
1720
1721         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1722                 be32_to_cpu(error->errors_detected), errors_logged);
1723
1724         dev_entry = error->dev;
1725
1726         for (i = 0; i < errors_logged; i++, dev_entry++) {
1727                 ipr_err_separator;
1728
1729                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1730                 ipr_log_ext_vpd(&dev_entry->vpd);
1731
1732                 ipr_err("-----New Device Information-----\n");
1733                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1734
1735                 ipr_err("Cache Directory Card Information:\n");
1736                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1737
1738                 ipr_err("Adapter Card Information:\n");
1739                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1740         }
1741 }
1742
1743 /**
1744  * ipr_log_sis64_config_error - Log a sis64 configuration error.
1745  * @ioa_cfg:    ioa config struct
1746  * @hostrcb:    hostrcb struct
1747  *
1748  * Return value:
1749  *      none
1750  **/
1751 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1752                                        struct ipr_hostrcb *hostrcb)
1753 {
1754         int errors_logged, i;
1755         struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1756         struct ipr_hostrcb_type_23_error *error;
1757         char buffer[IPR_MAX_RES_PATH_LENGTH];
1758
1759         error = &hostrcb->hcam.u.error64.u.type_23_error;
1760         errors_logged = be32_to_cpu(error->errors_logged);
1761
1762         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1763                 be32_to_cpu(error->errors_detected), errors_logged);
1764
1765         dev_entry = error->dev;
1766
1767         for (i = 0; i < errors_logged; i++, dev_entry++) {
1768                 ipr_err_separator;
1769
1770                 ipr_err("Device %d : %s", i + 1,
1771                         __ipr_format_res_path(dev_entry->res_path,
1772                                               buffer, sizeof(buffer)));
1773                 ipr_log_ext_vpd(&dev_entry->vpd);
1774
1775                 ipr_err("-----New Device Information-----\n");
1776                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1777
1778                 ipr_err("Cache Directory Card Information:\n");
1779                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1780
1781                 ipr_err("Adapter Card Information:\n");
1782                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1783         }
1784 }
1785
1786 /**
1787  * ipr_log_config_error - Log a configuration error.
1788  * @ioa_cfg:    ioa config struct
1789  * @hostrcb:    hostrcb struct
1790  *
1791  * Return value:
1792  *      none
1793  **/
1794 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1795                                  struct ipr_hostrcb *hostrcb)
1796 {
1797         int errors_logged, i;
1798         struct ipr_hostrcb_device_data_entry *dev_entry;
1799         struct ipr_hostrcb_type_03_error *error;
1800
1801         error = &hostrcb->hcam.u.error.u.type_03_error;
1802         errors_logged = be32_to_cpu(error->errors_logged);
1803
1804         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1805                 be32_to_cpu(error->errors_detected), errors_logged);
1806
1807         dev_entry = error->dev;
1808
1809         for (i = 0; i < errors_logged; i++, dev_entry++) {
1810                 ipr_err_separator;
1811
1812                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1813                 ipr_log_vpd(&dev_entry->vpd);
1814
1815                 ipr_err("-----New Device Information-----\n");
1816                 ipr_log_vpd(&dev_entry->new_vpd);
1817
1818                 ipr_err("Cache Directory Card Information:\n");
1819                 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1820
1821                 ipr_err("Adapter Card Information:\n");
1822                 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1823
1824                 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1825                         be32_to_cpu(dev_entry->ioa_data[0]),
1826                         be32_to_cpu(dev_entry->ioa_data[1]),
1827                         be32_to_cpu(dev_entry->ioa_data[2]),
1828                         be32_to_cpu(dev_entry->ioa_data[3]),
1829                         be32_to_cpu(dev_entry->ioa_data[4]));
1830         }
1831 }
1832
1833 /**
1834  * ipr_log_enhanced_array_error - Log an array configuration error.
1835  * @ioa_cfg:    ioa config struct
1836  * @hostrcb:    hostrcb struct
1837  *
1838  * Return value:
1839  *      none
1840  **/
1841 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1842                                          struct ipr_hostrcb *hostrcb)
1843 {
1844         int i, num_entries;
1845         struct ipr_hostrcb_type_14_error *error;
1846         struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1847         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1848
1849         error = &hostrcb->hcam.u.error.u.type_14_error;
1850
1851         ipr_err_separator;
1852
1853         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1854                 error->protection_level,
1855                 ioa_cfg->host->host_no,
1856                 error->last_func_vset_res_addr.bus,
1857                 error->last_func_vset_res_addr.target,
1858                 error->last_func_vset_res_addr.lun);
1859
1860         ipr_err_separator;
1861
1862         array_entry = error->array_member;
1863         num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1864                             ARRAY_SIZE(error->array_member));
1865
1866         for (i = 0; i < num_entries; i++, array_entry++) {
1867                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1868                         continue;
1869
1870                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1871                         ipr_err("Exposed Array Member %d:\n", i);
1872                 else
1873                         ipr_err("Array Member %d:\n", i);
1874
1875                 ipr_log_ext_vpd(&array_entry->vpd);
1876                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1877                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1878                                  "Expected Location");
1879
1880                 ipr_err_separator;
1881         }
1882 }
1883
1884 /**
1885  * ipr_log_array_error - Log an array configuration error.
1886  * @ioa_cfg:    ioa config struct
1887  * @hostrcb:    hostrcb struct
1888  *
1889  * Return value:
1890  *      none
1891  **/
1892 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1893                                 struct ipr_hostrcb *hostrcb)
1894 {
1895         int i;
1896         struct ipr_hostrcb_type_04_error *error;
1897         struct ipr_hostrcb_array_data_entry *array_entry;
1898         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1899
1900         error = &hostrcb->hcam.u.error.u.type_04_error;
1901
1902         ipr_err_separator;
1903
1904         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1905                 error->protection_level,
1906                 ioa_cfg->host->host_no,
1907                 error->last_func_vset_res_addr.bus,
1908                 error->last_func_vset_res_addr.target,
1909                 error->last_func_vset_res_addr.lun);
1910
1911         ipr_err_separator;
1912
1913         array_entry = error->array_member;
1914
1915         for (i = 0; i < 18; i++) {
1916                 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1917                         continue;
1918
1919                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1920                         ipr_err("Exposed Array Member %d:\n", i);
1921                 else
1922                         ipr_err("Array Member %d:\n", i);
1923
1924                 ipr_log_vpd(&array_entry->vpd);
1925
1926                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1927                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1928                                  "Expected Location");
1929
1930                 ipr_err_separator;
1931
1932                 if (i == 9)
1933                         array_entry = error->array_member2;
1934                 else
1935                         array_entry++;
1936         }
1937 }
1938
1939 /**
1940  * ipr_log_hex_data - Log additional hex IOA error data.
1941  * @ioa_cfg:    ioa config struct
1942  * @data:               IOA error data
1943  * @len:                data length
1944  *
1945  * Return value:
1946  *      none
1947  **/
1948 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
1949 {
1950         int i;
1951
1952         if (len == 0)
1953                 return;
1954
1955         if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1956                 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1957
1958         for (i = 0; i < len / 4; i += 4) {
1959                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1960                         be32_to_cpu(data[i]),
1961                         be32_to_cpu(data[i+1]),
1962                         be32_to_cpu(data[i+2]),
1963                         be32_to_cpu(data[i+3]));
1964         }
1965 }
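/*
 * The dump is 16 bytes per line: i indexes 32-bit words, so i*4 is the
 * byte offset printed in the left column.  A line might look like
 * (values illustrative):
 *
 *      00000010: 04080000 00000000 00FF0000 00000000
 *
 * At the default log level the dump is capped at
 * IPR_DEFAULT_MAX_ERROR_DUMP bytes.  Note that when len is not a
 * multiple of 16 the final line can print up to three words past len.
 */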
1966
1967 /**
1968  * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1969  * @ioa_cfg:    ioa config struct
1970  * @hostrcb:    hostrcb struct
1971  *
1972  * Return value:
1973  *      none
1974  **/
1975 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1976                                             struct ipr_hostrcb *hostrcb)
1977 {
1978         struct ipr_hostrcb_type_17_error *error;
1979
1980         if (ioa_cfg->sis64)
1981                 error = &hostrcb->hcam.u.error64.u.type_17_error;
1982         else
1983                 error = &hostrcb->hcam.u.error.u.type_17_error;
1984
1985         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1986         strim(error->failure_reason);
1987
1988         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1989                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1990         ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1991         ipr_log_hex_data(ioa_cfg, error->data,
1992                          be32_to_cpu(hostrcb->hcam.length) -
1993                          (offsetof(struct ipr_hostrcb_error, u) +
1994                           offsetof(struct ipr_hostrcb_type_17_error, data)));
1995 }
1996
1997 /**
1998  * ipr_log_dual_ioa_error - Log a dual adapter error.
1999  * @ioa_cfg:    ioa config struct
2000  * @hostrcb:    hostrcb struct
2001  *
2002  * Return value:
2003  *      none
2004  **/
2005 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
2006                                    struct ipr_hostrcb *hostrcb)
2007 {
2008         struct ipr_hostrcb_type_07_error *error;
2009
2010         error = &hostrcb->hcam.u.error.u.type_07_error;
2011         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2012         strim(error->failure_reason);
2013
2014         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
2015                      be32_to_cpu(hostrcb->hcam.u.error.prc));
2016         ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
2017         ipr_log_hex_data(ioa_cfg, error->data,
2018                          be32_to_cpu(hostrcb->hcam.length) -
2019                          (offsetof(struct ipr_hostrcb_error, u) +
2020                           offsetof(struct ipr_hostrcb_type_07_error, data)));
2021 }
2022
2023 static const struct {
2024         u8 active;
2025         char *desc;
2026 } path_active_desc[] = {
2027         { IPR_PATH_NO_INFO, "Path" },
2028         { IPR_PATH_ACTIVE, "Active path" },
2029         { IPR_PATH_NOT_ACTIVE, "Inactive path" }
2030 };
2031
2032 static const struct {
2033         u8 state;
2034         char *desc;
2035 } path_state_desc[] = {
2036         { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
2037         { IPR_PATH_HEALTHY, "is healthy" },
2038         { IPR_PATH_DEGRADED, "is degraded" },
2039         { IPR_PATH_FAILED, "is failed" }
2040 };
2041
2042 /**
2043  * ipr_log_fabric_path - Log a fabric path error
2044  * @hostrcb:    hostrcb struct
2045  * @fabric:             fabric descriptor
2046  *
2047  * Return value:
2048  *      none
2049  **/
2050 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2051                                 struct ipr_hostrcb_fabric_desc *fabric)
2052 {
2053         int i, j;
2054         u8 path_state = fabric->path_state;
2055         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2056         u8 state = path_state & IPR_PATH_STATE_MASK;
2057
2058         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2059                 if (path_active_desc[i].active != active)
2060                         continue;
2061
2062                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2063                         if (path_state_desc[j].state != state)
2064                                 continue;
2065
2066                         if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2067                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2068                                              path_active_desc[i].desc, path_state_desc[j].desc,
2069                                              fabric->ioa_port);
2070                         } else if (fabric->cascaded_expander == 0xff) {
2071                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2072                                              path_active_desc[i].desc, path_state_desc[j].desc,
2073                                              fabric->ioa_port, fabric->phy);
2074                         } else if (fabric->phy == 0xff) {
2075                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2076                                              path_active_desc[i].desc, path_state_desc[j].desc,
2077                                              fabric->ioa_port, fabric->cascaded_expander);
2078                         } else {
2079                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2080                                              path_active_desc[i].desc, path_state_desc[j].desc,
2081                                              fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2082                         }
2083                         return;
2084                 }
2085         }
2086
2087         ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2088                 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2089 }
2090
2091 /**
2092  * ipr_log64_fabric_path - Log a fabric path error
2093  * @hostrcb:    hostrcb struct
2094  * @fabric:             fabric descriptor
2095  *
2096  * Return value:
2097  *      none
2098  **/
2099 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2100                                   struct ipr_hostrcb64_fabric_desc *fabric)
2101 {
2102         int i, j;
2103         u8 path_state = fabric->path_state;
2104         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2105         u8 state = path_state & IPR_PATH_STATE_MASK;
2106         char buffer[IPR_MAX_RES_PATH_LENGTH];
2107
2108         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2109                 if (path_active_desc[i].active != active)
2110                         continue;
2111
2112                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2113                         if (path_state_desc[j].state != state)
2114                                 continue;
2115
2116                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2117                                      path_active_desc[i].desc, path_state_desc[j].desc,
2118                                      ipr_format_res_path(hostrcb->ioa_cfg,
2119                                                 fabric->res_path,
2120                                                 buffer, sizeof(buffer)));
2121                         return;
2122                 }
2123         }
2124
2125         ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2126                 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2127                                     buffer, sizeof(buffer)));
2128 }
2129
2130 static const struct {
2131         u8 type;
2132         char *desc;
2133 } path_type_desc[] = {
2134         { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2135         { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2136         { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2137         { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2138 };
2139
2140 static const struct {
2141         u8 status;
2142         char *desc;
2143 } path_status_desc[] = {
2144         { IPR_PATH_CFG_NO_PROB, "Functional" },
2145         { IPR_PATH_CFG_DEGRADED, "Degraded" },
2146         { IPR_PATH_CFG_FAILED, "Failed" },
2147         { IPR_PATH_CFG_SUSPECT, "Suspect" },
2148         { IPR_PATH_NOT_DETECTED, "Missing" },
2149         { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2150 };
2151
2152 static const char *link_rate[] = {
2153         "unknown",
2154         "disabled",
2155         "phy reset problem",
2156         "spinup hold",
2157         "port selector",
2158         "unknown",
2159         "unknown",
2160         "unknown",
2161         "1.5Gbps",
2162         "3.0Gbps",
2163         "unknown",
2164         "unknown",
2165         "unknown",
2166         "unknown",
2167         "unknown",
2168         "unknown"
2169 };
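/*
 * link_rate[] is indexed by the low four bits of cfg->link_rate
 * (IPR_PHY_LINK_RATE_MASK); the encodings mirror the SAS link rate
 * field, e.g. 0x8 -> "1.5Gbps" and 0x9 -> "3.0Gbps", with every
 * unassigned value reported as "unknown".
 */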
2170
2171 /**
2172  * ipr_log_path_elem - Log a fabric path element.
2173  * @hostrcb:    hostrcb struct
2174  * @cfg:                fabric path element struct
2175  *
2176  * Return value:
2177  *      none
2178  **/
2179 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2180                               struct ipr_hostrcb_config_element *cfg)
2181 {
2182         int i, j;
2183         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2184         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2185
2186         if (type == IPR_PATH_CFG_NOT_EXIST)
2187                 return;
2188
2189         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2190                 if (path_type_desc[i].type != type)
2191                         continue;
2192
2193                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2194                         if (path_status_desc[j].status != status)
2195                                 continue;
2196
2197                         if (type == IPR_PATH_CFG_IOA_PORT) {
2198                                 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2199                                              path_status_desc[j].desc, path_type_desc[i].desc,
2200                                              cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2201                                              be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2202                         } else {
2203                                 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2204                                         ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2205                                                      path_status_desc[j].desc, path_type_desc[i].desc,
2206                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2207                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2208                                 } else if (cfg->cascaded_expander == 0xff) {
2209                                         ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2210                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2211                                                      path_type_desc[i].desc, cfg->phy,
2212                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2213                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2214                                 } else if (cfg->phy == 0xff) {
2215                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2216                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2217                                                      path_type_desc[i].desc, cfg->cascaded_expander,
2218                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2219                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2220                                 } else {
2221                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2222                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2223                                                      path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2224                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2225                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2226                                 }
2227                         }
2228                         return;
2229                 }
2230         }
2231
2232         ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2233                      "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2234                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2235                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2236 }
2237
2238 /**
2239  * ipr_log64_path_elem - Log a fabric path element.
2240  * @hostrcb:    hostrcb struct
2241  * @cfg:                fabric path element struct
2242  *
2243  * Return value:
2244  *      none
2245  **/
2246 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2247                                 struct ipr_hostrcb64_config_element *cfg)
2248 {
2249         int i, j;
2250         u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2251         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2252         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2253         char buffer[IPR_MAX_RES_PATH_LENGTH];
2254
2255         if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2256                 return;
2257
2258         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2259                 if (path_type_desc[i].type != type)
2260                         continue;
2261
2262                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2263                         if (path_status_desc[j].status != status)
2264                                 continue;
2265
2266                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2267                                      path_status_desc[j].desc, path_type_desc[i].desc,
2268                                      ipr_format_res_path(hostrcb->ioa_cfg,
2269                                         cfg->res_path, buffer, sizeof(buffer)),
2270                                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2271                                         be32_to_cpu(cfg->wwid[0]),
2272                                         be32_to_cpu(cfg->wwid[1]));
2273                         return;
2274                 }
2275         }
2276         ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2277                      "WWN=%08X%08X\n", cfg->type_status,
2278                      ipr_format_res_path(hostrcb->ioa_cfg,
2279                         cfg->res_path, buffer, sizeof(buffer)),
2280                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2281                         be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2282 }
2283
2284 /**
2285  * ipr_log_fabric_error - Log a fabric error.
2286  * @ioa_cfg:    ioa config struct
2287  * @hostrcb:    hostrcb struct
2288  *
2289  * Return value:
2290  *      none
2291  **/
2292 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2293                                  struct ipr_hostrcb *hostrcb)
2294 {
2295         struct ipr_hostrcb_type_20_error *error;
2296         struct ipr_hostrcb_fabric_desc *fabric;
2297         struct ipr_hostrcb_config_element *cfg;
2298         int i, add_len;
2299
2300         error = &hostrcb->hcam.u.error.u.type_20_error;
2301         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2302         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2303
2304         add_len = be32_to_cpu(hostrcb->hcam.length) -
2305                 (offsetof(struct ipr_hostrcb_error, u) +
2306                  offsetof(struct ipr_hostrcb_type_20_error, desc));
2307
2308         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2309                 ipr_log_fabric_path(hostrcb, fabric);
2310                 for_each_fabric_cfg(fabric, cfg)
2311                         ipr_log_path_elem(hostrcb, cfg);
2312
2313                 add_len -= be16_to_cpu(fabric->length);
2314                 fabric = (struct ipr_hostrcb_fabric_desc *)
2315                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2316         }
2317
2318         ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2319 }
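/*
 * Fabric descriptors are variable length, so the loop advances by each
 * descriptor's own length field while for_each_fabric_cfg() walks the
 * path elements embedded in it.  Whatever add_len remains after the
 * last descriptor was not parsed and is dumped raw via
 * ipr_log_hex_data().  ipr_log_sis64_fabric_error() below follows the
 * same pattern with the 64-bit descriptor layout.
 */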
2320
2321 /**
2322  * ipr_log_sis64_array_error - Log a sis64 array error.
2323  * @ioa_cfg:    ioa config struct
2324  * @hostrcb:    hostrcb struct
2325  *
2326  * Return value:
2327  *      none
2328  **/
2329 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2330                                       struct ipr_hostrcb *hostrcb)
2331 {
2332         int i, num_entries;
2333         struct ipr_hostrcb_type_24_error *error;
2334         struct ipr_hostrcb64_array_data_entry *array_entry;
2335         char buffer[IPR_MAX_RES_PATH_LENGTH];
2336         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2337
2338         error = &hostrcb->hcam.u.error64.u.type_24_error;
2339
2340         ipr_err_separator;
2341
2342         ipr_err("RAID %s Array Configuration: %s\n",
2343                 error->protection_level,
2344                 ipr_format_res_path(ioa_cfg, error->last_res_path,
2345                         buffer, sizeof(buffer)));
2346
2347         ipr_err_separator;
2348
2349         array_entry = error->array_member;
2350         num_entries = min_t(u32, error->num_entries,
2351                             ARRAY_SIZE(error->array_member));
2352
2353         for (i = 0; i < num_entries; i++, array_entry++) {
2355                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2356                         continue;
2357
2358                 if (error->exposed_mode_adn == i)
2359                         ipr_err("Exposed Array Member %d:\n", i);
2360                 else
2361                         ipr_err("Array Member %d:\n", i);
2362
2364                 ipr_log_ext_vpd(&array_entry->vpd);
2365                 ipr_err("Current Location: %s\n",
2366                          ipr_format_res_path(ioa_cfg, array_entry->res_path,
2367                                 buffer, sizeof(buffer)));
2368                 ipr_err("Expected Location: %s\n",
2369                          ipr_format_res_path(ioa_cfg,
2370                                 array_entry->expected_res_path,
2371                                 buffer, sizeof(buffer)));
2372
2373                 ipr_err_separator;
2374         }
2375 }
2376
2377 /**
2378  * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2379  * @ioa_cfg:    ioa config struct
2380  * @hostrcb:    hostrcb struct
2381  *
2382  * Return value:
2383  *      none
2384  **/
2385 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2386                                        struct ipr_hostrcb *hostrcb)
2387 {
2388         struct ipr_hostrcb_type_30_error *error;
2389         struct ipr_hostrcb64_fabric_desc *fabric;
2390         struct ipr_hostrcb64_config_element *cfg;
2391         int i, add_len;
2392
2393         error = &hostrcb->hcam.u.error64.u.type_30_error;
2394
2395         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2396         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2397
2398         add_len = be32_to_cpu(hostrcb->hcam.length) -
2399                 (offsetof(struct ipr_hostrcb64_error, u) +
2400                  offsetof(struct ipr_hostrcb_type_30_error, desc));
2401
2402         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2403                 ipr_log64_fabric_path(hostrcb, fabric);
2404                 for_each_fabric_cfg(fabric, cfg)
2405                         ipr_log64_path_elem(hostrcb, cfg);
2406
2407                 add_len -= be16_to_cpu(fabric->length);
2408                 fabric = (struct ipr_hostrcb64_fabric_desc *)
2409                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2410         }
2411
2412         ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2413 }
2414
2415 /**
2416  * ipr_log_generic_error - Log an adapter error.
2417  * @ioa_cfg:    ioa config struct
2418  * @hostrcb:    hostrcb struct
2419  *
2420  * Return value:
2421  *      none
2422  **/
2423 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2424                                   struct ipr_hostrcb *hostrcb)
2425 {
2426         ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2427                          be32_to_cpu(hostrcb->hcam.length));
2428 }
2429
2430 /**
2431  * ipr_log_sis64_device_error - Log a sis64 device error.
2432  * @ioa_cfg:    ioa config struct
2433  * @hostrcb:    hostrcb struct
2434  *
2435  * Return value:
2436  *      none
2437  **/
2438 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2439                                          struct ipr_hostrcb *hostrcb)
2440 {
2441         struct ipr_hostrcb_type_21_error *error;
2442         char buffer[IPR_MAX_RES_PATH_LENGTH];
2443
2444         error = &hostrcb->hcam.u.error64.u.type_21_error;
2445
2446         ipr_err("-----Failing Device Information-----\n");
2447         ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2448                 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2449                 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2450         ipr_err("Device Resource Path: %s\n",
2451                 __ipr_format_res_path(error->res_path,
2452                                       buffer, sizeof(buffer)));
2453         error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2454         error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2455         ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2456         ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
2457         ipr_err("SCSI Sense Data:\n");
2458         ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2459         ipr_err("SCSI Command Descriptor Block:\n");
2460         ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2461
2462         ipr_err("Additional IOA Data:\n");
2463         ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2464 }
2465
2466 /**
2467  * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2468  * @ioasc:      IOASC
2469  *
2470  * This function will return the index into the ipr_error_table
2471  * for the specified IOASC. If the IOASC is not in the table,
2472  * 0 will be returned, which points to the entry used for unknown errors.
2473  *
2474  * Return value:
2475  *      index into the ipr_error_table
2476  **/
2477 static u32 ipr_get_error(u32 ioasc)
2478 {
2479         int i;
2480
2481         for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2482                 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2483                         return i;
2484
2485         return 0;
2486 }
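/*
 * The lookup is a linear scan, with the IOASC masked by
 * IPR_IOASC_IOASC_MASK before comparison.  A miss returns index 0, so
 * the first table entry is expected to be the catch-all used for
 * unknown errors, per the comment block above.
 */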
2487
2488 /**
2489  * ipr_handle_log_data - Log an adapter error.
2490  * @ioa_cfg:    ioa config struct
2491  * @hostrcb:    hostrcb struct
2492  *
2493  * This function logs an adapter error to the system.
2494  *
2495  * Return value:
2496  *      none
2497  **/
2498 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2499                                 struct ipr_hostrcb *hostrcb)
2500 {
2501         u32 ioasc;
2502         int error_index;
2503         struct ipr_hostrcb_type_21_error *error;
2504
2505         if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2506                 return;
2507
2508         if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2509                 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2510
2511         if (ioa_cfg->sis64)
2512                 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2513         else
2514                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2515
2516         if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2517             ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2518                 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2519                 scsi_report_bus_reset(ioa_cfg->host,
2520                                       hostrcb->hcam.u.error.fd_res_addr.bus);
2521         }
2522
2523         error_index = ipr_get_error(ioasc);
2524
2525         if (!ipr_error_table[error_index].log_hcam)
2526                 return;
2527
2528         if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2529             hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2530                 error = &hostrcb->hcam.u.error64.u.type_21_error;
2531
2532                 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2533                     ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2534                         return;
2535         }
2536
2537         ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2538
2539         /* Set indication we have logged an error */
2540         ioa_cfg->errors_logged++;
2541
2542         if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2543                 return;
2544         if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2545                 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2546
2547         switch (hostrcb->hcam.overlay_id) {
2548         case IPR_HOST_RCB_OVERLAY_ID_2:
2549                 ipr_log_cache_error(ioa_cfg, hostrcb);
2550                 break;
2551         case IPR_HOST_RCB_OVERLAY_ID_3:
2552                 ipr_log_config_error(ioa_cfg, hostrcb);
2553                 break;
2554         case IPR_HOST_RCB_OVERLAY_ID_4:
2555         case IPR_HOST_RCB_OVERLAY_ID_6:
2556                 ipr_log_array_error(ioa_cfg, hostrcb);
2557                 break;
2558         case IPR_HOST_RCB_OVERLAY_ID_7:
2559                 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2560                 break;
2561         case IPR_HOST_RCB_OVERLAY_ID_12:
2562                 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2563                 break;
2564         case IPR_HOST_RCB_OVERLAY_ID_13:
2565                 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2566                 break;
2567         case IPR_HOST_RCB_OVERLAY_ID_14:
2568         case IPR_HOST_RCB_OVERLAY_ID_16:
2569                 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2570                 break;
2571         case IPR_HOST_RCB_OVERLAY_ID_17:
2572                 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2573                 break;
2574         case IPR_HOST_RCB_OVERLAY_ID_20:
2575                 ipr_log_fabric_error(ioa_cfg, hostrcb);
2576                 break;
2577         case IPR_HOST_RCB_OVERLAY_ID_21:
2578                 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2579                 break;
2580         case IPR_HOST_RCB_OVERLAY_ID_23:
2581                 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2582                 break;
2583         case IPR_HOST_RCB_OVERLAY_ID_24:
2584         case IPR_HOST_RCB_OVERLAY_ID_26:
2585                 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2586                 break;
2587         case IPR_HOST_RCB_OVERLAY_ID_30:
2588                 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2589                 break;
2590         case IPR_HOST_RCB_OVERLAY_ID_1:
2591         case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2592         default:
2593                 ipr_log_generic_error(ioa_cfg, hostrcb);
2594                 break;
2595         }
2596 }
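/*
 * Dispatch notes: overlay_id selects the formatter above, and the
 * clamp of hcam.length to sizeof(hcam.u.raw) just before the switch
 * keeps a bogus length from the adapter from driving any of the log
 * helpers past the end of the hostrcb buffer.
 */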
2597
2598 static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
2599 {
2600         struct ipr_hostrcb *hostrcb;
2601
2602         hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
2603                                         struct ipr_hostrcb, queue);
2604
2605         if (unlikely(!hostrcb)) {
2606                 dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.\n");
2607                 hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
2608                                                 struct ipr_hostrcb, queue);
2609         }
2610
2611         list_del_init(&hostrcb->queue);
2612         return hostrcb;
2613 }
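/*
 * When hostrcb_free_q is exhausted, the oldest buffer still parked on
 * hostrcb_report_q (awaiting consumption) is reclaimed instead.  This
 * quietly assumes at least one of the two lists is non-empty; if both
 * were empty, the list_del_init() above would dereference a NULL
 * hostrcb.
 */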
2614
2615 /**
2616  * ipr_process_error - Op done function for an adapter error log.
2617  * @ipr_cmd:    ipr command struct
2618  *
2619  * This function is the op done function for an error log HCAM (host
2620  * controlled async message) from the adapter. It will log the error and
2621  * send the HCAM back to the adapter.
2622  *
2623  * Return value:
2624  *      none
2625  **/
2626 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2627 {
2628         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2629         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2630         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2631         u32 fd_ioasc;
2632
2633         if (ioa_cfg->sis64)
2634                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2635         else
2636                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2637
2638         list_del_init(&hostrcb->queue);
2639         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2640
2641         if (!ioasc) {
2642                 ipr_handle_log_data(ioa_cfg, hostrcb);
2643                 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2644                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2645         } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2646                    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
2647                 dev_err(&ioa_cfg->pdev->dev,
2648                         "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2649         }
2650
2651         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
2652         schedule_work(&ioa_cfg->work_q);
2653         hostrcb = ipr_get_free_hostrcb(ioa_cfg);
2654
2655         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2656 }
2657
2658 /**
2659  * ipr_timeout - An internally generated op has timed out.
2660  * @t:          Timer context used to fetch ipr command struct
2661  *
2662  * This function blocks host requests and initiates an
2663  * adapter reset.
2664  *
2665  * Return value:
2666  *      none
2667  **/
2668 static void ipr_timeout(struct timer_list *t)
2669 {
2670         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
2671         unsigned long lock_flags = 0;
2672         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2673
2674         ENTER;
2675         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2676
2677         ioa_cfg->errors_logged++;
2678         dev_err(&ioa_cfg->pdev->dev,
2679                 "Adapter being reset due to command timeout.\n");
2680
2681         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2682                 ioa_cfg->sdt_state = GET_DUMP;
2683
2684         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2685                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2686
2687         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2688         LEAVE;
2689 }
2690
2691 /**
2692  * ipr_oper_timeout -  Adapter timed out transitioning to operational
2693  * @t:          Timer context used to fetch ipr command struct
2694  *
2695  * This function blocks host requests and initiates an
2696  * adapter reset.
2697  *
2698  * Return value:
2699  *      none
2700  **/
2701 static void ipr_oper_timeout(struct timer_list *t)
2702 {
2703         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
2704         unsigned long lock_flags = 0;
2705         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2706
2707         ENTER;
2708         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2709
2710         ioa_cfg->errors_logged++;
2711         dev_err(&ioa_cfg->pdev->dev,
2712                 "Adapter timed out transitioning to operational.\n");
2713
2714         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2715                 ioa_cfg->sdt_state = GET_DUMP;
2716
2717         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2718                 if (ipr_fastfail)
2719                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2720                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2721         }
2722
2723         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2724         LEAVE;
2725 }
2726
2727 /**
2728  * ipr_find_ses_entry - Find matching SES in SES table
2729  * @res:        resource entry struct of SES
2730  *
2731  * Return value:
2732  *      pointer to SES table entry / NULL on failure
2733  **/
2734 static const struct ipr_ses_table_entry *
2735 ipr_find_ses_entry(struct ipr_resource_entry *res)
2736 {
2737         int i, j, matches;
2738         struct ipr_std_inq_vpids *vpids;
2739         const struct ipr_ses_table_entry *ste = ipr_ses_table;
2740
2741         for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2742                 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2743                         if (ste->compare_product_id_byte[j] == 'X') {
2744                                 vpids = &res->std_inq_data.vpids;
2745                                 if (vpids->product_id[j] == ste->product_id[j])
2746                                         matches++;
2747                                 else
2748                                         break;
2749                         } else
2750                                 matches++;
2751                 }
2752
2753                 if (matches == IPR_PROD_ID_LEN)
2754                         return ste;
2755         }
2756
2757         return NULL;
2758 }
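/*
 * Note on the match loop above: an 'X' in compare_product_id_byte[]
 * marks a position of the product ID that must match the table entry;
 * any other value makes that position a don't-care. This is the
 * opposite of the usual wildcard convention, so ipr_ses_table entries
 * should be read with that inversion in mind.
 */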
2759
2760 /**
2761  * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2762  * @ioa_cfg:    ioa config struct
2763  * @bus:                SCSI bus
2764  * @bus_width:  bus width
2765  *
2766  * Return value:
2767  *      SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2768  *      For a 2-byte wide SCSI bus, the maximum transfer speed is
2769  *      twice the maximum transfer rate (e.g. for a wide enabled bus,
2770  *      max 160MHz = max 320MB/sec).
2771  **/
2772 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2773 {
2774         struct ipr_resource_entry *res;
2775         const struct ipr_ses_table_entry *ste;
2776         u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2777
2778         /* Loop through each config table entry in the config table buffer */
2779         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2780                 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2781                         continue;
2782
2783                 if (bus != res->bus)
2784                         continue;
2785
2786                 if (!(ste = ipr_find_ses_entry(res)))
2787                         continue;
2788
2789                 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2790         }
2791
2792         return max_xfer_rate;
2793 }
2794
2795 /**
2796  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2797  * @ioa_cfg:            ioa config struct
2798  * @max_delay:          max delay in micro-seconds to wait
2799  *
2800  * Waits for an IODEBUG ACK from the IOA by busy-waiting.
2801  *
2802  * Return value:
2803  *      0 on success / other on failure
2804  **/
2805 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2806 {
2807         volatile u32 pcii_reg;
2808         int delay = 1;
2809
2810         /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2811         while (delay < max_delay) {
2812                 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2813
2814                 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2815                         return 0;
2816
2817                 /* udelay cannot be used if delay is more than a few milliseconds */
2818                 if ((delay / 1000) > MAX_UDELAY_MS)
2819                         mdelay(delay / 1000);
2820                 else
2821                         udelay(delay);
2822
2823                 delay += delay;
2824         }
2825         return -EIO;
2826 }
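/*
 * The wait above uses exponential backoff: the delay doubles each
 * iteration (1us, 2us, 4us, ...), so only O(log(max_delay)) register
 * reads are issued and the total time spent waiting is bounded by
 * roughly twice max_delay microseconds before the -EIO timeout.
 */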
2827
2828 /**
2829  * ipr_get_sis64_dump_data_section - Dump IOA memory
2830  * @ioa_cfg:                    ioa config struct
2831  * @start_addr:                 adapter address to dump
2832  * @dest:                       destination kernel buffer
2833  * @length_in_words:            length to dump in 4 byte words
2834  *
2835  * Return value:
2836  *      0 on success
2837  **/
2838 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2839                                            u32 start_addr,
2840                                            __be32 *dest, u32 length_in_words)
2841 {
2842         int i;
2843
2844         for (i = 0; i < length_in_words; i++) {
2845                 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2846                 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2847                 dest++;
2848         }
2849
2850         return 0;
2851 }
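/*
 * On SIS-64 adapters the dump interface is the simple indirect register
 * pair used above: write a word-aligned adapter address to
 * dump_addr_reg, then read the corresponding 32-bit word back from
 * dump_data_reg, repeating for every word of the section. No firmware
 * handshake is needed, unlike the SIS-32 LDUMP protocol below.
 */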
2852
2853 /**
2854  * ipr_get_ldump_data_section - Dump IOA memory
2855  * @ioa_cfg:                    ioa config struct
2856  * @start_addr:                 adapter address to dump
2857  * @dest:                       destination kernel buffer
2858  * @length_in_words:            length to dump in 4 byte words
2859  *
2860  * Return value:
2861  *      0 on success / -EIO on failure
2862  **/
2863 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2864                                       u32 start_addr,
2865                                       __be32 *dest, u32 length_in_words)
2866 {
2867         volatile u32 temp_pcii_reg;
2868         int i, delay = 0;
2869
2870         if (ioa_cfg->sis64)
2871                 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2872                                                        dest, length_in_words);
2873
2874         /* Write IOA interrupt reg starting LDUMP state  */
2875         writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2876                ioa_cfg->regs.set_uproc_interrupt_reg32);
2877
2878         /* Wait for IO debug acknowledge */
2879         if (ipr_wait_iodbg_ack(ioa_cfg,
2880                                IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2881                 dev_err(&ioa_cfg->pdev->dev,
2882                         "IOA dump long data transfer timeout\n");
2883                 return -EIO;
2884         }
2885
2886         /* Signal LDUMP interlocked - clear IO debug ack */
2887         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2888                ioa_cfg->regs.clr_interrupt_reg);
2889
2890         /* Write Mailbox with starting address */
2891         writel(start_addr, ioa_cfg->ioa_mailbox);
2892
2893         /* Signal address valid - clear IOA Reset alert */
2894         writel(IPR_UPROCI_RESET_ALERT,
2895                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2896
2897         for (i = 0; i < length_in_words; i++) {
2898                 /* Wait for IO debug acknowledge */
2899                 if (ipr_wait_iodbg_ack(ioa_cfg,
2900                                        IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2901                         dev_err(&ioa_cfg->pdev->dev,
2902                                 "IOA dump short data transfer timeout\n");
2903                         return -EIO;
2904                 }
2905
2906                 /* Read data from mailbox and increment destination pointer */
2907                 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2908                 dest++;
2909
2910                 /* For all but the last word of data, signal data received */
2911                 if (i < (length_in_words - 1)) {
2912                         /* Signal dump data received - Clear IO debug Ack */
2913                         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2914                                ioa_cfg->regs.clr_interrupt_reg);
2915                 }
2916         }
2917
2918         /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2919         writel(IPR_UPROCI_RESET_ALERT,
2920                ioa_cfg->regs.set_uproc_interrupt_reg32);
2921
2922         writel(IPR_UPROCI_IO_DEBUG_ALERT,
2923                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2924
2925         /* Signal dump data received - Clear IO debug Ack */
2926         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2927                ioa_cfg->regs.clr_interrupt_reg);
2928
2929         /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2930         while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2931                 temp_pcii_reg =
2932                     readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2933
2934                 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2935                         return 0;
2936
2937                 udelay(10);
2938                 delay += 10;
2939         }
2940
2941         return 0;
2942 }
2943
2944 #ifdef CONFIG_SCSI_IPR_DUMP
2945 /**
2946  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2947  * @ioa_cfg:            ioa config struct
2948  * @pci_address:        adapter address
2949  * @length:                     length of data to copy
2950  *
2951  * Copy data from PCI adapter to kernel buffer.
2952  * Note: length MUST be a 4 byte multiple
2953  * Return value:
2954  *      0 on success / other on failure
2955  **/
2956 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2957                         unsigned long pci_address, u32 length)
2958 {
2959         int bytes_copied = 0;
2960         int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2961         __be32 *page;
2962         unsigned long lock_flags = 0;
2963         struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2964
2965         if (ioa_cfg->sis64)
2966                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2967         else
2968                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2969
2970         while (bytes_copied < length &&
2971                (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2972                 if (ioa_dump->page_offset >= PAGE_SIZE ||
2973                     ioa_dump->page_offset == 0) {
2974                         page = (__be32 *)__get_free_page(GFP_ATOMIC);
2975
2976                         if (!page) {
2977                                 ipr_trace;
2978                                 return bytes_copied;
2979                         }
2980
2981                         ioa_dump->page_offset = 0;
2982                         ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2983                         ioa_dump->next_page_index++;
2984                 } else
2985                         page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2986
2987                 rem_len = length - bytes_copied;
2988                 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2989                 cur_len = min(rem_len, rem_page_len);
2990
2991                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2992                 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2993                         rc = -EIO;
2994                 } else {
2995                         rc = ipr_get_ldump_data_section(ioa_cfg,
2996                                                         pci_address + bytes_copied,
2997                                                         &page[ioa_dump->page_offset / 4],
2998                                                         (cur_len / sizeof(u32)));
2999                 }
3000                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3001
3002                 if (!rc) {
3003                         ioa_dump->page_offset += cur_len;
3004                         bytes_copied += cur_len;
3005                 } else {
3006                         ipr_trace;
3007                         break;
3008                 }
3009                 schedule();
3010         }
3011
3012         return bytes_copied;
3013 }
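/*
 * ipr_sdt_copy() stages the dump through single pages allocated with
 * GFP_ATOMIC and recorded in ioa_dump->ioa_data[]; page_offset tracks
 * how far the most recent page has been filled so successive iterations
 * keep appending to it. The host lock is held only around each
 * ipr_get_ldump_data_section() burst, and schedule() is called between
 * bursts so that copying a large dump does not monopolize the CPU.
 */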
3014
3015 /**
3016  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
3017  * @hdr:        dump entry header struct
3018  *
3019  * Return value:
3020  *      nothing
3021  **/
3022 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
3023 {
3024         hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
3025         hdr->num_elems = 1;
3026         hdr->offset = sizeof(*hdr);
3027         hdr->status = IPR_DUMP_STATUS_SUCCESS;
3028 }
3029
3030 /**
3031  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
3032  * @ioa_cfg:    ioa config struct
3033  * @driver_dump:        driver dump struct
3034  *
3035  * Return value:
3036  *      nothing
3037  **/
3038 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
3039                                    struct ipr_driver_dump *driver_dump)
3040 {
3041         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3042
3043         ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
3044         driver_dump->ioa_type_entry.hdr.len =
3045                 sizeof(struct ipr_dump_ioa_type_entry) -
3046                 sizeof(struct ipr_dump_entry_header);
3047         driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3048         driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
3049         driver_dump->ioa_type_entry.type = ioa_cfg->type;
3050         driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
3051                 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
3052                 ucode_vpd->minor_release[1];
3053         driver_dump->hdr.num_entries++;
3054 }
3055
3056 /**
3057  * ipr_dump_version_data - Fill in the driver version in the dump.
3058  * @ioa_cfg:    ioa config struct
3059  * @driver_dump:        driver dump struct
3060  *
3061  * Return value:
3062  *      nothing
3063  **/
3064 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
3065                                   struct ipr_driver_dump *driver_dump)
3066 {
3067         ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
3068         driver_dump->version_entry.hdr.len =
3069                 sizeof(struct ipr_dump_version_entry) -
3070                 sizeof(struct ipr_dump_entry_header);
3071         driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3072         driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
3073         strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3074         driver_dump->hdr.num_entries++;
3075 }
3076
3077 /**
3078  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3079  * @ioa_cfg:    ioa config struct
3080  * @driver_dump:        driver dump struct
3081  *
3082  * Return value:
3083  *      nothing
3084  **/
3085 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3086                                    struct ipr_driver_dump *driver_dump)
3087 {
3088         ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3089         driver_dump->trace_entry.hdr.len =
3090                 sizeof(struct ipr_dump_trace_entry) -
3091                 sizeof(struct ipr_dump_entry_header);
3092         driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3093         driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3094         memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3095         driver_dump->hdr.num_entries++;
3096 }
3097
3098 /**
3099  * ipr_dump_location_data - Fill in the IOA location in the dump.
3100  * @ioa_cfg:    ioa config struct
3101  * @driver_dump:        driver dump struct
3102  *
3103  * Return value:
3104  *      nothing
3105  **/
3106 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3107                                    struct ipr_driver_dump *driver_dump)
3108 {
3109         ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3110         driver_dump->location_entry.hdr.len =
3111                 sizeof(struct ipr_dump_location_entry) -
3112                 sizeof(struct ipr_dump_entry_header);
3113         driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3114         driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3115         strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3116         driver_dump->hdr.num_entries++;
3117 }
3118
3119 /**
3120  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3121  * @ioa_cfg:    ioa config struct
3122  * @dump:               dump struct
3123  *
3124  * Return value:
3125  *      nothing
3126  **/
3127 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3128 {
3129         unsigned long start_addr, sdt_word;
3130         unsigned long lock_flags = 0;
3131         struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3132         struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3133         u32 num_entries, max_num_entries, start_off, end_off;
3134         u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3135         struct ipr_sdt *sdt;
3136         int valid = 1;
3137         int i;
3138
3139         ENTER;
3140
3141         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3142
3143         if (ioa_cfg->sdt_state != READ_DUMP) {
3144                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3145                 return;
3146         }
3147
3148         if (ioa_cfg->sis64) {
3149                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3150                 ssleep(IPR_DUMP_DELAY_SECONDS);
3151                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3152         }
3153
3154         start_addr = readl(ioa_cfg->ioa_mailbox);
3155
3156         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3157                 dev_err(&ioa_cfg->pdev->dev,
3158                         "Invalid dump table format: %lx\n", start_addr);
3159                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3160                 return;
3161         }
3162
3163         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3164
3165         driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3166
3167         /* Initialize the overall dump header */
3168         driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3169         driver_dump->hdr.num_entries = 1;
3170         driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3171         driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3172         driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3173         driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3174
3175         ipr_dump_version_data(ioa_cfg, driver_dump);
3176         ipr_dump_location_data(ioa_cfg, driver_dump);
3177         ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3178         ipr_dump_trace_data(ioa_cfg, driver_dump);
3179
3180         /* Update dump_header */
3181         driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3182
3183         /* IOA Dump entry */
3184         ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3185         ioa_dump->hdr.len = 0;
3186         ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3187         ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3188
3189         /* First entries in sdt are actually a list of dump addresses and
3190          * lengths to gather the real dump data.  sdt represents the pointer
3191          * to the ioa generated dump table.  Dump data will be extracted based
3192          * on entries in this table */
3193         sdt = &ioa_dump->sdt;
3194
3195         if (ioa_cfg->sis64) {
3196                 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3197                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3198         } else {
3199                 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3200                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3201         }
3202
3203         bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3204                         (max_num_entries * sizeof(struct ipr_sdt_entry));
3205         rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3206                                         bytes_to_copy / sizeof(__be32));
3207
3208         /* Smart Dump table is ready to use and the first entry is valid */
3209         if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3210             (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3211                 dev_err(&ioa_cfg->pdev->dev,
3212                         "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3213                         rc, be32_to_cpu(sdt->hdr.state));
3214                 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3215                 ioa_cfg->sdt_state = DUMP_OBTAINED;
3216                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3217                 return;
3218         }
3219
3220         num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3221
3222         if (num_entries > max_num_entries)
3223                 num_entries = max_num_entries;
3224
3225         /* Update dump length to the actual data to be copied */
3226         dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3227         if (ioa_cfg->sis64)
3228                 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3229         else
3230                 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3231
3232         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3233
3234         for (i = 0; i < num_entries; i++) {
3235                 if (ioa_dump->hdr.len > max_dump_size) {
3236                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3237                         break;
3238                 }
3239
3240                 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3241                         sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3242                         if (ioa_cfg->sis64)
3243                                 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3244                         else {
3245                                 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3246                                 end_off = be32_to_cpu(sdt->entry[i].end_token);
3247
3248                                 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3249                                         bytes_to_copy = end_off - start_off;
3250                                 else
3251                                         valid = 0;
3252                         }
3253                         if (valid) {
3254                                 if (bytes_to_copy > max_dump_size) {
3255                                         sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3256                                         continue;
3257                                 }
3258
3259                                 /* Copy data from adapter to driver buffers */
3260                                 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3261                                                             bytes_to_copy);
3262
3263                                 ioa_dump->hdr.len += bytes_copied;
3264
3265                                 if (bytes_copied != bytes_to_copy) {
3266                                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3267                                         break;
3268                                 }
3269                         }
3270                 }
3271         }
3272
3273         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3274
3275         /* Update dump_header */
3276         driver_dump->hdr.len += ioa_dump->hdr.len;
3277         wmb();
3278         ioa_cfg->sdt_state = DUMP_OBTAINED;
3279         LEAVE;
3280 }
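/*
 * Resulting dump layout, as assembled above: an overall dump header
 * followed by the driver-generated entries (driver version, adapter
 * location, adapter type, IOA trace) and finally the IOA dump entry,
 * whose data is gathered by walking the adapter's Smart Dump Table and
 * copying each valid address/length extent with ipr_sdt_copy().
 */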
3281
3282 #else
3283 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3284 #endif
3285
3286 /**
3287  * ipr_release_dump - Free adapter dump memory
3288  * @kref:       kref struct
3289  *
3290  * Return value:
3291  *      nothing
3292  **/
3293 static void ipr_release_dump(struct kref *kref)
3294 {
3295         struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3296         struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3297         unsigned long lock_flags = 0;
3298         int i;
3299
3300         ENTER;
3301         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3302         ioa_cfg->dump = NULL;
3303         ioa_cfg->sdt_state = INACTIVE;
3304         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3305
3306         for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3307                 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3308
3309         vfree(dump->ioa_dump.ioa_data);
3310         kfree(dump);
3311         LEAVE;
3312 }
3313
3314 /**
3315  * ipr_worker_thread - Worker thread
3316  * @work:               work struct embedded in the ioa config struct
3317  *
3318  * Called at task level from a work thread. This function takes care
3319  * of adding and removing devices from the mid-layer as configuration
3320  * changes are detected by the adapter.
3321  *
3322  * Return value:
3323  *      nothing
3324  **/
3325 static void ipr_worker_thread(struct work_struct *work)
3326 {
3327         unsigned long lock_flags;
3328         struct ipr_resource_entry *res;
3329         struct scsi_device *sdev;
3330         struct ipr_dump *dump;
3331         struct ipr_ioa_cfg *ioa_cfg =
3332                 container_of(work, struct ipr_ioa_cfg, work_q);
3333         u8 bus, target, lun;
3334         int did_work;
3335
3336         ENTER;
3337         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3338
3339         if (ioa_cfg->sdt_state == READ_DUMP) {
3340                 dump = ioa_cfg->dump;
3341                 if (!dump) {
3342                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3343                         return;
3344                 }
3345                 kref_get(&dump->kref);
3346                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3347                 ipr_get_ioa_dump(ioa_cfg, dump);
3348                 kref_put(&dump->kref, ipr_release_dump);
3349
3350                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3351                 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3352                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3353                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3354                 return;
3355         }
3356
3357         if (ioa_cfg->scsi_unblock) {
3358                 ioa_cfg->scsi_unblock = 0;
3359                 ioa_cfg->scsi_blocked = 0;
3360                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3361                 scsi_unblock_requests(ioa_cfg->host);
3362                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3363                 if (ioa_cfg->scsi_blocked)
3364                         scsi_block_requests(ioa_cfg->host);
3365         }
3366
3367         if (!ioa_cfg->scan_enabled) {
3368                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3369                 return;
3370         }
3371
3372 restart:
3373         do {
3374                 did_work = 0;
3375                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3376                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3377                         return;
3378                 }
3379
3380                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3381                         if (res->del_from_ml && res->sdev) {
3382                                 did_work = 1;
3383                                 sdev = res->sdev;
3384                                 if (!scsi_device_get(sdev)) {
3385                                         if (!res->add_to_ml)
3386                                                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3387                                         else
3388                                                 res->del_from_ml = 0;
3389                                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3390                                         scsi_remove_device(sdev);
3391                                         scsi_device_put(sdev);
3392                                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3393                                 }
3394                                 break;
3395                         }
3396                 }
3397         } while (did_work);
3398
3399         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3400                 if (res->add_to_ml) {
3401                         bus = res->bus;
3402                         target = res->target;
3403                         lun = res->lun;
3404                         res->add_to_ml = 0;
3405                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3406                         scsi_add_device(ioa_cfg->host, bus, target, lun);
3407                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3408                         goto restart;
3409                 }
3410         }
3411
3412         ioa_cfg->scan_done = 1;
3413         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3414         kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3415         LEAVE;
3416 }
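/*
 * Locking pattern used above: scsi_add_device() and scsi_remove_device()
 * can sleep, so the host lock is dropped around every mid-layer call and
 * the resource list walk is restarted afterwards (the did_work loop and
 * the restart label), since used_res_q may have changed while the lock
 * was released.
 */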
3417
3418 #ifdef CONFIG_SCSI_IPR_TRACE
3419 /**
3420  * ipr_read_trace - Dump the adapter trace
3421  * @filp:               open sysfs file
3422  * @kobj:               kobject struct
3423  * @bin_attr:           bin_attribute struct
3424  * @buf:                buffer
3425  * @off:                offset
3426  * @count:              buffer size
3427  *
3428  * Return value:
3429  *      number of bytes printed to buffer
3430  **/
3431 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3432                               struct bin_attribute *bin_attr,
3433                               char *buf, loff_t off, size_t count)
3434 {
3435         struct device *dev = container_of(kobj, struct device, kobj);
3436         struct Scsi_Host *shost = class_to_shost(dev);
3437         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3438         unsigned long lock_flags = 0;
3439         ssize_t ret;
3440
3441         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3442         ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3443                                 IPR_TRACE_SIZE);
3444         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3445
3446         return ret;
3447 }
3448
3449 static struct bin_attribute ipr_trace_attr = {
3450         .attr = {
3451                 .name = "trace",
3452                 .mode = S_IRUGO,
3453         },
3454         .size = 0,
3455         .read = ipr_read_trace,
3456 };
3457 #endif
3458
3459 /**
3460  * ipr_show_fw_version - Show the firmware version
3461  * @dev:        class device struct
3462  * @buf:        buffer
3463  *
3464  * Return value:
3465  *      number of bytes printed to buffer
3466  **/
3467 static ssize_t ipr_show_fw_version(struct device *dev,
3468                                    struct device_attribute *attr, char *buf)
3469 {
3470         struct Scsi_Host *shost = class_to_shost(dev);
3471         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3472         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3473         unsigned long lock_flags = 0;
3474         int len;
3475
3476         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3477         len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3478                        ucode_vpd->major_release, ucode_vpd->card_type,
3479                        ucode_vpd->minor_release[0],
3480                        ucode_vpd->minor_release[1]);
3481         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3482         return len;
3483 }
3484
3485 static struct device_attribute ipr_fw_version_attr = {
3486         .attr = {
3487                 .name =         "fw_version",
3488                 .mode =         S_IRUGO,
3489         },
3490         .show = ipr_show_fw_version,
3491 };
3492
3493 /**
3494  * ipr_show_log_level - Show the adapter's error logging level
3495  * @dev:        class device struct
3496  * @buf:        buffer
3497  *
3498  * Return value:
3499  *      number of bytes printed to buffer
3500  **/
3501 static ssize_t ipr_show_log_level(struct device *dev,
3502                                    struct device_attribute *attr, char *buf)
3503 {
3504         struct Scsi_Host *shost = class_to_shost(dev);
3505         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3506         unsigned long lock_flags = 0;
3507         int len;
3508
3509         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3510         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3511         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3512         return len;
3513 }
3514
3515 /**
3516  * ipr_store_log_level - Change the adapter's error logging level
3517  * @dev:        class device struct
3518  * @buf:        buffer
3519  *
3520  * Return value:
3521  *      number of bytes consumed from the buffer
3522  **/
3523 static ssize_t ipr_store_log_level(struct device *dev,
3524                                    struct device_attribute *attr,
3525                                    const char *buf, size_t count)
3526 {
3527         struct Scsi_Host *shost = class_to_shost(dev);
3528         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3529         unsigned long lock_flags = 0;
3530
3531         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3532         ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3533         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3534         return strlen(buf);
3535 }
3536
3537 static struct device_attribute ipr_log_level_attr = {
3538         .attr = {
3539                 .name =         "log_level",
3540                 .mode =         S_IRUGO | S_IWUSR,
3541         },
3542         .show = ipr_show_log_level,
3543         .store = ipr_store_log_level
3544 };
3545
3546 /**
3547  * ipr_store_diagnostics - IOA Diagnostics interface
3548  * @dev:        device struct
3549  * @buf:        buffer
3550  * @count:      buffer size
3551  *
3552  * This function will reset the adapter and wait a reasonable
3553  * amount of time for any errors that the adapter might log.
3554  *
3555  * Return value:
3556  *      count on success / other on failure
3557  **/
3558 static ssize_t ipr_store_diagnostics(struct device *dev,
3559                                      struct device_attribute *attr,
3560                                      const char *buf, size_t count)
3561 {
3562         struct Scsi_Host *shost = class_to_shost(dev);
3563         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3564         unsigned long lock_flags = 0;
3565         int rc = count;
3566
3567         if (!capable(CAP_SYS_ADMIN))
3568                 return -EACCES;
3569
3570         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3571         while (ioa_cfg->in_reset_reload) {
3572                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3573                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3574                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3575         }
3576
3577         ioa_cfg->errors_logged = 0;
3578         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3579
3580         if (ioa_cfg->in_reset_reload) {
3581                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3582                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3583
3584                 /* Wait for a second for any errors to be logged */
3585                 msleep(1000);
3586         } else {
3587                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3588                 return -EIO;
3589         }
3590
3591         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3592         if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3593                 rc = -EIO;
3594         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3595
3596         return rc;
3597 }
3598
3599 static struct device_attribute ipr_diagnostics_attr = {
3600         .attr = {
3601                 .name =         "run_diagnostics",
3602                 .mode =         S_IWUSR,
3603         },
3604         .store = ipr_store_diagnostics
3605 };
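/*
 * Example usage from userspace (the sysfs path is illustrative; the
 * host number varies per system). Any write triggers the diagnostic
 * reset:
 *
 *	# echo 1 > /sys/class/scsi_host/host0/run_diagnostics
 *
 * The write blocks across the reset and fails with EIO if the adapter
 * logged any errors while completing it.
 */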
3606
3607 /**
3608  * ipr_show_adapter_state - Show the adapter's state
3609  * @dev:        device struct
3610  * @buf:        buffer
3611  *
3612  * Return value:
3613  *      number of bytes printed to buffer
3614  **/
3615 static ssize_t ipr_show_adapter_state(struct device *dev,
3616                                       struct device_attribute *attr, char *buf)
3617 {
3618         struct Scsi_Host *shost = class_to_shost(dev);
3619         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3620         unsigned long lock_flags = 0;
3621         int len;
3622
3623         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3624         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3625                 len = snprintf(buf, PAGE_SIZE, "offline\n");
3626         else
3627                 len = snprintf(buf, PAGE_SIZE, "online\n");
3628         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3629         return len;
3630 }
3631
3632 /**
3633  * ipr_store_adapter_state - Change adapter state
3634  * @dev:        device struct
3635  * @buf:        buffer
3636  * @count:      buffer size
3637  *
3638  * This function will change the adapter's state.
3639  *
3640  * Return value:
3641  *      count on success / other on failure
3642  **/
3643 static ssize_t ipr_store_adapter_state(struct device *dev,
3644                                        struct device_attribute *attr,
3645                                        const char *buf, size_t count)
3646 {
3647         struct Scsi_Host *shost = class_to_shost(dev);
3648         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3649         unsigned long lock_flags;
3650         int result = count, i;
3651
3652         if (!capable(CAP_SYS_ADMIN))
3653                 return -EACCES;
3654
3655         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3656         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3657             !strncmp(buf, "online", 6)) {
3658                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3659                         spin_lock(&ioa_cfg->hrrq[i]._lock);
3660                         ioa_cfg->hrrq[i].ioa_is_dead = 0;
3661                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
3662                 }
3663                 wmb();
3664                 ioa_cfg->reset_retries = 0;
3665                 ioa_cfg->in_ioa_bringdown = 0;
3666                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3667         }
3668         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3669         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3670
3671         return result;
3672 }
3673
3674 static struct device_attribute ipr_ioa_state_attr = {
3675         .attr = {
3676                 .name =         "online_state",
3677                 .mode =         S_IRUGO | S_IWUSR,
3678         },
3679         .show = ipr_show_adapter_state,
3680         .store = ipr_store_adapter_state
3681 };
3682
3683 /**
3684  * ipr_store_reset_adapter - Reset the adapter
3685  * @dev:        device struct
3686  * @buf:        buffer
3687  * @count:      buffer size
3688  *
3689  * This function will reset the adapter.
3690  *
3691  * Return value:
3692  *      count on success / other on failure
3693  **/
3694 static ssize_t ipr_store_reset_adapter(struct device *dev,
3695                                        struct device_attribute *attr,
3696                                        const char *buf, size_t count)
3697 {
3698         struct Scsi_Host *shost = class_to_shost(dev);
3699         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3700         unsigned long lock_flags;
3701         int result = count;
3702
3703         if (!capable(CAP_SYS_ADMIN))
3704                 return -EACCES;
3705
3706         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3707         if (!ioa_cfg->in_reset_reload)
3708                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3709         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3710         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3711
3712         return result;
3713 }
3714
3715 static struct device_attribute ipr_ioa_reset_attr = {
3716         .attr = {
3717                 .name =         "reset_host",
3718                 .mode =         S_IWUSR,
3719         },
3720         .store = ipr_store_reset_adapter
3721 };
3722
3723 static int ipr_iopoll(struct irq_poll *iop, int budget);
3724 /**
3725  * ipr_show_iopoll_weight - Show ipr polling mode
3726  * @dev:        class device struct
3727  * @buf:        buffer
3728  *
3729  * Return value:
3730  *      number of bytes printed to buffer
3731  **/
3732 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3733                                    struct device_attribute *attr, char *buf)
3734 {
3735         struct Scsi_Host *shost = class_to_shost(dev);
3736         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3737         unsigned long lock_flags = 0;
3738         int len;
3739
3740         spin_lock_irqsave(shost->host_lock, lock_flags);
3741         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3742         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3743
3744         return len;
3745 }
3746
3747 /**
3748  * ipr_store_iopoll_weight - Change the adapter's polling mode
3749  * @dev:        class device struct
3750  * @buf:        buffer
3751  *
3752  * Return value:
3753  *      number of bytes consumed on success / -EINVAL on failure
3754  **/
3755 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3756                                         struct device_attribute *attr,
3757                                         const char *buf, size_t count)
3758 {
3759         struct Scsi_Host *shost = class_to_shost(dev);
3760         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3761         unsigned long user_iopoll_weight;
3762         unsigned long lock_flags = 0;
3763         int i;
3764
3765         if (!ioa_cfg->sis64) {
3766                 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
3767                 return -EINVAL;
3768         }
3769         if (kstrtoul(buf, 10, &user_iopoll_weight))
3770                 return -EINVAL;
3771
3772         if (user_iopoll_weight > 256) {
3773                 dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must not exceed 256\n");
3774                 return -EINVAL;
3775         }
3776
3777         if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3778                 dev_info(&ioa_cfg->pdev->dev, "Requested irq_poll weight matches the current weight\n");
3779                 return strlen(buf);
3780         }
3781
3782         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3783                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3784                         irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
3785         }
3786
3787         spin_lock_irqsave(shost->host_lock, lock_flags);
3788         ioa_cfg->iopoll_weight = user_iopoll_weight;
3789         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3790                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3791                         irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
3792                                         ioa_cfg->iopoll_weight, ipr_iopoll);
3793                 }
3794         }
3795         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3796
3797         return strlen(buf);
3798 }
3799
3800 static struct device_attribute ipr_iopoll_weight_attr = {
3801         .attr = {
3802                 .name =         "iopoll_weight",
3803                 .mode =         S_IRUGO | S_IWUSR,
3804         },
3805         .show = ipr_show_iopoll_weight,
3806         .store = ipr_store_iopoll_weight
3807 };
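/*
 * Example usage (illustrative host number). A non-zero weight enables
 * irq_poll completion handling on SIS-64 adapters with more than one
 * HRRQ vector; writing 0 reverts to purely interrupt-driven completion:
 *
 *	# cat /sys/class/scsi_host/host0/iopoll_weight
 *	# echo 64 > /sys/class/scsi_host/host0/iopoll_weight
 */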
3808
3809 /**
3810  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3811  * @buf_len:            buffer length
3812  *
3813  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3814  * list to use for microcode download
3815  *
3816  * Return value:
3817  *      pointer to sglist / NULL on failure
3818  **/
3819 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3820 {
3821         int sg_size, order;
3822         struct ipr_sglist *sglist;
3823
3824         /* Get the minimum size per scatter/gather element */
3825         sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3826
3827         /* Get the actual size per element */
3828         order = get_order(sg_size);
3829
3830         /* Allocate a scatter/gather list for the DMA */
3831         sglist = kzalloc(sizeof(struct ipr_sglist), GFP_KERNEL);
3832         if (sglist == NULL) {
3833                 ipr_trace;
3834                 return NULL;
3835         }
3836         sglist->order = order;
3837         sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL,
3838                                               &sglist->num_sg);
3839         if (!sglist->scatterlist) {
3840                 kfree(sglist);
3841                 return NULL;
3842         }
3843
3844         return sglist;
3845 }
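/*
 * Sizing math for the allocation above, assuming for illustration that
 * IPR_MAX_SGLIST is 64 and PAGE_SIZE is 4KB: a 1MB image gives
 * sg_size = 1MB / 63, roughly 16.6KB, which get_order() rounds up to
 * 32KB (order 3) chunks, so the image fits in 32 scatter/gather
 * elements, comfortably under the adapter's limit.
 */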
3846
3847 /**
3848  * ipr_free_ucode_buffer - Frees a microcode download buffer
3849  * @sglist:             scatter/gather list pointer
3850  *
3851  * Free a DMA'able ucode download buffer previously allocated with
3852  * ipr_alloc_ucode_buffer
3853  *
3854  * Return value:
3855  *      nothing
3856  **/
3857 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3858 {
3859         sgl_free_order(sglist->scatterlist, sglist->order);
3860         kfree(sglist);
3861 }
3862
3863 /**
3864  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3865  * @sglist:             scatter/gather list pointer
3866  * @buffer:             buffer pointer
3867  * @len:                buffer length
3868  *
3869  * Copy a microcode image from a user buffer into a buffer allocated by
3870  * ipr_alloc_ucode_buffer
3871  *
3872  * Return value:
3873  *      0 on success / other on failure
3874  **/
3875 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3876                                  u8 *buffer, u32 len)
3877 {
3878         int bsize_elem, i, result = 0;
3879         struct scatterlist *scatterlist;
3880         void *kaddr;
3881
3882         /* Determine the actual number of bytes per element */
3883         bsize_elem = PAGE_SIZE * (1 << sglist->order);
3884
3885         scatterlist = sglist->scatterlist;
3886
3887         for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3888                 struct page *page = sg_page(&scatterlist[i]);
3889
3890                 kaddr = kmap(page);
3891                 memcpy(kaddr, buffer, bsize_elem);
3892                 kunmap(page);
3893
3894                 scatterlist[i].length = bsize_elem;
3895
3896                 if (result != 0) {
3897                         ipr_trace;
3898                         return result;
3899                 }
3900         }
3901
3902         if (len % bsize_elem) {
3903                 struct page *page = sg_page(&scatterlist[i]);
3904
3905                 kaddr = kmap(page);
3906                 memcpy(kaddr, buffer, len % bsize_elem);
3907                 kunmap(page);
3908
3909                 scatterlist[i].length = len % bsize_elem;
3910         }
3911
3912         sglist->buffer_len = len;
3913         return result;
3914 }
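/*
 * Note: the result variable is never set to a non-zero value in the
 * loop above, so the early-return check is effectively dead code,
 * apparently left over from an earlier version whose per-element copy
 * could fail. With memcpy() the copy cannot fail and the function
 * always returns 0.
 */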
3915
3916 /**
3917  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3918  * @ipr_cmd:            ipr command struct
3919  * @sglist:             scatter/gather list
3920  *
3921  * Builds a microcode download IOA data list (IOADL).
3922  *
3923  **/
3924 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3925                                     struct ipr_sglist *sglist)
3926 {
3927         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3928         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3929         struct scatterlist *scatterlist = sglist->scatterlist;
3930         int i;
3931
3932         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3933         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3934         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3935
3936         ioarcb->ioadl_len =
3937                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3938         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3939                 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3940                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3941                 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3942         }
3943
3944         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3945 }
3946
3947 /**
3948  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3949  * @ipr_cmd:    ipr command struct
3950  * @sglist:             scatter/gather list
3951  *
3952  * Builds a microcode download IOA data list (IOADL).
3953  *
3954  **/
3955 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3956                                   struct ipr_sglist *sglist)
3957 {
3958         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3959         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3960         struct scatterlist *scatterlist = sglist->scatterlist;
3961         int i;
3962
3963         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3964         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3965         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3966
3967         ioarcb->ioadl_len =
3968                 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3969
3970         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3971                 ioadl[i].flags_and_data_len =
3972                         cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3973                 ioadl[i].address =
3974                         cpu_to_be32(sg_dma_address(&scatterlist[i]));
3975         }
3976
3977         ioadl[i-1].flags_and_data_len |=
3978                 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3979 }
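/*
 * The two IOADL builders above differ only in descriptor format: the
 * 32-bit descriptor packs the flags and length into a single be32 and
 * carries a 32-bit DMA address, while the SIS-64 descriptor uses
 * separate be32 flags and length fields plus a 64-bit address. In both
 * formats the final descriptor is tagged with IPR_IOADL_FLAGS_LAST so
 * the adapter knows where the list ends.
 */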
3980
3981 /**
3982  * ipr_update_ioa_ucode - Update IOA's microcode
3983  * @ioa_cfg:    ioa config struct
3984  * @sglist:             scatter/gather list
3985  *
3986  * Initiate an adapter reset to update the IOA's microcode
3987  *
3988  * Return value:
3989  *      0 on success / -EIO on failure
3990  **/
3991 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3992                                 struct ipr_sglist *sglist)
3993 {
3994         unsigned long lock_flags;
3995
3996         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3997         while (ioa_cfg->in_reset_reload) {
3998                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3999                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4000                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4001         }
4002
4003         if (ioa_cfg->ucode_sglist) {
4004                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4005                 dev_err(&ioa_cfg->pdev->dev,
4006                         "Microcode download already in progress\n");
4007                 return -EIO;
4008         }
4009
4010         sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
4011                                         sglist->scatterlist, sglist->num_sg,
4012                                         DMA_TO_DEVICE);
4013
4014         if (!sglist->num_dma_sg) {
4015                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4016                 dev_err(&ioa_cfg->pdev->dev,
4017                         "Failed to map microcode download buffer!\n");
4018                 return -EIO;
4019         }
4020
4021         ioa_cfg->ucode_sglist = sglist;
4022         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
4023         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4024         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4025
4026         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4027         ioa_cfg->ucode_sglist = NULL;
4028         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4029         return 0;
4030 }
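/*
 * Flow of ipr_update_ioa_ucode(): wait out any reset already in
 * progress, DMA-map the staged image, publish it through
 * ioa_cfg->ucode_sglist, then kick a normal shutdown/reset. The actual
 * download to the adapter happens as a stage of that reset sequence,
 * and the DMA mapping is torn down elsewhere in the reset job once the
 * download has completed.
 */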
4031
4032 /**
4033  * ipr_store_update_fw - Update the firmware on the adapter
4034  * @dev:        device struct
4035  * @buf:        buffer
4036  * @count:      buffer size
4037  *
4038  * This function will update the firmware on the adapter.
4039  *
4040  * Return value:
4041  *      count on success / other on failure
4042  **/
4043 static ssize_t ipr_store_update_fw(struct device *dev,
4044                                    struct device_attribute *attr,
4045                                    const char *buf, size_t count)
4046 {
4047         struct Scsi_Host *shost = class_to_shost(dev);
4048         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4049         struct ipr_ucode_image_header *image_hdr;
4050         const struct firmware *fw_entry;
4051         struct ipr_sglist *sglist;
4052         char fname[100];
4053         u8 *src;
4054         char *endline;
4055         int result, dnld_size;
4056
4057         if (!capable(CAP_SYS_ADMIN))
4058                 return -EACCES;
4059
4060         snprintf(fname, sizeof(fname), "%s", buf);
4061
4062         endline = strchr(fname, '\n');
4063         if (endline)
4064                 *endline = '\0';
4065
4066         if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4067                 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4068                 return -EIO;
4069         }
4070
4071         image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4072
4073         src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4074         dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4075         sglist = ipr_alloc_ucode_buffer(dnld_size);
4076
4077         if (!sglist) {
4078                 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4079                 release_firmware(fw_entry);
4080                 return -ENOMEM;
4081         }
4082
4083         result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4084
4085         if (result) {
4086                 dev_err(&ioa_cfg->pdev->dev,
4087                         "Microcode buffer copy to DMA buffer failed\n");
4088                 goto out;
4089         }
4090
4091         ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
4092
4093         result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4094
4095         if (!result)
4096                 result = count;
4097 out:
4098         ipr_free_ucode_buffer(sglist);
4099         release_firmware(fw_entry);
4100         return result;
4101 }
4102
4103 static struct device_attribute ipr_update_fw_attr = {
4104         .attr = {
4105                 .name =         "update_fw",
4106                 .mode =         S_IWUSR,
4107         },
4108         .store = ipr_store_update_fw
4109 };
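/*
 * Illustrative use of the update_fw attribute (hypothetical host number
 * and image name): the string written is handed to request_firmware(),
 * so the file must be visible under the firmware search path, e.g.
 * /lib/firmware, and any trailing newline from echo is stripped by the
 * handler above:
 *
 *      # echo ibmsis_fw.img > /sys/class/scsi_host/host0/update_fw
 */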
4110
4111 /**
4112  * ipr_show_fw_type - Show the adapter's firmware type.
4113  * @dev:        class device struct
4114  * @buf:        buffer
4115  *
4116  * Return value:
4117  *      number of bytes printed to buffer
4118  **/
4119 static ssize_t ipr_show_fw_type(struct device *dev,
4120                                 struct device_attribute *attr, char *buf)
4121 {
4122         struct Scsi_Host *shost = class_to_shost(dev);
4123         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4124         unsigned long lock_flags = 0;
4125         int len;
4126
4127         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4128         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4129         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4130         return len;
4131 }
4132
4133 static struct device_attribute ipr_ioa_fw_type_attr = {
4134         .attr = {
4135                 .name =         "fw_type",
4136                 .mode =         S_IRUGO,
4137         },
4138         .show = ipr_show_fw_type
4139 };
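/*
 * Reading fw_type simply reports ioa_cfg->sis64, e.g. (hypothetical
 * host number):
 *
 *      # cat /sys/class/scsi_host/host0/fw_type
 *      1               (1 = SIS-64 adapter, 0 = SIS-32)
 */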
4140
4141 static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
4142                                 struct bin_attribute *bin_attr, char *buf,
4143                                 loff_t off, size_t count)
4144 {
4145         struct device *cdev = container_of(kobj, struct device, kobj);
4146         struct Scsi_Host *shost = class_to_shost(cdev);
4147         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4148         struct ipr_hostrcb *hostrcb;
4149         unsigned long lock_flags = 0;
4150         int ret;
4151
4152         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4153         hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4154                                         struct ipr_hostrcb, queue);
4155         if (!hostrcb) {
4156                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4157                 return 0;
4158         }
4159         ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
4160                                 sizeof(hostrcb->hcam));
4161         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4162         return ret;
4163 }
4164
4165 static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
4166                                 struct bin_attribute *bin_attr, char *buf,
4167                                 loff_t off, size_t count)
4168 {
4169         struct device *cdev = container_of(kobj, struct device, kobj);
4170         struct Scsi_Host *shost = class_to_shost(cdev);
4171         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4172         struct ipr_hostrcb *hostrcb;
4173         unsigned long lock_flags = 0;
4174
4175         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4176         hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4177                                         struct ipr_hostrcb, queue);
4178         if (!hostrcb) {
4179                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4180                 return count;
4181         }
4182
4183         /* Reclaim hostrcb before exit */
4184         list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4185         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4186         return count;
4187 }
4188
4189 static struct bin_attribute ipr_ioa_async_err_log = {
4190         .attr = {
4191                 .name =         "async_err_log",
4192                 .mode =         S_IRUGO | S_IWUSR,
4193         },
4194         .size = 0,
4195         .read = ipr_read_async_err_log,
4196         .write = ipr_next_async_err_log
4197 };
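/*
 * Sketch of consuming the async error log through the attribute above
 * (hypothetical host number): a read returns the oldest host RCB still
 * on hostrcb_report_q (or 0 bytes if the queue is empty), and a write
 * of any value moves that oldest entry back to the free queue:
 *
 *      # dd if=/sys/class/scsi_host/host0/async_err_log of=hcam.bin
 *      # echo 1 > /sys/class/scsi_host/host0/async_err_log
 */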
4198
4199 static struct device_attribute *ipr_ioa_attrs[] = {
4200         &ipr_fw_version_attr,
4201         &ipr_log_level_attr,
4202         &ipr_diagnostics_attr,
4203         &ipr_ioa_state_attr,
4204         &ipr_ioa_reset_attr,
4205         &ipr_update_fw_attr,
4206         &ipr_ioa_fw_type_attr,
4207         &ipr_iopoll_weight_attr,
4208         NULL,
4209 };
4210
4211 #ifdef CONFIG_SCSI_IPR_DUMP
4212 /**
4213  * ipr_read_dump - Dump the adapter
4214  * @filp:               open sysfs file
4215  * @kobj:               kobject struct
4216  * @bin_attr:           bin_attribute struct
4217  * @buf:                buffer
4218  * @off:                offset
4219  * @count:              buffer size
4220  *
4221  * Return value:
4222  *      number of bytes printed to buffer
4223  **/
4224 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4225                              struct bin_attribute *bin_attr,
4226                              char *buf, loff_t off, size_t count)
4227 {
4228         struct device *cdev = container_of(kobj, struct device, kobj);
4229         struct Scsi_Host *shost = class_to_shost(cdev);
4230         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4231         struct ipr_dump *dump;
4232         unsigned long lock_flags = 0;
4233         char *src;
4234         int len, sdt_end;
4235         size_t rc = count;
4236
4237         if (!capable(CAP_SYS_ADMIN))
4238                 return -EACCES;
4239
4240         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4241         dump = ioa_cfg->dump;
4242
4243         if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4244                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4245                 return 0;
4246         }
4247         kref_get(&dump->kref);
4248         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4249
4250         if (off > dump->driver_dump.hdr.len) {
4251                 kref_put(&dump->kref, ipr_release_dump);
4252                 return 0;
4253         }
4254
4255         if (off + count > dump->driver_dump.hdr.len) {
4256                 count = dump->driver_dump.hdr.len - off;
4257                 rc = count;
4258         }
4259
4260         if (count && off < sizeof(dump->driver_dump)) {
4261                 if (off + count > sizeof(dump->driver_dump))
4262                         len = sizeof(dump->driver_dump) - off;
4263                 else
4264                         len = count;
4265                 src = (u8 *)&dump->driver_dump + off;
4266                 memcpy(buf, src, len);
4267                 buf += len;
4268                 off += len;
4269                 count -= len;
4270         }
4271
4272         off -= sizeof(dump->driver_dump);
4273
4274         if (ioa_cfg->sis64)
4275                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4276                           (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4277                            sizeof(struct ipr_sdt_entry));
4278         else
4279                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4280                           (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4281
4282         if (count && off < sdt_end) {
4283                 if (off + count > sdt_end)
4284                         len = sdt_end - off;
4285                 else
4286                         len = count;
4287                 src = (u8 *)&dump->ioa_dump + off;
4288                 memcpy(buf, src, len);
4289                 buf += len;
4290                 off += len;
4291                 count -= len;
4292         }
4293
4294         off -= sdt_end;
4295
4296         while (count) {
4297                 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4298                         len = PAGE_ALIGN(off) - off;
4299                 else
4300                         len = count;
4301                 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4302                 src += off & ~PAGE_MASK;
4303                 memcpy(buf, src, len);
4304                 buf += len;
4305                 off += len;
4306                 count -= len;
4307         }
4308
4309         kref_put(&dump->kref, ipr_release_dump);
4310         return rc;
4311 }
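/*
 * The read handler above presents the dump as three concatenated
 * regions, rebasing 'off' after each region is consumed:
 *
 *      [0, sizeof(dump->driver_dump))  driver dump structure
 *      [0, sdt_end)                    SDT header plus entries (entry
 *                                      count depends on sis64 vs. FMT2)
 *      [0, remainder of hdr.len)       IOA data, one PAGE_SIZE chunk
 *                                      per ioa_dump.ioa_data[] slot
 */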
4312
4313 /**
4314  * ipr_alloc_dump - Prepare for adapter dump
4315  * @ioa_cfg:    ioa config struct
4316  *
4317  * Return value:
4318  *      0 on success / other on failure
4319  **/
4320 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4321 {
4322         struct ipr_dump *dump;
4323         __be32 **ioa_data;
4324         unsigned long lock_flags = 0;
4325
4326         dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4327
4328         if (!dump) {
4329                 ipr_err("Dump memory allocation failed\n");
4330                 return -ENOMEM;
4331         }
4332
4333         if (ioa_cfg->sis64)
4334                 ioa_data = vmalloc(array_size(IPR_FMT3_MAX_NUM_DUMP_PAGES,
4335                                               sizeof(__be32 *)));
4336         else
4337                 ioa_data = vmalloc(array_size(IPR_FMT2_MAX_NUM_DUMP_PAGES,
4338                                               sizeof(__be32 *)));
4339
4340         if (!ioa_data) {
4341                 ipr_err("Dump memory allocation failed\n");
4342                 kfree(dump);
4343                 return -ENOMEM;
4344         }
4345
4346         dump->ioa_dump.ioa_data = ioa_data;
4347
4348         kref_init(&dump->kref);
4349         dump->ioa_cfg = ioa_cfg;
4350
4351         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4352
4353         if (INACTIVE != ioa_cfg->sdt_state) {
4354                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4355                 vfree(dump->ioa_dump.ioa_data);
4356                 kfree(dump);
4357                 return 0;
4358         }
4359
4360         ioa_cfg->dump = dump;
4361         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4362         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4363                 ioa_cfg->dump_taken = 1;
4364                 schedule_work(&ioa_cfg->work_q);
4365         }
4366         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4367
4368         return 0;
4369 }
4370
4371 /**
4372  * ipr_free_dump - Free adapter dump memory
4373  * @ioa_cfg:    ioa config struct
4374  *
4375  * Return value:
4376  *      0 on success / other on failure
4377  **/
4378 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4379 {
4380         struct ipr_dump *dump;
4381         unsigned long lock_flags = 0;
4382
4383         ENTER;
4384
4385         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4386         dump = ioa_cfg->dump;
4387         if (!dump) {
4388                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4389                 return 0;
4390         }
4391
4392         ioa_cfg->dump = NULL;
4393         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4394
4395         kref_put(&dump->kref, ipr_release_dump);
4396
4397         LEAVE;
4398         return 0;
4399 }
4400
4401 /**
4402  * ipr_write_dump - Setup dump state of adapter
4403  * @filp:               open sysfs file
4404  * @kobj:               kobject struct
4405  * @bin_attr:           bin_attribute struct
4406  * @buf:                buffer
4407  * @off:                offset
4408  * @count:              buffer size
4409  *
4410  * Return value:
4411  *      count on success / error code on failure
4412  **/
4413 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4414                               struct bin_attribute *bin_attr,
4415                               char *buf, loff_t off, size_t count)
4416 {
4417         struct device *cdev = container_of(kobj, struct device, kobj);
4418         struct Scsi_Host *shost = class_to_shost(cdev);
4419         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4420         int rc;
4421
4422         if (!capable(CAP_SYS_ADMIN))
4423                 return -EACCES;
4424
4425         if (buf[0] == '1')
4426                 rc = ipr_alloc_dump(ioa_cfg);
4427         else if (buf[0] == '0')
4428                 rc = ipr_free_dump(ioa_cfg);
4429         else
4430                 return -EINVAL;
4431
4432         if (rc)
4433                 return rc;
4434         else
4435                 return count;
4436 }
4437
4438 static struct bin_attribute ipr_dump_attr = {
4439         .attr = {
4440                 .name = "dump",
4441                 .mode = S_IRUSR | S_IWUSR,
4442         },
4443         .size = 0,
4444         .read = ipr_read_dump,
4445         .write = ipr_write_dump
4446 };
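/*
 * Typical life cycle of the dump attribute (hypothetical host number):
 * writing '1' preallocates dump memory and arms WAIT_FOR_DUMP, a read
 * returns data only once the driver has actually obtained a dump
 * (sdt_state == DUMP_OBTAINED), and writing '0' releases the memory:
 *
 *      # echo 1 > /sys/class/scsi_host/host0/dump
 *      # cat /sys/class/scsi_host/host0/dump > /tmp/ipr.dump
 *      # echo 0 > /sys/class/scsi_host/host0/dump
 */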
4447 #else
4448 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4449 #endif
4450
4451 /**
4452  * ipr_change_queue_depth - Change the device's queue depth
4453  * @sdev:       scsi device struct
4454  * @qdepth:     depth to set
4456  *
4457  * Return value:
4458  *      actual depth set
4459  **/
4460 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
4461 {
4462         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4463         struct ipr_resource_entry *res;
4464         unsigned long lock_flags = 0;
4465
4466         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4467         res = (struct ipr_resource_entry *)sdev->hostdata;
4468
4469         if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4470                 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4471         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4472
4473         scsi_change_queue_depth(sdev, qdepth);
4474         return sdev->queue_depth;
4475 }
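/*
 * ipr_change_queue_depth() is what runs when user space adjusts the
 * generic queue_depth attribute, e.g. (hypothetical device):
 *
 *      # echo 64 > /sys/bus/scsi/devices/0:0:1:0/queue_depth
 *
 * For SATA (GATA) resources the requested depth is silently capped at
 * IPR_MAX_CMD_PER_ATA_LUN before being applied.
 */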
4476
4477 /**
4478  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4479  * @dev:        device struct
4480  * @attr:       device attribute structure
4481  * @buf:        buffer
4482  *
4483  * Return value:
4484  *      number of bytes printed to buffer
4485  **/
4486 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4487 {
4488         struct scsi_device *sdev = to_scsi_device(dev);
4489         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4490         struct ipr_resource_entry *res;
4491         unsigned long lock_flags = 0;
4492         ssize_t len = -ENXIO;
4493
4494         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4495         res = (struct ipr_resource_entry *)sdev->hostdata;
4496         if (res)
4497                 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4498         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4499         return len;
4500 }
4501
4502 static struct device_attribute ipr_adapter_handle_attr = {
4503         .attr = {
4504                 .name =         "adapter_handle",
4505                 .mode =         S_IRUSR,
4506         },
4507         .show = ipr_show_adapter_handle
4508 };
4509
4510 /**
4511  * ipr_show_resource_path - Show the resource path or the resource address for
4512  *                          this device.
4513  * @dev:        device struct
4514  * @attr:       device attribute structure
4515  * @buf:        buffer
4516  *
4517  * Return value:
4518  *      number of bytes printed to buffer
4519  **/
4520 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4521 {
4522         struct scsi_device *sdev = to_scsi_device(dev);
4523         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4524         struct ipr_resource_entry *res;
4525         unsigned long lock_flags = 0;
4526         ssize_t len = -ENXIO;
4527         char buffer[IPR_MAX_RES_PATH_LENGTH];
4528
4529         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4530         res = (struct ipr_resource_entry *)sdev->hostdata;
4531         if (res && ioa_cfg->sis64)
4532                 len = snprintf(buf, PAGE_SIZE, "%s\n",
4533                                __ipr_format_res_path(res->res_path, buffer,
4534                                                      sizeof(buffer)));
4535         else if (res)
4536                 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4537                                res->bus, res->target, res->lun);
4538
4539         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4540         return len;
4541 }
4542
4543 static struct device_attribute ipr_resource_path_attr = {
4544         .attr = {
4545                 .name =         "resource_path",
4546                 .mode =         S_IRUGO,
4547         },
4548         .show = ipr_show_resource_path
4549 };
4550
4551 /**
4552  * ipr_show_device_id - Show the device_id for this device.
4553  * @dev:        device struct
4554  * @attr:       device attribute structure
4555  * @buf:        buffer
4556  *
4557  * Return value:
4558  *      number of bytes printed to buffer
4559  **/
4560 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4561 {
4562         struct scsi_device *sdev = to_scsi_device(dev);
4563         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4564         struct ipr_resource_entry *res;
4565         unsigned long lock_flags = 0;
4566         ssize_t len = -ENXIO;
4567
4568         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4569         res = (struct ipr_resource_entry *)sdev->hostdata;
4570         if (res && ioa_cfg->sis64)
4571                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
4572         else if (res)
4573                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4574
4575         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4576         return len;
4577 }
4578
4579 static struct device_attribute ipr_device_id_attr = {
4580         .attr = {
4581                 .name =         "device_id",
4582                 .mode =         S_IRUGO,
4583         },
4584         .show = ipr_show_device_id
4585 };
4586
4587 /**
4588  * ipr_show_resource_type - Show the resource type for this device.
4589  * @dev:        device struct
4590  * @attr:       device attribute structure
4591  * @buf:        buffer
4592  *
4593  * Return value:
4594  *      number of bytes printed to buffer
4595  **/
4596 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4597 {
4598         struct scsi_device *sdev = to_scsi_device(dev);
4599         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4600         struct ipr_resource_entry *res;
4601         unsigned long lock_flags = 0;
4602         ssize_t len = -ENXIO;
4603
4604         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4605         res = (struct ipr_resource_entry *)sdev->hostdata;
4606
4607         if (res)
4608                 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4609
4610         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4611         return len;
4612 }
4613
4614 static struct device_attribute ipr_resource_type_attr = {
4615         .attr = {
4616                 .name =         "resource_type",
4617                 .mode =         S_IRUGO,
4618         },
4619         .show = ipr_show_resource_type
4620 };
4621
4622 /**
4623  * ipr_show_raw_mode - Show the device's raw mode
4624  * @dev:        class device struct
4625  * @buf:        buffer
4626  *
4627  * Return value:
4628  *      number of bytes printed to buffer
4629  **/
4630 static ssize_t ipr_show_raw_mode(struct device *dev,
4631                                  struct device_attribute *attr, char *buf)
4632 {
4633         struct scsi_device *sdev = to_scsi_device(dev);
4634         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4635         struct ipr_resource_entry *res;
4636         unsigned long lock_flags = 0;
4637         ssize_t len;
4638
4639         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4640         res = (struct ipr_resource_entry *)sdev->hostdata;
4641         if (res)
4642                 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4643         else
4644                 len = -ENXIO;
4645         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4646         return len;
4647 }
4648
4649 /**
4650  * ipr_store_raw_mode - Change the device's raw mode
4651  * @dev:        class device struct
4652  * @buf:        buffer
4653  *
4654  * Return value:
4655  *      number of bytes consumed on success / error code on failure
4656  **/
4657 static ssize_t ipr_store_raw_mode(struct device *dev,
4658                                   struct device_attribute *attr,
4659                                   const char *buf, size_t count)
4660 {
4661         struct scsi_device *sdev = to_scsi_device(dev);
4662         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4663         struct ipr_resource_entry *res;
4664         unsigned long lock_flags = 0;
4665         ssize_t len;
4666
4667         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4668         res = (struct ipr_resource_entry *)sdev->hostdata;
4669         if (res) {
4670                 if (ipr_is_af_dasd_device(res)) {
4671                         res->raw_mode = simple_strtoul(buf, NULL, 10);
4672                         len = strlen(buf);
4673                         if (res->sdev)
4674                                 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4675                                         res->raw_mode ? "enabled" : "disabled");
4676                 } else
4677                         len = -EINVAL;
4678         } else
4679                 len = -ENXIO;
4680         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4681         return len;
4682 }
4683
4684 static struct device_attribute ipr_raw_mode_attr = {
4685         .attr = {
4686                 .name =         "raw_mode",
4687                 .mode =         S_IRUGO | S_IWUSR,
4688         },
4689         .show = ipr_show_raw_mode,
4690         .store = ipr_store_raw_mode
4691 };
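/*
 * Sketch of toggling raw mode (hypothetical device address; the store
 * above only accepts the change for AF DASD resources and returns
 * -EINVAL for anything else):
 *
 *      # echo 1 > /sys/bus/scsi/devices/0:2:0:0/raw_mode
 *      # cat /sys/bus/scsi/devices/0:2:0:0/raw_mode
 *      1
 */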
4692
4693 static struct device_attribute *ipr_dev_attrs[] = {
4694         &ipr_adapter_handle_attr,
4695         &ipr_resource_path_attr,
4696         &ipr_device_id_attr,
4697         &ipr_resource_type_attr,
4698         &ipr_raw_mode_attr,
4699         NULL,
4700 };
4701
4702 /**
4703  * ipr_biosparam - Return the HSC mapping
4704  * @sdev:                       scsi device struct
4705  * @block_device:       block device pointer
4706  * @capacity:           capacity of the device
4707  * @parm:                       Array containing returned HSC values.
4708  *
4709  * This function generates the HSC parms that fdisk uses.
4710  * We want to make sure we return something that places partitions
4711  * on 4k boundaries for best performance with the IOA.
4712  *
4713  * Return value:
4714  *      0 on success
4715  **/
4716 static int ipr_biosparam(struct scsi_device *sdev,
4717                          struct block_device *block_device,
4718                          sector_t capacity, int *parm)
4719 {
4720         int heads, sectors;
4721         sector_t cylinders;
4722
4723         heads = 128;
4724         sectors = 32;
4725
4726         cylinders = capacity;
4727         sector_div(cylinders, (128 * 32));
4728
4729         /* return result */
4730         parm[0] = heads;
4731         parm[1] = sectors;
4732         parm[2] = cylinders;
4733
4734         return 0;
4735 }
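/*
 * Worked example of the fixed geometry above: 128 heads * 32
 * sectors/track = 4096 sectors (2 MiB) per cylinder, so anything
 * aligned to a cylinder boundary is also 4 KiB aligned.  For a
 * hypothetical 8388608-sector (4 GiB) disk:
 *
 *      cylinders = 8388608 / (128 * 32) = 2048
 *
 * and fdisk reports C/H/S = 2048/128/32.
 */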
4736
4737 /**
4738  * ipr_find_starget - Find target based on bus/target.
4739  * @starget:    scsi target struct
4740  *
4741  * Return value:
4742  *      resource entry pointer if found / NULL if not found
4743  **/
4744 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4745 {
4746         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4747         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4748         struct ipr_resource_entry *res;
4749
4750         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4751                 if ((res->bus == starget->channel) &&
4752                     (res->target == starget->id)) {
4753                         return res;
4754                 }
4755         }
4756
4757         return NULL;
4758 }
4759
4760 static struct ata_port_info sata_port_info;
4761
4762 /**
4763  * ipr_target_alloc - Prepare for commands to a SCSI target
4764  * @starget:    scsi target struct
4765  *
4766  * If the device is a SATA device, this function allocates an
4767  * ATA port with libata, else it does nothing.
4768  *
4769  * Return value:
4770  *      0 on success / non-0 on failure
4771  **/
4772 static int ipr_target_alloc(struct scsi_target *starget)
4773 {
4774         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4775         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4776         struct ipr_sata_port *sata_port;
4777         struct ata_port *ap;
4778         struct ipr_resource_entry *res;
4779         unsigned long lock_flags;
4780
4781         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4782         res = ipr_find_starget(starget);
4783         starget->hostdata = NULL;
4784
4785         if (res && ipr_is_gata(res)) {
4786                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4787                 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4788                 if (!sata_port)
4789                         return -ENOMEM;
4790
4791                 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4792                 if (ap) {
4793                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4794                         sata_port->ioa_cfg = ioa_cfg;
4795                         sata_port->ap = ap;
4796                         sata_port->res = res;
4797
4798                         res->sata_port = sata_port;
4799                         ap->private_data = sata_port;
4800                         starget->hostdata = sata_port;
4801                 } else {
4802                         kfree(sata_port);
4803                         return -ENOMEM;
4804                 }
4805         }
4806         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4807
4808         return 0;
4809 }
4810
4811 /**
4812  * ipr_target_destroy - Destroy a SCSI target
4813  * @starget:    scsi target struct
4814  *
4815  * If the device was a SATA device, this function frees the libata
4816  * ATA port, else it does nothing.
4817  *
4818  **/
4819 static void ipr_target_destroy(struct scsi_target *starget)
4820 {
4821         struct ipr_sata_port *sata_port = starget->hostdata;
4822         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4823         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4824
4825         if (ioa_cfg->sis64) {
4826                 if (!ipr_find_starget(starget)) {
4827                         if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4828                                 clear_bit(starget->id, ioa_cfg->array_ids);
4829                         else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4830                                 clear_bit(starget->id, ioa_cfg->vset_ids);
4831                         else if (starget->channel == 0)
4832                                 clear_bit(starget->id, ioa_cfg->target_ids);
4833                 }
4834         }
4835
4836         if (sata_port) {
4837                 starget->hostdata = NULL;
4838                 ata_sas_port_destroy(sata_port->ap);
4839                 kfree(sata_port);
4840         }
4841 }
4842
4843 /**
4844  * ipr_find_sdev - Find device based on bus/target/lun.
4845  * @sdev:       scsi device struct
4846  *
4847  * Return value:
4848  *      resource entry pointer if found / NULL if not found
4849  **/
4850 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4851 {
4852         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4853         struct ipr_resource_entry *res;
4854
4855         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4856                 if ((res->bus == sdev->channel) &&
4857                     (res->target == sdev->id) &&
4858                     (res->lun == sdev->lun))
4859                         return res;
4860         }
4861
4862         return NULL;
4863 }
4864
4865 /**
4866  * ipr_slave_destroy - Unconfigure a SCSI device
4867  * @sdev:       scsi device struct
4868  *
4869  * Return value:
4870  *      nothing
4871  **/
4872 static void ipr_slave_destroy(struct scsi_device *sdev)
4873 {
4874         struct ipr_resource_entry *res;
4875         struct ipr_ioa_cfg *ioa_cfg;
4876         unsigned long lock_flags = 0;
4877
4878         ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4879
4880         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4881         res = (struct ipr_resource_entry *) sdev->hostdata;
4882         if (res) {
4883                 if (res->sata_port)
4884                         res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4885                 sdev->hostdata = NULL;
4886                 res->sdev = NULL;
4887                 res->sata_port = NULL;
4888         }
4889         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4890 }
4891
4892 /**
4893  * ipr_slave_configure - Configure a SCSI device
4894  * @sdev:       scsi device struct
4895  *
4896  * This function configures the specified scsi device.
4897  *
4898  * Return value:
4899  *      0 on success
4900  **/
4901 static int ipr_slave_configure(struct scsi_device *sdev)
4902 {
4903         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4904         struct ipr_resource_entry *res;
4905         struct ata_port *ap = NULL;
4906         unsigned long lock_flags = 0;
4907         char buffer[IPR_MAX_RES_PATH_LENGTH];
4908
4909         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4910         res = sdev->hostdata;
4911         if (res) {
4912                 if (ipr_is_af_dasd_device(res))
4913                         sdev->type = TYPE_RAID;
4914                 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4915                         sdev->scsi_level = 4;
4916                         sdev->no_uld_attach = 1;
4917                 }
4918                 if (ipr_is_vset_device(res)) {
4919                         sdev->scsi_level = SCSI_SPC_3;
4920                         sdev->no_report_opcodes = 1;
4921                         blk_queue_rq_timeout(sdev->request_queue,
4922                                              IPR_VSET_RW_TIMEOUT);
4923                         blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4924                 }
4925                 if (ipr_is_gata(res) && res->sata_port)
4926                         ap = res->sata_port->ap;
4927                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4928
4929                 if (ap) {
4930                         scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
4931                         ata_sas_slave_configure(sdev, ap);
4932                 }
4933
4934                 if (ioa_cfg->sis64)
4935                         sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4936                                     ipr_format_res_path(ioa_cfg,
4937                                 res->res_path, buffer, sizeof(buffer)));
4938                 return 0;
4939         }
4940         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4941         return 0;
4942 }
4943
4944 /**
4945  * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4946  * @sdev:       scsi device struct
4947  *
4948  * This function initializes an ATA port so that future commands
4949  * sent through queuecommand will work.
4950  *
4951  * Return value:
4952  *      0 on success
4953  **/
4954 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4955 {
4956         struct ipr_sata_port *sata_port = NULL;
4957         int rc = -ENXIO;
4958
4959         ENTER;
4960         if (sdev->sdev_target)
4961                 sata_port = sdev->sdev_target->hostdata;
4962         if (sata_port) {
4963                 rc = ata_sas_port_init(sata_port->ap);
4964                 if (rc == 0)
4965                         rc = ata_sas_sync_probe(sata_port->ap);
4966         }
4967
4968         if (rc)
4969                 ipr_slave_destroy(sdev);
4970
4971         LEAVE;
4972         return rc;
4973 }
4974
4975 /**
4976  * ipr_slave_alloc - Prepare for commands to a device.
4977  * @sdev:       scsi device struct
4978  *
4979  * This function saves a pointer to the resource entry
4980  * in the scsi device struct if the device exists. We
4981  * can then use this pointer in ipr_queuecommand when
4982  * handling new commands.
4983  *
4984  * Return value:
4985  *      0 on success / -ENXIO if device does not exist
4986  **/
4987 static int ipr_slave_alloc(struct scsi_device *sdev)
4988 {
4989         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4990         struct ipr_resource_entry *res;
4991         unsigned long lock_flags;
4992         int rc = -ENXIO;
4993
4994         sdev->hostdata = NULL;
4995
4996         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4997
4998         res = ipr_find_sdev(sdev);
4999         if (res) {
5000                 res->sdev = sdev;
5001                 res->add_to_ml = 0;
5002                 res->in_erp = 0;
5003                 sdev->hostdata = res;
5004                 if (!ipr_is_naca_model(res))
5005                         res->needs_sync_complete = 1;
5006                 rc = 0;
5007                 if (ipr_is_gata(res)) {
5008                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5009                         return ipr_ata_slave_alloc(sdev);
5010                 }
5011         }
5012
5013         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5014
5015         return rc;
5016 }
5017
5018 /**
5019  * ipr_match_lun - Match function for specified LUN
5020  * @ipr_cmd:    ipr command struct
5021  * @device:             device to match (sdev)
5022  *
5023  * Returns:
5024  *      1 if command matches sdev / 0 if command does not match sdev
5025  **/
5026 static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
5027 {
5028         if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
5029                 return 1;
5030         return 0;
5031 }
5032
5033 /**
5034  * ipr_cmnd_is_free - Check if a command is free or not
5035  * @ipr_cmd:    ipr command struct
5036  *
5037  * Returns:
5038  *      true / false
5039  **/
5040 static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
5041 {
5042         struct ipr_cmnd *loop_cmd;
5043
5044         list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
5045                 if (loop_cmd == ipr_cmd)
5046                         return true;
5047         }
5048
5049         return false;
5050 }
5051
5052 /**
5053  * ipr_match_res - Match function for specified resource entry
5054  * @ipr_cmd:    ipr command struct
5055  * @resource:   resource entry to match
5056  *
5057  * Returns:
5058  *      1 if command matches the resource entry / 0 if it does not
5059  **/
5060 static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
5061 {
5062         struct ipr_resource_entry *res = resource;
5063
5064         if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
5065                 return 1;
5066         return 0;
5067 }
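/*
 * Both match functions above exist solely as callbacks for
 * ipr_wait_for_ops(); the error handlers later in this file use them
 * like so:
 *
 *      rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
 *      rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
 */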
5068
5069 /**
5070  * ipr_wait_for_ops - Wait for matching commands to complete
5071  * @ioa_cfg:    ioa config struct
5072  * @device:             device to match (sdev)
5073  * @match:              match function to use
5074  *
5075  * Returns:
5076  *      SUCCESS / FAILED
5077  **/
5078 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
5079                             int (*match)(struct ipr_cmnd *, void *))
5080 {
5081         struct ipr_cmnd *ipr_cmd;
5082         int wait, i;
5083         unsigned long flags;
5084         struct ipr_hrr_queue *hrrq;
5085         signed long timeout = IPR_ABORT_TASK_TIMEOUT;
5086         DECLARE_COMPLETION_ONSTACK(comp);
5087
5088         ENTER;
5089         do {
5090                 wait = 0;
5091
5092                 for_each_hrrq(hrrq, ioa_cfg) {
5093                         spin_lock_irqsave(hrrq->lock, flags);
5094                         for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5095                                 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5096                                 if (!ipr_cmnd_is_free(ipr_cmd)) {
5097                                         if (match(ipr_cmd, device)) {
5098                                                 ipr_cmd->eh_comp = &comp;
5099                                                 wait++;
5100                                         }
5101                                 }
5102                         }
5103                         spin_unlock_irqrestore(hrrq->lock, flags);
5104                 }
5105
5106                 if (wait) {
5107                         timeout = wait_for_completion_timeout(&comp, timeout);
5108
5109                         if (!timeout) {
5110                                 wait = 0;
5111
5112                                 for_each_hrrq(hrrq, ioa_cfg) {
5113                                         spin_lock_irqsave(hrrq->lock, flags);
5114                                         for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5115                                                 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5116                                                 if (!ipr_cmnd_is_free(ipr_cmd)) {
5117                                                         if (match(ipr_cmd, device)) {
5118                                                                 ipr_cmd->eh_comp = NULL;
5119                                                                 wait++;
5120                                                         }
5121                                                 }
5122                                         }
5123                                         spin_unlock_irqrestore(hrrq->lock, flags);
5124                                 }
5125
5126                                 if (wait)
5127                                         dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
5128                                 LEAVE;
5129                                 return wait ? FAILED : SUCCESS;
5130                         }
5131                 }
5132         } while (wait);
5133
5134         LEAVE;
5135         return SUCCESS;
5136 }
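/*
 * The wait above depends on the command completion paths signalling the
 * on-stack completion, in essence (sketch of the done-side
 * counterpart):
 *
 *      if (ipr_cmd->eh_comp)
 *              complete(ipr_cmd->eh_comp);
 *
 * If the timeout expires first, the loop above strips eh_comp from any
 * still-outstanding commands so a late completion cannot touch the
 * stack frame after this function returns.
 */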
5137
5138 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
5139 {
5140         struct ipr_ioa_cfg *ioa_cfg;
5141         unsigned long lock_flags = 0;
5142         int rc = SUCCESS;
5143
5144         ENTER;
5145         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5146         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5147
5148         if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5149                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5150                 dev_err(&ioa_cfg->pdev->dev,
5151                         "Adapter being reset as a result of error recovery.\n");
5152
5153                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5154                         ioa_cfg->sdt_state = GET_DUMP;
5155         }
5156
5157         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5158         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5159         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5160
5161         /* If we got hit with a host reset while we were already resetting
5162          * the adapter for some reason and that reset failed, fail the host reset. */
5163         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5164                 ipr_trace;
5165                 rc = FAILED;
5166         }
5167
5168         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5169         LEAVE;
5170         return rc;
5171 }
5172
5173 /**
5174  * ipr_device_reset - Reset the device
5175  * @ioa_cfg:    ioa config struct
5176  * @res:                resource entry struct
5177  *
5178  * This function issues a device reset to the affected device.
5179  * If the device is a SCSI device, a LUN reset will be sent
5180  * to the device first. If that does not work, a target reset
5181  * will be sent. If the device is a SATA device, a PHY reset will
5182  * be sent.
5183  *
5184  * Return value:
5185  *      0 on success / non-zero on failure
5186  **/
5187 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5188                             struct ipr_resource_entry *res)
5189 {
5190         struct ipr_cmnd *ipr_cmd;
5191         struct ipr_ioarcb *ioarcb;
5192         struct ipr_cmd_pkt *cmd_pkt;
5193         struct ipr_ioarcb_ata_regs *regs;
5194         u32 ioasc;
5195
5196         ENTER;
5197         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5198         ioarcb = &ipr_cmd->ioarcb;
5199         cmd_pkt = &ioarcb->cmd_pkt;
5200
5201         if (ipr_cmd->ioa_cfg->sis64) {
5202                 regs = &ipr_cmd->i.ata_ioadl.regs;
5203                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5204         } else
5205                 regs = &ioarcb->u.add_data.u.regs;
5206
5207         ioarcb->res_handle = res->res_handle;
5208         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5209         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5210         if (ipr_is_gata(res)) {
5211                 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
5212                 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
5213                 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5214         }
5215
5216         ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5217         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5218         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5219         if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5220                 if (ipr_cmd->ioa_cfg->sis64)
5221                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5222                                sizeof(struct ipr_ioasa_gata));
5223                 else
5224                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5225                                sizeof(struct ipr_ioasa_gata));
5226         }
5227
5228         LEAVE;
5229         return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
5230 }
5231
5232 /**
5233  * ipr_sata_reset - Reset the SATA port
5234  * @link:       SATA link to reset
5235  * @classes:    class of the attached device
5236  *
5237  * This function issues a SATA phy reset to the affected ATA link.
5238  *
5239  * Return value:
5240  *      0 on success / non-zero on failure
5241  **/
5242 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
5243                                 unsigned long deadline)
5244 {
5245         struct ipr_sata_port *sata_port = link->ap->private_data;
5246         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5247         struct ipr_resource_entry *res;
5248         unsigned long lock_flags = 0;
5249         int rc = -ENXIO, ret;
5250
5251         ENTER;
5252         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5253         while (ioa_cfg->in_reset_reload) {
5254                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5255                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5256                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5257         }
5258
5259         res = sata_port->res;
5260         if (res) {
5261                 rc = ipr_device_reset(ioa_cfg, res);
5262                 *classes = res->ata_class;
5263                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5264
5265                 ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5266                 if (ret != SUCCESS) {
5267                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5268                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5269                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5270
5271                         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5272                 }
5273         } else
5274                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5275
5276         LEAVE;
5277         return rc;
5278 }
5279
5280 /**
5281  * __ipr_eh_dev_reset - Reset the device
5282  * @scsi_cmd:   scsi command struct
5283  *
5284  * This function issues a device reset to the affected device.
5285  * A LUN reset will be sent to the device first. If that does
5286  * not work, a target reset will be sent.
5287  *
5288  * Return value:
5289  *      SUCCESS / FAILED
5290  **/
5291 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5292 {
5293         struct ipr_cmnd *ipr_cmd;
5294         struct ipr_ioa_cfg *ioa_cfg;
5295         struct ipr_resource_entry *res;
5296         struct ata_port *ap;
5297         int rc = 0, i;
5298         struct ipr_hrr_queue *hrrq;
5299
5300         ENTER;
5301         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5302         res = scsi_cmd->device->hostdata;
5303
5304         /*
5305          * If we are currently going through reset/reload, return failed. This will force the
5306          * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5307          * reset to complete
5308          */
5309         if (ioa_cfg->in_reset_reload)
5310                 return FAILED;
5311         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5312                 return FAILED;
5313
5314         for_each_hrrq(hrrq, ioa_cfg) {
5315                 spin_lock(&hrrq->_lock);
5316                 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5317                         ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5318
5319                         if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5320                                 if (!ipr_cmd->qc)
5321                                         continue;
5322                                 if (ipr_cmnd_is_free(ipr_cmd))
5323                                         continue;
5324
5325                                 ipr_cmd->done = ipr_sata_eh_done;
5326                                 if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5327                                         ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5328                                         ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5329                                 }
5330                         }
5331                 }
5332                 spin_unlock(&hrrq->_lock);
5333         }
5334         res->resetting_device = 1;
5335         scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5336
5337         if (ipr_is_gata(res) && res->sata_port) {
5338                 ap = res->sata_port->ap;
5339                 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5340                 ata_std_error_handler(ap);
5341                 spin_lock_irq(scsi_cmd->device->host->host_lock);
5342         } else
5343                 rc = ipr_device_reset(ioa_cfg, res);
5344         res->resetting_device = 0;
5345         res->reset_occurred = 1;
5346
5347         LEAVE;
5348         return rc ? FAILED : SUCCESS;
5349 }
5350
5351 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5352 {
5353         int rc;
5354         struct ipr_ioa_cfg *ioa_cfg;
5355         struct ipr_resource_entry *res;
5356
5357         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5358         res = cmd->device->hostdata;
5359
5360         if (!res)
5361                 return FAILED;
5362
5363         spin_lock_irq(cmd->device->host->host_lock);
5364         rc = __ipr_eh_dev_reset(cmd);
5365         spin_unlock_irq(cmd->device->host->host_lock);
5366
5367         if (rc == SUCCESS) {
5368                 if (ipr_is_gata(res) && res->sata_port)
5369                         rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5370                 else
5371                         rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5372         }
5373
5374         return rc;
5375 }
5376
5377 /**
5378  * ipr_bus_reset_done - Op done function for bus reset.
5379  * @ipr_cmd:    ipr command struct
5380  *
5381  * This function is the op done function for a bus reset
5382  *
5383  * Return value:
5384  *      none
5385  **/
5386 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5387 {
5388         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5389         struct ipr_resource_entry *res;
5390
5391         ENTER;
5392         if (!ioa_cfg->sis64)
5393                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5394                         if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5395                                 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5396                                 break;
5397                         }
5398                 }
5399
5400         /*
5401          * If abort has not completed, indicate the reset has, else call the
5402          * abort's done function to wake the sleeping eh thread
5403          */
5404         if (ipr_cmd->sibling->sibling)
5405                 ipr_cmd->sibling->sibling = NULL;
5406         else
5407                 ipr_cmd->sibling->done(ipr_cmd->sibling);
5408
5409         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5410         LEAVE;
5411 }
5412
5413 /**
5414  * ipr_abort_timeout - An abort task has timed out
5415  * @t:          timer context used to fetch the ipr command struct
5416  *
5417  * This function handles when an abort task times out. If this
5418  * happens we issue a bus reset since we have resources tied
5419  * up that must be freed before returning to the midlayer.
5420  *
5421  * Return value:
5422  *      none
5423  **/
5424 static void ipr_abort_timeout(struct timer_list *t)
5425 {
5426         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
5427         struct ipr_cmnd *reset_cmd;
5428         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5429         struct ipr_cmd_pkt *cmd_pkt;
5430         unsigned long lock_flags = 0;
5431
5432         ENTER;
5433         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5434         if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5435                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5436                 return;
5437         }
5438
5439         sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5440         reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5441         ipr_cmd->sibling = reset_cmd;
5442         reset_cmd->sibling = ipr_cmd;
5443         reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5444         cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5445         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5446         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5447         cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5448
5449         ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5450         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5451         LEAVE;
5452 }
5453
5454 /**
5455  * ipr_cancel_op - Cancel specified op
5456  * @scsi_cmd:   scsi command struct
5457  *
5458  * This function cancels specified op.
5459  *
5460  * Return value:
5461  *      SUCCESS / FAILED
5462  **/
5463 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5464 {
5465         struct ipr_cmnd *ipr_cmd;
5466         struct ipr_ioa_cfg *ioa_cfg;
5467         struct ipr_resource_entry *res;
5468         struct ipr_cmd_pkt *cmd_pkt;
5469         u32 ioasc, int_reg;
5470         int i, op_found = 0;
5471         struct ipr_hrr_queue *hrrq;
5472
5473         ENTER;
5474         ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5475         res = scsi_cmd->device->hostdata;
5476
5477         /* If we are currently going through reset/reload, return failed.
5478          * This will force the mid-layer to call ipr_eh_host_reset,
5479          * which will then go to sleep and wait for the reset to complete
5480          */
5481         if (ioa_cfg->in_reset_reload ||
5482             ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5483                 return FAILED;
5484         if (!res)
5485                 return FAILED;
5486
5487         /*
5488          * If we are aborting a timed out op, chances are that the timeout was caused
5489          * by a still not detected EEH error. In such cases, reading a register will
5490          * trigger the EEH recovery infrastructure.
5491          */
5492         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5493
5494         if (!ipr_is_gscsi(res))
5495                 return FAILED;
5496
5497         for_each_hrrq(hrrq, ioa_cfg) {
5498                 spin_lock(&hrrq->_lock);
5499                 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5500                         if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
5501                                 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
5502                                         op_found = 1;
5503                                         break;
5504                                 }
5505                         }
5506                 }
5507                 spin_unlock(&hrrq->_lock);
5508         }
5509
5510         if (!op_found)
5511                 return SUCCESS;
5512
5513         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5514         ipr_cmd->ioarcb.res_handle = res->res_handle;
5515         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5516         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5517         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5518         ipr_cmd->u.sdev = scsi_cmd->device;
5519
5520         scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5521                     scsi_cmd->cmnd[0]);
5522         ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5523         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5524
5525         /*
5526          * If the abort task timed out and we sent a bus reset, we will get
5527          * one of the following responses to the abort
5528          */
5529         if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5530                 ioasc = 0;
5531                 ipr_trace;
5532         }
5533
5534         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5535         if (!ipr_is_naca_model(res))
5536                 res->needs_sync_complete = 1;
5537
5538         LEAVE;
5539         return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5540 }
5541
5542 /**
5543  * ipr_scan_finished - Determine if device scan is done
5544  * @shost:      scsi host struct
5545  * @elapsed_time:       elapsed scan time in jiffies
5546  * Return value:
5547  *      0 if scan in progress / 1 if scan is complete
5548  **/
5549 static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5550 {
5551         unsigned long lock_flags;
5552         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5553         int rc = 0;
5554
5555         spin_lock_irqsave(shost->host_lock, lock_flags);
5556         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5557                 rc = 1;
5558         if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5559                 rc = 1;
5560         spin_unlock_irqrestore(shost->host_lock, lock_flags);
5561         return rc;
5562 }
5563
5564 /**
5565  * ipr_eh_abort - Abort a single op
5566  * @scsi_cmd:   scsi command struct
5567  *
5568  * Return value:
5569  *      SUCCESS / FAILED
5570  **/
5571 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5572 {
5573         unsigned long flags;
5574         int rc;
5575         struct ipr_ioa_cfg *ioa_cfg;
5576
5577         ENTER;
5578
5579         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5580
5581         spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5582         rc = ipr_cancel_op(scsi_cmd);
5583         spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5584
5585         if (rc == SUCCESS)
5586                 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5587         LEAVE;
5588         return rc;
5589 }
5590
5591 /**
5592  * ipr_handle_other_interrupt - Handle "other" interrupts
5593  * @ioa_cfg:    ioa config struct
5594  * @int_reg:    interrupt register
5595  *
5596  * Return value:
5597  *      IRQ_NONE / IRQ_HANDLED
5598  **/
5599 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5600                                               u32 int_reg)
5601 {
5602         irqreturn_t rc = IRQ_HANDLED;
5603         u32 int_mask_reg;
5604
5605         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5606         int_reg &= ~int_mask_reg;
5607
5608         /* If no operational interrupt from the adapter is pending, ignore it,
5609          * unless this is SIS-64, where it may be a stage change interrupt.
5610          */
5611         if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5612                 if (ioa_cfg->sis64) {
5613                         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5614                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5615                         if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5616
5617                                 /* clear stage change */
5618                                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5619                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5620                                 list_del(&ioa_cfg->reset_cmd->queue);
5621                                 del_timer(&ioa_cfg->reset_cmd->timer);
5622                                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5623                                 return IRQ_HANDLED;
5624                         }
5625                 }
5626
5627                 return IRQ_NONE;
5628         }
5629
5630         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5631                 /* Mask the interrupt */
5632                 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5633                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5634
5635                 list_del(&ioa_cfg->reset_cmd->queue);
5636                 del_timer(&ioa_cfg->reset_cmd->timer);
5637                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5638         } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5639                 if (ioa_cfg->clear_isr) {
5640                         if (ipr_debug && printk_ratelimit())
5641                                 dev_err(&ioa_cfg->pdev->dev,
5642                                         "Spurious interrupt detected. 0x%08X\n", int_reg);
5643                         writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5644                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5645                         return IRQ_NONE;
5646                 }
5647         } else {
5648                 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5649                         ioa_cfg->ioa_unit_checked = 1;
5650                 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5651                         dev_err(&ioa_cfg->pdev->dev,
5652                                 "No Host RRQ. 0x%08X\n", int_reg);
5653                 else
5654                         dev_err(&ioa_cfg->pdev->dev,
5655                                 "Permanent IOA failure. 0x%08X\n", int_reg);
5656
5657                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5658                         ioa_cfg->sdt_state = GET_DUMP;
5659
5660                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5661                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5662         }
5663
5664         return rc;
5665 }
5666
5667 /**
5668  * ipr_isr_eh - Interrupt service routine error handler
5669  * @ioa_cfg:    ioa config struct
5670  * @msg:        message to log
5671  * @number:     number logged alongside the message
5672  * Return value:
5673  *      none
5674  **/
5675 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5676 {
5677         ioa_cfg->errors_logged++;
5678         dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5679
5680         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5681                 ioa_cfg->sdt_state = GET_DUMP;
5682
5683         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5684 }
5685
5686 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5687                                                 struct list_head *doneq)
5688 {
5689         u32 ioasc;
5690         u16 cmd_index;
5691         struct ipr_cmnd *ipr_cmd;
5692         struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5693         int num_hrrq = 0;
5694
5695         /* If interrupts are disabled, ignore the interrupt */
5696         if (!hrr_queue->allow_interrupts)
5697                 return 0;
5698
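             /*
              * An entry whose toggle bit matches ours is a new response.
              * The adapter flips the bit it writes each time the queue
              * wraps, so entries left over from the previous pass never
              * match.
              */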
5699         while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5700                hrr_queue->toggle_bit) {
5701
5702                 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5703                              IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5704                              IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5705
5706                 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5707                              cmd_index < hrr_queue->min_cmd_id)) {
5708                         ipr_isr_eh(ioa_cfg,
5709                                 "Invalid response handle from IOA: ",
5710                                 cmd_index);
5711                         break;
5712                 }
5713
5714                 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5715                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5716
5717                 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5718
5719                 list_move_tail(&ipr_cmd->queue, doneq);
5720
5721                 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5722                         hrr_queue->hrrq_curr++;
5723                 } else {
5724                         hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5725                         hrr_queue->toggle_bit ^= 1u;
5726                 }
5727                 num_hrrq++;
5728                 if (budget > 0 && num_hrrq >= budget)
5729                         break;
5730         }
5731
5732         return num_hrrq;
5733 }
5734
5735 static int ipr_iopoll(struct irq_poll *iop, int budget)
5736 {
5737         struct ipr_ioa_cfg *ioa_cfg;
5738         struct ipr_hrr_queue *hrrq;
5739         struct ipr_cmnd *ipr_cmd, *temp;
5740         unsigned long hrrq_flags;
5741         int completed_ops;
5742         LIST_HEAD(doneq);
5743
5744         hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5745         ioa_cfg = hrrq->ioa_cfg;
5746
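             /*
              * irq_poll contract: consume at most "budget" completions.
              * Finishing under budget means the queue is drained, so
              * polling can be completed via irq_poll_complete().
              */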
5747         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5748         completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5749
5750         if (completed_ops < budget)
5751                 irq_poll_complete(iop);
5752         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5753
5754         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5755                 list_del(&ipr_cmd->queue);
5756                 del_timer(&ipr_cmd->timer);
5757                 ipr_cmd->fast_done(ipr_cmd);
5758         }
5759
5760         return completed_ops;
5761 }
5762
5763 /**
5764  * ipr_isr - Interrupt service routine
5765  * @irq:        irq number
5766  * @devp:       pointer to the hrr queue servicing this irq
5767  *
5768  * Return value:
5769  *      IRQ_NONE / IRQ_HANDLED
5770  **/
5771 static irqreturn_t ipr_isr(int irq, void *devp)
5772 {
5773         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5774         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5775         unsigned long hrrq_flags = 0;
5776         u32 int_reg = 0;
5777         int num_hrrq = 0;
5778         int irq_none = 0;
5779         struct ipr_cmnd *ipr_cmd, *temp;
5780         irqreturn_t rc = IRQ_NONE;
5781         LIST_HEAD(doneq);
5782
5783         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5784         /* If interrupts are disabled, ignore the interrupt */
5785         if (!hrrq->allow_interrupts) {
5786                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5787                 return IRQ_NONE;
5788         }
5789
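             /*
              * Drain the HRRQ and, when clear_isr is set, acknowledge the
              * PCI interrupt and re-check until no responses remain. On
              * adapters that do not need the acknowledgment, one pass is
              * enough.
              */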
5790         while (1) {
5791                 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5792                         rc =  IRQ_HANDLED;
5793
5794                         if (!ioa_cfg->clear_isr)
5795                                 break;
5796
5797                         /* Clear the PCI interrupt */
5798                         num_hrrq = 0;
5799                         do {
5800                                 writel(IPR_PCII_HRRQ_UPDATED,
5801                                      ioa_cfg->regs.clr_interrupt_reg32);
5802                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5803                         } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5804                                 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5805
5806                 } else if (rc == IRQ_NONE && irq_none == 0) {
5807                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5808                         irq_none++;
5809                 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5810                            int_reg & IPR_PCII_HRRQ_UPDATED) {
5811                         ipr_isr_eh(ioa_cfg,
5812                                 "Error clearing HRRQ: ", num_hrrq);
5813                         rc = IRQ_HANDLED;
5814                         break;
5815                 } else
5816                         break;
5817         }
5818
5819         if (unlikely(rc == IRQ_NONE))
5820                 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5821
5822         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5823         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5824                 list_del(&ipr_cmd->queue);
5825                 del_timer(&ipr_cmd->timer);
5826                 ipr_cmd->fast_done(ipr_cmd);
5827         }
5828         return rc;
5829 }
5830
5831 /**
5832  * ipr_isr_mhrrq - Interrupt service routine for MSI-X HRRQ vectors
5833  * @irq:        irq number
5834  * @devp:       pointer to the hrr queue servicing this irq
5835  *
5836  * Return value:
5837  *      IRQ_NONE / IRQ_HANDLED
5838  **/
5839 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5840 {
5841         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5842         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5843         unsigned long hrrq_flags = 0;
5844         struct ipr_cmnd *ipr_cmd, *temp;
5845         irqreturn_t rc = IRQ_NONE;
5846         LIST_HEAD(doneq);
5847
5848         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5849
5850         /* If interrupts are disabled, ignore the interrupt */
5851         if (!hrrq->allow_interrupts) {
5852                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5853                 return IRQ_NONE;
5854         }
5855
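             /*
              * On SIS-64 with multiple vectors and iopoll enabled, defer
              * the work to irq_poll so it runs in softirq context;
              * otherwise drain the queue here in hard-irq context.
              */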
5856         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5857                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5858                        hrrq->toggle_bit) {
5859                         irq_poll_sched(&hrrq->iopoll);
5860                         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5861                         return IRQ_HANDLED;
5862                 }
5863         } else {
5864                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5865                         hrrq->toggle_bit)
5866
5867                         if (ipr_process_hrrq(hrrq, -1, &doneq))
5868                                 rc =  IRQ_HANDLED;
5869         }
5870
5871         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5872
5873         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5874                 list_del(&ipr_cmd->queue);
5875                 del_timer(&ipr_cmd->timer);
5876                 ipr_cmd->fast_done(ipr_cmd);
5877         }
5878         return rc;
5879 }
5880
5881 /**
5882  * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5883  * @ioa_cfg:    ioa config struct
5884  * @ipr_cmd:    ipr command struct
5885  *
5886  * Return value:
5887  *      0 on success / -1 on failure
5888  **/
5889 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5890                              struct ipr_cmnd *ipr_cmd)
5891 {
5892         int i, nseg;
5893         struct scatterlist *sg;
5894         u32 length;
5895         u32 ioadl_flags = 0;
5896         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5897         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5898         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5899
5900         length = scsi_bufflen(scsi_cmd);
5901         if (!length)
5902                 return 0;
5903
5904         nseg = scsi_dma_map(scsi_cmd);
5905         if (nseg < 0) {
5906                 if (printk_ratelimit())
5907                         dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5908                 return -1;
5909         }
5910
5911         ipr_cmd->dma_use_sg = nseg;
5912
5913         ioarcb->data_transfer_length = cpu_to_be32(length);
5914         ioarcb->ioadl_len =
5915                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5916
5917         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5918                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5919                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5920         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5921                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5922
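             /*
              * Fill one 64-bit IOADL descriptor per S/G element; the
              * last descriptor is flagged after the loop.
              */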
5923         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5924                 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5925                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5926                 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5927         }
5928
5929         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5930         return 0;
5931 }
5932
5933 /**
5934  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5935  * @ioa_cfg:    ioa config struct
5936  * @ipr_cmd:    ipr command struct
5937  *
5938  * Return value:
5939  *      0 on success / -1 on failure
5940  **/
5941 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5942                            struct ipr_cmnd *ipr_cmd)
5943 {
5944         int i, nseg;
5945         struct scatterlist *sg;
5946         u32 length;
5947         u32 ioadl_flags = 0;
5948         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5949         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5950         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5951
5952         length = scsi_bufflen(scsi_cmd);
5953         if (!length)
5954                 return 0;
5955
5956         nseg = scsi_dma_map(scsi_cmd);
5957         if (nseg < 0) {
5958                 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5959                 return -1;
5960         }
5961
5962         ipr_cmd->dma_use_sg = nseg;
5963
5964         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5965                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5966                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5967                 ioarcb->data_transfer_length = cpu_to_be32(length);
5968                 ioarcb->ioadl_len =
5969                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5970         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5971                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5972                 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5973                 ioarcb->read_ioadl_len =
5974                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5975         }
5976
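             /*
              * If the list fits, place the IOADL inline in the IOARCB so
              * the adapter can avoid a separate DMA fetch of the list.
              */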
5977         if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5978                 ioadl = ioarcb->u.add_data.u.ioadl;
5979                 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5980                                     offsetof(struct ipr_ioarcb, u.add_data));
5981                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5982         }
5983
5984         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5985                 ioadl[i].flags_and_data_len =
5986                         cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5987                 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5988         }
5989
5990         ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5991         return 0;
5992 }
5993
5994 /**
5995  * __ipr_erp_done - Process completion of ERP for a device
5996  * @ipr_cmd:            ipr command struct
5997  *
5998  * This function copies the sense buffer into the scsi_cmd
5999  * struct and invokes the scsi_done callback.
6000  *
6001  * Return value:
6002  *      nothing
6003  **/
6004 static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6005 {
6006         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6007         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6008         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6009
6010         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6011                 scsi_cmd->result |= (DID_ERROR << 16);
6012                 scmd_printk(KERN_ERR, scsi_cmd,
6013                             "Request Sense failed with IOASC: 0x%08X\n", ioasc);
6014         } else {
6015                 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
6016                        SCSI_SENSE_BUFFERSIZE);
6017         }
6018
6019         if (res) {
6020                 if (!ipr_is_naca_model(res))
6021                         res->needs_sync_complete = 1;
6022                 res->in_erp = 0;
6023         }
6024         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6025         scsi_cmd->scsi_done(scsi_cmd);
6026         if (ipr_cmd->eh_comp)
6027                 complete(ipr_cmd->eh_comp);
6028         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6029 }
6030
6031 /**
6032  * ipr_erp_done - Process completion of ERP for a device
6033  * @ipr_cmd:            ipr command struct
6034  *
6035  * This function copies the sense buffer into the scsi_cmd
6036  * struct and invokes the scsi_done callback.
6037  *
6038  * Return value:
6039  *      nothing
6040  **/
6041 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6042 {
6043         struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6044         unsigned long hrrq_flags;
6045
6046         spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6047         __ipr_erp_done(ipr_cmd);
6048         spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6049 }
6050
6051 /**
6052  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
6053  * @ipr_cmd:    ipr command struct
6054  *
6055  * Return value:
6056  *      none
6057  **/
6058 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
6059 {
6060         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6061         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6062         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6063
6064         memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
6065         ioarcb->data_transfer_length = 0;
6066         ioarcb->read_data_transfer_length = 0;
6067         ioarcb->ioadl_len = 0;
6068         ioarcb->read_ioadl_len = 0;
6069         ioasa->hdr.ioasc = 0;
6070         ioasa->hdr.residual_data_len = 0;
6071
6072         if (ipr_cmd->ioa_cfg->sis64)
6073                 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6074                         cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
6075         else {
6076                 ioarcb->write_ioadl_addr =
6077                         cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
6078                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6079         }
6080 }
6081
6082 /**
6083  * __ipr_erp_request_sense - Send request sense to a device
6084  * @ipr_cmd:    ipr command struct
6085  *
6086  * This function sends a request sense to a device as a result
6087  * of a check condition.
6088  *
6089  * Return value:
6090  *      nothing
6091  **/
6092 static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6093 {
6094         struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6095         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6096
6097         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6098                 __ipr_erp_done(ipr_cmd);
6099                 return;
6100         }
6101
6102         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6103
6104         cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
6105         cmd_pkt->cdb[0] = REQUEST_SENSE;
6106         cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
6107         cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
6108         cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6109         cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
6110
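             /* Point the IOADL at the command block's own sense buffer
              * for the read transfer.
              */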
6111         ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
6112                        SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
6113
6114         ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
6115                    IPR_REQUEST_SENSE_TIMEOUT * 2);
6116 }
6117
6118 /**
6119  * ipr_erp_request_sense - Send request sense to a device
6120  * @ipr_cmd:    ipr command struct
6121  *
6122  * This function sends a request sense to a device as a result
6123  * of a check condition.
6124  *
6125  * Return value:
6126  *      nothing
6127  **/
6128 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6129 {
6130         struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6131         unsigned long hrrq_flags;
6132
6133         spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6134         __ipr_erp_request_sense(ipr_cmd);
6135         spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6136 }
6137
6138 /**
6139  * ipr_erp_cancel_all - Send cancel all to a device
6140  * @ipr_cmd:    ipr command struct
6141  *
6142  * This function sends a cancel all to a device to clear the
6143  * queue. If we are running TCQ on the device, QERR is set to 1,
6144  * which means all outstanding ops have been dropped on the floor.
6145  * Cancel all will return them to us.
6146  *
6147  * Return value:
6148  *      nothing
6149  **/
6150 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
6151 {
6152         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6153         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6154         struct ipr_cmd_pkt *cmd_pkt;
6155
6156         res->in_erp = 1;
6157
6158         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6159
6160         if (!scsi_cmd->device->simple_tags) {
6161                 __ipr_erp_request_sense(ipr_cmd);
6162                 return;
6163         }
6164
6165         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6166         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
6167         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
6168
6169         ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
6170                    IPR_CANCEL_ALL_TIMEOUT);
6171 }
6172
6173 /**
6174  * ipr_dump_ioasa - Dump contents of IOASA
6175  * @ioa_cfg:    ioa config struct
6176  * @ipr_cmd:    ipr command struct
6177  * @res:                resource entry struct
6178  *
6179  * This function is invoked by the interrupt handler when ops
6180  * fail. It will log the IOASA if appropriate. Only called
6181  * for GPDD ops.
6182  *
6183  * Return value:
6184  *      none
6185  **/
6186 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
6187                            struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
6188 {
6189         int i;
6190         u16 data_len;
6191         u32 ioasc, fd_ioasc;
6192         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6193         __be32 *ioasa_data = (__be32 *)ioasa;
6194         int error_index;
6195
6196         ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
6197         fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
6198
6199         if (0 == ioasc)
6200                 return;
6201
6202         if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
6203                 return;
6204
6205         if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
6206                 error_index = ipr_get_error(fd_ioasc);
6207         else
6208                 error_index = ipr_get_error(ioasc);
6209
6210         if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6211                 /* Don't log an error if the IOA already logged one */
6212                 if (ioasa->hdr.ilid != 0)
6213                         return;
6214
6215                 if (!ipr_is_gscsi(res))
6216                         return;
6217
6218                 if (ipr_error_table[error_index].log_ioasa == 0)
6219                         return;
6220         }
6221
6222         ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
6223
6224         data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6225         if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6226                 data_len = sizeof(struct ipr_ioasa64);
6227         else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
6228                 data_len = sizeof(struct ipr_ioasa);
6229
6230         ipr_err("IOASA Dump:\n");
6231
6232         for (i = 0; i < data_len / 4; i += 4) {
6233                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6234                         be32_to_cpu(ioasa_data[i]),
6235                         be32_to_cpu(ioasa_data[i+1]),
6236                         be32_to_cpu(ioasa_data[i+2]),
6237                         be32_to_cpu(ioasa_data[i+3]));
6238         }
6239 }
6240
6241 /**
6242  * ipr_gen_sense - Generate SCSI sense data from an IOASA
6243  * @ipr_cmd:    ipr command struct
6245  *
6246  * Return value:
6247  *      none
6248  **/
6249 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6250 {
6251         u32 failing_lba;
6252         u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6253         struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
6254         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6255         u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
6256
6257         memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
6258
6259         if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6260                 return;
6261
6262         ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
6263
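             /*
              * A failing LBA wider than 32 bits only fits descriptor-format
              * sense data (response code 0x72); all other cases use the
              * fixed format (response code 0x70) built below.
              */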
6264         if (ipr_is_vset_device(res) &&
6265             ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6266             ioasa->u.vset.failing_lba_hi != 0) {
6267                 sense_buf[0] = 0x72;
6268                 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6269                 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6270                 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6271
6272                 sense_buf[7] = 12;
6273                 sense_buf[8] = 0;
6274                 sense_buf[9] = 0x0A;
6275                 sense_buf[10] = 0x80;
6276
6277                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6278
6279                 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6280                 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6281                 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6282                 sense_buf[15] = failing_lba & 0x000000ff;
6283
6284                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6285
6286                 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6287                 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6288                 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6289                 sense_buf[19] = failing_lba & 0x000000ff;
6290         } else {
6291                 sense_buf[0] = 0x70;
6292                 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6293                 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6294                 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6295
6296                 /* Illegal request */
6297                 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
6298                     (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
6299                         sense_buf[7] = 10;      /* additional length */
6300
6301                         /* IOARCB was in error */
6302                         if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6303                                 sense_buf[15] = 0xC0;
6304                         else    /* Parameter data was invalid */
6305                                 sense_buf[15] = 0x80;
6306
6307                         sense_buf[16] =
6308                             ((IPR_FIELD_POINTER_MASK &
6309                               be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
6310                         sense_buf[17] =
6311                             (IPR_FIELD_POINTER_MASK &
6312                              be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
6313                 } else {
6314                         if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6315                                 if (ipr_is_vset_device(res))
6316                                         failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6317                                 else
6318                                         failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6319
6320                                 sense_buf[0] |= 0x80;   /* Or in the Valid bit */
6321                                 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6322                                 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6323                                 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6324                                 sense_buf[6] = failing_lba & 0x000000ff;
6325                         }
6326
6327                         sense_buf[7] = 6;       /* additional length */
6328                 }
6329         }
6330 }
6331
6332 /**
6333  * ipr_get_autosense - Copy autosense data to sense buffer
6334  * @ipr_cmd:    ipr command struct
6335  *
6336  * This function copies the autosense buffer to the buffer
6337  * in the scsi_cmd, if there is autosense available.
6338  *
6339  * Return value:
6340  *      1 if autosense was available / 0 if not
6341  **/
6342 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6343 {
6344         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6345         struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6346
6347         if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6348                 return 0;
6349
6350         if (ipr_cmd->ioa_cfg->sis64)
6351                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6352                        min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6353                            SCSI_SENSE_BUFFERSIZE));
6354         else
6355                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6356                        min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6357                            SCSI_SENSE_BUFFERSIZE));
6358         return 1;
6359 }
6360
6361 /**
6362  * ipr_erp_start - Process an error response for a SCSI op
6363  * @ioa_cfg:    ioa config struct
6364  * @ipr_cmd:    ipr command struct
6365  *
6366  * This function determines whether or not to initiate ERP
6367  * on the affected device.
6368  *
6369  * Return value:
6370  *      nothing
6371  **/
6372 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6373                               struct ipr_cmnd *ipr_cmd)
6374 {
6375         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6376         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6377         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6378         u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6379
6380         if (!res) {
6381                 __ipr_scsi_eh_done(ipr_cmd);
6382                 return;
6383         }
6384
6385         if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6386                 ipr_gen_sense(ipr_cmd);
6387
6388         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6389
6390         switch (masked_ioasc) {
6391         case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6392                 if (ipr_is_naca_model(res))
6393                         scsi_cmd->result |= (DID_ABORT << 16);
6394                 else
6395                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6396                 break;
6397         case IPR_IOASC_IR_RESOURCE_HANDLE:
6398         case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6399                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6400                 break;
6401         case IPR_IOASC_HW_SEL_TIMEOUT:
6402                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6403                 if (!ipr_is_naca_model(res))
6404                         res->needs_sync_complete = 1;
6405                 break;
6406         case IPR_IOASC_SYNC_REQUIRED:
6407                 if (!res->in_erp)
6408                         res->needs_sync_complete = 1;
6409                 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6410                 break;
6411         case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6412         case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6413                 /*
6414                  * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
6415                  * so SCSI mid-layer and upper layers handle it accordingly.
6416                  */
6417                 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
6418                         scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6419                 break;
6420         case IPR_IOASC_BUS_WAS_RESET:
6421         case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6422                 /*
6423                  * Report the bus reset and ask for a retry. The device
6424                  * will give CC/UA the next command.
6425                  */
6426                 if (!res->resetting_device)
6427                         scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6428                 scsi_cmd->result |= (DID_ERROR << 16);
6429                 if (!ipr_is_naca_model(res))
6430                         res->needs_sync_complete = 1;
6431                 break;
6432         case IPR_IOASC_HW_DEV_BUS_STATUS:
6433                 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6434                 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6435                         if (!ipr_get_autosense(ipr_cmd)) {
6436                                 if (!ipr_is_naca_model(res)) {
6437                                         ipr_erp_cancel_all(ipr_cmd);
6438                                         return;
6439                                 }
6440                         }
6441                 }
6442                 if (!ipr_is_naca_model(res))
6443                         res->needs_sync_complete = 1;
6444                 break;
6445         case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6446                 break;
6447         case IPR_IOASC_IR_NON_OPTIMIZED:
6448                 if (res->raw_mode) {
6449                         res->raw_mode = 0;
6450                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6451                 } else
6452                         scsi_cmd->result |= (DID_ERROR << 16);
6453                 break;
6454         default:
6455                 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6456                         scsi_cmd->result |= (DID_ERROR << 16);
6457                 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6458                         res->needs_sync_complete = 1;
6459                 break;
6460         }
6461
6462         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6463         scsi_cmd->scsi_done(scsi_cmd);
6464         if (ipr_cmd->eh_comp)
6465                 complete(ipr_cmd->eh_comp);
6466         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6467 }
6468
6469 /**
6470  * ipr_scsi_done - mid-layer done function
6471  * @ipr_cmd:    ipr command struct
6472  *
6473  * This function is invoked by the interrupt handler for
6474  * ops generated by the SCSI mid-layer
6475  *
6476  * Return value:
6477  *      none
6478  **/
6479 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6480 {
6481         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6482         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6483         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6484         unsigned long lock_flags;
6485
6486         scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6487
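             /*
              * Fast path: no sense key means the op succeeded, so complete
              * it under the HRRQ lock alone. Errors fall into ERP, which
              * additionally requires the host lock.
              */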
6488         if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6489                 scsi_dma_unmap(scsi_cmd);
6490
6491                 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6492                 scsi_cmd->scsi_done(scsi_cmd);
6493                 if (ipr_cmd->eh_comp)
6494                         complete(ipr_cmd->eh_comp);
6495                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6496                 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6497         } else {
6498                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6499                 spin_lock(&ipr_cmd->hrrq->_lock);
6500                 ipr_erp_start(ioa_cfg, ipr_cmd);
6501                 spin_unlock(&ipr_cmd->hrrq->_lock);
6502                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6503         }
6504 }
6505
6506 /**
6507  * ipr_queuecommand - Queue a mid-layer request
6508  * @shost:              scsi host struct
6509  * @scsi_cmd:   scsi command struct
6510  *
6511  * This function queues a request generated by the mid-layer.
6512  *
6513  * Return value:
6514  *      0 on success
6515  *      SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6516  *      SCSI_MLQUEUE_HOST_BUSY if host is busy
6517  **/
6518 static int ipr_queuecommand(struct Scsi_Host *shost,
6519                             struct scsi_cmnd *scsi_cmd)
6520 {
6521         struct ipr_ioa_cfg *ioa_cfg;
6522         struct ipr_resource_entry *res;
6523         struct ipr_ioarcb *ioarcb;
6524         struct ipr_cmnd *ipr_cmd;
6525         unsigned long hrrq_flags, lock_flags;
6526         int rc;
6527         struct ipr_hrr_queue *hrrq;
6528         int hrrq_id;
6529
6530         ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6531
6532         scsi_cmd->result = (DID_OK << 16);
6533         res = scsi_cmd->device->hostdata;
6534
6535         if (ipr_is_gata(res) && res->sata_port) {
6536                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6537                 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6538                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6539                 return rc;
6540         }
6541
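             /*
              * Pick an HRRQ (round-robin) so completion work is spread
              * across the available interrupt vectors.
              */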
6542         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6543         hrrq = &ioa_cfg->hrrq[hrrq_id];
6544
6545         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6546         /*
6547          * We are currently blocking all devices due to a host reset.
6548          * We have told the host to stop giving us new requests, but
6549          * ERP ops don't count. FIXME
6550          */
6551         if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6552                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6553                 return SCSI_MLQUEUE_HOST_BUSY;
6554         }
6555
6556         /*
6557          * FIXME - Create scsi_set_host_offline interface
6558          *  and the ioa_is_dead check can be removed
6559          */
6560         if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6561                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6562                 goto err_nodev;
6563         }
6564
6565         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6566         if (ipr_cmd == NULL) {
6567                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6568                 return SCSI_MLQUEUE_HOST_BUSY;
6569         }
6570         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6571
6572         ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6573         ioarcb = &ipr_cmd->ioarcb;
6574
6575         memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6576         ipr_cmd->scsi_cmd = scsi_cmd;
6577         ipr_cmd->done = ipr_scsi_eh_done;
6578
6579         if (ipr_is_gscsi(res)) {
6580                 if (scsi_cmd->underflow == 0)
6581                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6582
6583                 if (res->reset_occurred) {
6584                         res->reset_occurred = 0;
6585                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6586                 }
6587         }
6588
6589         if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6590                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6591
6592                 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6593                 if (scsi_cmd->flags & SCMD_TAGGED)
6594                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6595                 else
6596                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
6597         }
6598
6599         if (scsi_cmd->cmnd[0] >= 0xC0 &&
6600             (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6601                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6602         }
6603         if (res->raw_mode && ipr_is_af_dasd_device(res)) {
6604                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
6605
6606                 if (scsi_cmd->underflow == 0)
6607                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6608         }
6609
6610         if (ioa_cfg->sis64)
6611                 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6612         else
6613                 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6614
6615         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6616         if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6617                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6618                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6619                 if (!rc)
6620                         scsi_dma_unmap(scsi_cmd);
6621                 return SCSI_MLQUEUE_HOST_BUSY;
6622         }
6623
6624         if (unlikely(hrrq->ioa_is_dead)) {
6625                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6626                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6627                 scsi_dma_unmap(scsi_cmd);
6628                 goto err_nodev;
6629         }
6630
6631         ioarcb->res_handle = res->res_handle;
6632         if (res->needs_sync_complete) {
6633                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6634                 res->needs_sync_complete = 0;
6635         }
6636         list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6637         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6638         ipr_send_command(ipr_cmd);
6639         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6640         return 0;
6641
6642 err_nodev:
6643         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6644         memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6645         scsi_cmd->result = (DID_NO_CONNECT << 16);
6646         scsi_cmd->scsi_done(scsi_cmd);
6647         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6648         return 0;
6649 }
6650
6651 /**
6652  * ipr_ioctl - IOCTL handler
6653  * @sdev:       scsi device struct
6654  * @cmd:        IOCTL cmd
6655  * @arg:        IOCTL arg
6656  *
6657  * Return value:
6658  *      0 on success / other on failure
6659  **/
6660 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6661 {
6662         struct ipr_resource_entry *res;
6663
6664         res = (struct ipr_resource_entry *)sdev->hostdata;
6665         if (res && ipr_is_gata(res)) {
6666                 if (cmd == HDIO_GET_IDENTITY)
6667                         return -ENOTTY;
6668                 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6669         }
6670
6671         return -EINVAL;
6672 }
6673
6674 /**
6675  * ipr_ioa_info - Get information about the card/driver
6676  * @host:       scsi host struct
6677  *
6678  * Return value:
6679  *      pointer to buffer with description string
6680  **/
6681 static const char *ipr_ioa_info(struct Scsi_Host *host)
6682 {
6683         static char buffer[512];
6684         struct ipr_ioa_cfg *ioa_cfg;
6685         unsigned long lock_flags = 0;
6686
6687         ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6688
6689         spin_lock_irqsave(host->host_lock, lock_flags);
6690         sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6691         spin_unlock_irqrestore(host->host_lock, lock_flags);
6692
6693         return buffer;
6694 }
6695
6696 static struct scsi_host_template driver_template = {
6697         .module = THIS_MODULE,
6698         .name = "IPR",
6699         .info = ipr_ioa_info,
6700         .ioctl = ipr_ioctl,
6701         .queuecommand = ipr_queuecommand,
6702         .eh_abort_handler = ipr_eh_abort,
6703         .eh_device_reset_handler = ipr_eh_dev_reset,
6704         .eh_host_reset_handler = ipr_eh_host_reset,
6705         .slave_alloc = ipr_slave_alloc,
6706         .slave_configure = ipr_slave_configure,
6707         .slave_destroy = ipr_slave_destroy,
6708         .scan_finished = ipr_scan_finished,
6709         .target_alloc = ipr_target_alloc,
6710         .target_destroy = ipr_target_destroy,
6711         .change_queue_depth = ipr_change_queue_depth,
6712         .bios_param = ipr_biosparam,
6713         .can_queue = IPR_MAX_COMMANDS,
6714         .this_id = -1,
6715         .sg_tablesize = IPR_MAX_SGLIST,
6716         .max_sectors = IPR_IOA_MAX_SECTORS,
6717         .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6718         .use_clustering = ENABLE_CLUSTERING,
6719         .shost_attrs = ipr_ioa_attrs,
6720         .sdev_attrs = ipr_dev_attrs,
6721         .proc_name = IPR_NAME,
6722 };
6723
6724 /**
6725  * ipr_ata_phy_reset - libata phy_reset handler
6726  * @ap:         ata port to reset
6727  *
6728  **/
6729 static void ipr_ata_phy_reset(struct ata_port *ap)
6730 {
6731         unsigned long flags;
6732         struct ipr_sata_port *sata_port = ap->private_data;
6733         struct ipr_resource_entry *res = sata_port->res;
6734         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6735         int rc;
6736
6737         ENTER;
6738         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6739         while (ioa_cfg->in_reset_reload) {
6740                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6741                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6742                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6743         }
6744
6745         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6746                 goto out_unlock;
6747
6748         rc = ipr_device_reset(ioa_cfg, res);
6749
6750         if (rc) {
6751                 ap->link.device[0].class = ATA_DEV_NONE;
6752                 goto out_unlock;
6753         }
6754
6755         ap->link.device[0].class = res->ata_class;
6756         if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6757                 ap->link.device[0].class = ATA_DEV_NONE;
6758
6759 out_unlock:
6760         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6761         LEAVE;
6762 }
6763
6764 /**
6765  * ipr_ata_post_internal - Cleanup after an internal command
6766  * @qc: ATA queued command
6767  *
6768  * Return value:
6769  *      none
6770  **/
6771 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6772 {
6773         struct ipr_sata_port *sata_port = qc->ap->private_data;
6774         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6775         struct ipr_cmnd *ipr_cmd;
6776         struct ipr_hrr_queue *hrrq;
6777         unsigned long flags;
6778
6779         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6780         while (ioa_cfg->in_reset_reload) {
6781                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6782                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6783                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6784         }
6785
6786         for_each_hrrq(hrrq, ioa_cfg) {
6787                 spin_lock(&hrrq->_lock);
6788                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6789                         if (ipr_cmd->qc == qc) {
6790                                 ipr_device_reset(ioa_cfg, sata_port->res);
6791                                 break;
6792                         }
6793                 }
6794                 spin_unlock(&hrrq->_lock);
6795         }
6796         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6797 }
6798
6799 /**
6800  * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6801  * @regs:       destination
6802  * @tf: source ATA taskfile
6803  *
6804  * Return value:
6805  *      none
6806  **/
6807 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6808                              struct ata_taskfile *tf)
6809 {
6810         regs->feature = tf->feature;
6811         regs->nsect = tf->nsect;
6812         regs->lbal = tf->lbal;
6813         regs->lbam = tf->lbam;
6814         regs->lbah = tf->lbah;
6815         regs->device = tf->device;
6816         regs->command = tf->command;
6817         regs->hob_feature = tf->hob_feature;
6818         regs->hob_nsect = tf->hob_nsect;
6819         regs->hob_lbal = tf->hob_lbal;
6820         regs->hob_lbam = tf->hob_lbam;
6821         regs->hob_lbah = tf->hob_lbah;
6822         regs->ctl = tf->ctl;
6823 }
6824
6825 /**
6826  * ipr_sata_done - done function for SATA commands
6827  * @ipr_cmd:    ipr command struct
6828  *
6829  * This function is invoked by the interrupt handler for
6830  * ops generated by the SCSI mid-layer to SATA devices
6831  *
6832  * Return value:
6833  *      none
6834  **/
6835 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6836 {
6837         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6838         struct ata_queued_cmd *qc = ipr_cmd->qc;
6839         struct ipr_sata_port *sata_port = qc->ap->private_data;
6840         struct ipr_resource_entry *res = sata_port->res;
6841         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6842
6843         spin_lock(&ipr_cmd->hrrq->_lock);
6844         if (ipr_cmd->ioa_cfg->sis64)
6845                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6846                        sizeof(struct ipr_ioasa_gata));
6847         else
6848                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6849                        sizeof(struct ipr_ioasa_gata));
6850         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6851
6852         if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6853                 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6854
6855         if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6856                 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6857         else
6858                 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6859         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6860         spin_unlock(&ipr_cmd->hrrq->_lock);
6861         ata_qc_complete(qc);
6862 }
6863
6864 /**
6865  * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6866  * @ipr_cmd:    ipr command struct
6867  * @qc:         ATA queued command
6868  *
6869  **/
6870 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6871                                   struct ata_queued_cmd *qc)
6872 {
6873         u32 ioadl_flags = 0;
6874         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6875         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6876         struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6877         int len = qc->nbytes;
6878         struct scatterlist *sg;
6879         unsigned int si;
6880         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6881
6882         if (len == 0)
6883                 return;
6884
6885         if (qc->dma_dir == DMA_TO_DEVICE) {
6886                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6887                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6888         } else if (qc->dma_dir == DMA_FROM_DEVICE)
6889                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6890
6891         ioarcb->data_transfer_length = cpu_to_be32(len);
6892         ioarcb->ioadl_len =
6893                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6894         ioarcb->u.sis64_addr_data.data_ioadl_addr =
6895                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6896
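             /*
              * Remember the most recently filled descriptor so the LAST
              * flag can be set once the loop finishes.
              */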
6897         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6898                 ioadl64->flags = cpu_to_be32(ioadl_flags);
6899                 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6900                 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6901
6902                 last_ioadl64 = ioadl64;
6903                 ioadl64++;
6904         }
6905
6906         if (likely(last_ioadl64))
6907                 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6908 }
6909
6910 /**
6911  * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6912  * @ipr_cmd:    ipr command struct
6913  * @qc:         ATA queued command
6914  *
6915  **/
6916 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6917                                 struct ata_queued_cmd *qc)
6918 {
6919         u32 ioadl_flags = 0;
6920         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6921         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6922         struct ipr_ioadl_desc *last_ioadl = NULL;
6923         int len = qc->nbytes;
6924         struct scatterlist *sg;
6925         unsigned int si;
6926
6927         if (len == 0)
6928                 return;
6929
6930         if (qc->dma_dir == DMA_TO_DEVICE) {
6931                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6932                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6933                 ioarcb->data_transfer_length = cpu_to_be32(len);
6934                 ioarcb->ioadl_len =
6935                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6936         } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6937                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6938                 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6939                 ioarcb->read_ioadl_len =
6940                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6941         }
6942
6943         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6944                 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6945                 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6946
6947                 last_ioadl = ioadl;
6948                 ioadl++;
6949         }
6950
6951         if (likely(last_ioadl))
6952                 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6953 }
6954
6955 /**
6956  * ipr_qc_defer - Reserve a free ipr_cmd for a queued command
6957  * @qc: queued command
6958  *
6959  * Return value:
6960  *      0 if the command may be issued / ATA_DEFER_LINK if it must wait
6961  **/
6962 static int ipr_qc_defer(struct ata_queued_cmd *qc)
6963 {
6964         struct ata_port *ap = qc->ap;
6965         struct ipr_sata_port *sata_port = ap->private_data;
6966         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6967         struct ipr_cmnd *ipr_cmd;
6968         struct ipr_hrr_queue *hrrq;
6969         int hrrq_id;
6970
6971         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6972         hrrq = &ioa_cfg->hrrq[hrrq_id];
6973
6974         qc->lldd_task = NULL;
6975         spin_lock(&hrrq->_lock);
6976         if (unlikely(hrrq->ioa_is_dead)) {
6977                 spin_unlock(&hrrq->_lock);
6978                 return 0;
6979         }
6980
6981         if (unlikely(!hrrq->allow_cmds)) {
6982                 spin_unlock(&hrrq->_lock);
6983                 return ATA_DEFER_LINK;
6984         }
6985
6986         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6987         if (ipr_cmd == NULL) {
6988                 spin_unlock(&hrrq->_lock);
6989                 return ATA_DEFER_LINK;
6990         }
6991
6992         qc->lldd_task = ipr_cmd;
6993         spin_unlock(&hrrq->_lock);
6994         return 0;
6995 }
6996
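     /*
      * The qc_defer/qc_issue contract: ipr_qc_defer() reserves an ipr_cmnd
      * and caches it in qc->lldd_task; ipr_qc_issue() consumes that pointer,
      * falling back to calling ipr_qc_defer() itself if libata issued the
      * qc without a prior defer callback. A NULL lldd_task at issue time
      * means no command block could be reserved and the qc fails with
      * AC_ERR_SYSTEM.
      */
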
6997 /**
6998  * ipr_qc_issue - Issue a SATA qc to a device
6999  * @qc: queued command
7000  *
7001  * Return value:
7002  *      0 if success / AC_ERR_* on failure
7003  **/
7004 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
7005 {
7006         struct ata_port *ap = qc->ap;
7007         struct ipr_sata_port *sata_port = ap->private_data;
7008         struct ipr_resource_entry *res = sata_port->res;
7009         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7010         struct ipr_cmnd *ipr_cmd;
7011         struct ipr_ioarcb *ioarcb;
7012         struct ipr_ioarcb_ata_regs *regs;
7013
7014         if (qc->lldd_task == NULL)
7015                 ipr_qc_defer(qc);
7016
7017         ipr_cmd = qc->lldd_task;
7018         if (ipr_cmd == NULL)
7019                 return AC_ERR_SYSTEM;
7020
7021         qc->lldd_task = NULL;
7022         spin_lock(&ipr_cmd->hrrq->_lock);
7023         if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
7024                         ipr_cmd->hrrq->ioa_is_dead)) {
7025                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7026                 spin_unlock(&ipr_cmd->hrrq->_lock);
7027                 return AC_ERR_SYSTEM;
7028         }
7029
7030         ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
7031         ioarcb = &ipr_cmd->ioarcb;
7032
7033         if (ioa_cfg->sis64) {
7034                 regs = &ipr_cmd->i.ata_ioadl.regs;
7035                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
7036         } else
7037                 regs = &ioarcb->u.add_data.u.regs;
7038
7039         memset(regs, 0, sizeof(*regs));
7040         ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
7041
7042         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7043         ipr_cmd->qc = qc;
7044         ipr_cmd->done = ipr_sata_done;
7045         ipr_cmd->ioarcb.res_handle = res->res_handle;
7046         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
7047         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
7048         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
7049         ipr_cmd->dma_use_sg = qc->n_elem;
7050
7051         if (ioa_cfg->sis64)
7052                 ipr_build_ata_ioadl64(ipr_cmd, qc);
7053         else
7054                 ipr_build_ata_ioadl(ipr_cmd, qc);
7055
7056         regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
7057         ipr_copy_sata_tf(regs, &qc->tf);
7058         memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
7059         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
7060
7061         switch (qc->tf.protocol) {
7062         case ATA_PROT_NODATA:
7063         case ATA_PROT_PIO:
7064                 break;
7065
7066         case ATA_PROT_DMA:
7067                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7068                 break;
7069
7070         case ATAPI_PROT_PIO:
7071         case ATAPI_PROT_NODATA:
7072                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7073                 break;
7074
7075         case ATAPI_PROT_DMA:
7076                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7077                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7078                 break;
7079
7080         default:
7081                 WARN_ON(1);
7082                 spin_unlock(&ipr_cmd->hrrq->_lock);
7083                 return AC_ERR_INVALID;
7084         }
7085
7086         ipr_send_command(ipr_cmd);
7087         spin_unlock(&ipr_cmd->hrrq->_lock);
7088
7089         return 0;
7090 }
7091
7092 /**
7093  * ipr_qc_fill_rtf - Read result TF
7094  * @qc: ATA queued command
7095  *
7096  * Return value:
7097  *      true
7098  **/
7099 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
7100 {
7101         struct ipr_sata_port *sata_port = qc->ap->private_data;
7102         struct ipr_ioasa_gata *g = &sata_port->ioasa;
7103         struct ata_taskfile *tf = &qc->result_tf;
7104
7105         tf->feature = g->error;
7106         tf->nsect = g->nsect;
7107         tf->lbal = g->lbal;
7108         tf->lbam = g->lbam;
7109         tf->lbah = g->lbah;
7110         tf->device = g->device;
7111         tf->command = g->status;
7112         tf->hob_nsect = g->hob_nsect;
7113         tf->hob_lbal = g->hob_lbal;
7114         tf->hob_lbam = g->hob_lbam;
7115         tf->hob_lbah = g->hob_lbah;
7116
7117         return true;
7118 }
7119
7120 static struct ata_port_operations ipr_sata_ops = {
7121         .phy_reset = ipr_ata_phy_reset,
7122         .hardreset = ipr_sata_reset,
7123         .post_internal_cmd = ipr_ata_post_internal,
7124         .qc_prep = ata_noop_qc_prep,
7125         .qc_defer = ipr_qc_defer,
7126         .qc_issue = ipr_qc_issue,
7127         .qc_fill_rtf = ipr_qc_fill_rtf,
7128         .port_start = ata_sas_port_start,
7129         .port_stop = ata_sas_port_stop
7130 };
7131
7132 static struct ata_port_info sata_port_info = {
7133         .flags          = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
7134                           ATA_FLAG_SAS_HOST,
7135         .pio_mask       = ATA_PIO4_ONLY,
7136         .mwdma_mask     = ATA_MWDMA2,
7137         .udma_mask      = ATA_UDMA6,
7138         .port_ops       = &ipr_sata_ops
7139 };
7140
7141 #ifdef CONFIG_PPC_PSERIES
7142 static const u16 ipr_blocked_processors[] = {
7143         PVR_NORTHSTAR,
7144         PVR_PULSAR,
7145         PVR_POWER4,
7146         PVR_ICESTAR,
7147         PVR_SSTAR,
7148         PVR_POWER4p,
7149         PVR_630,
7150         PVR_630p
7151 };
7152
7153 /**
7154  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7155  * @ioa_cfg:    ioa cfg struct
7156  *
7157  * Adapters that use Gemstone revision < 3.1 do not work reliably on
7158  * certain pSeries hardware. This function determines if the given
7159  * adapter is in one of these configurations or not.
7160  *
7161  * Return value:
7162  *      1 if adapter is not supported / 0 if adapter is supported
7163  **/
7164 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
7165 {
7166         int i;
7167
7168         if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
7169                 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
7170                         if (pvr_version_is(ipr_blocked_processors[i]))
7171                                 return 1;
7172                 }
7173         }
7174         return 0;
7175 }
7176 #else
7177 #define ipr_invalid_adapter(ioa_cfg) 0
7178 #endif
7179
7180 /**
7181  * ipr_ioa_bringdown_done - IOA bring down completion.
7182  * @ipr_cmd:    ipr command struct
7183  *
7184  * This function processes the completion of an adapter bring down.
7185  * It wakes any reset sleepers.
7186  *
7187  * Return value:
7188  *      IPR_RC_JOB_RETURN
7189  **/
7190 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
7191 {
7192         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7193         int i;
7194
7195         ENTER;
7196         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
7197                 ipr_trace;
7198                 ioa_cfg->scsi_unblock = 1;
7199                 schedule_work(&ioa_cfg->work_q);
7200         }
7201
7202         ioa_cfg->in_reset_reload = 0;
7203         ioa_cfg->reset_retries = 0;
7204         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7205                 spin_lock(&ioa_cfg->hrrq[i]._lock);
7206                 ioa_cfg->hrrq[i].ioa_is_dead = 1;
7207                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7208         }
7209         wmb();
7210
7211         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7212         wake_up_all(&ioa_cfg->reset_wait_q);
7213         LEAVE;
7214
7215         return IPR_RC_JOB_RETURN;
7216 }
7217
7218 /**
7219  * ipr_ioa_reset_done - IOA reset completion.
7220  * @ipr_cmd:    ipr command struct
7221  *
7222  * This function processes the completion of an adapter reset.
7223  * It schedules any necessary mid-layer add/removes and
7224  * wakes any reset sleepers.
7225  *
7226  * Return value:
7227  *      IPR_RC_JOB_RETURN
7228  **/
7229 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7230 {
7231         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7232         struct ipr_resource_entry *res;
7233         int j;
7234
7235         ENTER;
7236         ioa_cfg->in_reset_reload = 0;
7237         for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7238                 spin_lock(&ioa_cfg->hrrq[j]._lock);
7239                 ioa_cfg->hrrq[j].allow_cmds = 1;
7240                 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7241         }
7242         wmb();
7243         ioa_cfg->reset_cmd = NULL;
7244         ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
7245
7246         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
7247                 if (res->add_to_ml || res->del_from_ml) {
7248                         ipr_trace;
7249                         break;
7250                 }
7251         }
7252         schedule_work(&ioa_cfg->work_q);
7253
7254         for (j = 0; j < IPR_NUM_HCAMS; j++) {
7255                 list_del_init(&ioa_cfg->hostrcb[j]->queue);
7256                 if (j < IPR_NUM_LOG_HCAMS)
7257                         ipr_send_hcam(ioa_cfg,
7258                                 IPR_HCAM_CDB_OP_CODE_LOG_DATA,
7259                                 ioa_cfg->hostrcb[j]);
7260                 else
7261                         ipr_send_hcam(ioa_cfg,
7262                                 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
7263                                 ioa_cfg->hostrcb[j]);
7264         }
7265
7266         scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
7267         dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7268
7269         ioa_cfg->reset_retries = 0;
7270         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7271         wake_up_all(&ioa_cfg->reset_wait_q);
7272
7273         ioa_cfg->scsi_unblock = 1;
7274         schedule_work(&ioa_cfg->work_q);
7275         LEAVE;
7276         return IPR_RC_JOB_RETURN;
7277 }
7278
7279 /**
7280  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7281  * @supported_dev:      supported device struct
7282  * @vpids:                      vendor product id struct
7283  *
7284  * Return value:
7285  *      none
7286  **/
7287 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7288                                  struct ipr_std_inq_vpids *vpids)
7289 {
7290         memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7291         memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7292         supported_dev->num_records = 1;
7293         supported_dev->data_length =
7294                 cpu_to_be16(sizeof(struct ipr_supported_device));
7295         supported_dev->reserved = 0;
7296 }
7297
7298 /**
7299  * ipr_set_supported_devs - Send Set Supported Devices for a device
7300  * @ipr_cmd:    ipr command struct
7301  *
7302  * This function sends a Set Supported Devices to the adapter
7303  *
7304  * Return value:
7305  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7306  **/
7307 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7308 {
7309         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7310         struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
7311         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7312         struct ipr_resource_entry *res = ipr_cmd->u.res;
7313
7314         ipr_cmd->job_step = ipr_ioa_reset_done;
7315
7316         list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
7317                 if (!ipr_is_scsi_disk(res))
7318                         continue;
7319
7320                 ipr_cmd->u.res = res;
7321                 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
7322
7323                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7324                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7325                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7326
7327                 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
7328                 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
7329                 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7330                 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7331
7332                 ipr_init_ioadl(ipr_cmd,
7333                                ioa_cfg->vpd_cbs_dma +
7334                                  offsetof(struct ipr_misc_cbs, supp_dev),
7335                                sizeof(struct ipr_supported_device),
7336                                IPR_IOADL_FLAGS_WRITE_LAST);
7337
7338                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7339                            IPR_SET_SUP_DEVICE_TIMEOUT);
7340
7341                 if (!ioa_cfg->sis64)
7342                         ipr_cmd->job_step = ipr_set_supported_devs;
7343                 LEAVE;
7344                 return IPR_RC_JOB_RETURN;
7345         }
7346
7347         LEAVE;
7348         return IPR_RC_JOB_CONTINUE;
7349 }
7350
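     /*
      * Reset job pattern: each job_step issues at most one asynchronous
      * request and returns IPR_RC_JOB_RETURN; the ipr_reset_ioa_job
      * callback then re-enters the current job_step. The function above
      * re-arms itself on pre-sis64 adapters so it can walk used_res_q one
      * disk at a time, while on sis64 a single
      * IPR_SET_ALL_SUPPORTED_DEVICES request appears to suffice, so the
      * step is not re-armed.
      */
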
7351 /**
7352  * ipr_get_mode_page - Locate specified mode page
7353  * @mode_pages: mode page buffer
7354  * @page_code:  page code to find
7355  * @len:                minimum required length for mode page
7356  *
7357  * Return value:
7358  *      pointer to mode page / NULL on failure
7359  **/
7360 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7361                                u32 page_code, u32 len)
7362 {
7363         struct ipr_mode_page_hdr *mode_hdr;
7364         u32 page_length;
7365         u32 length;
7366
7367         if (!mode_pages || (mode_pages->hdr.length == 0))
7368                 return NULL;
7369
7370         length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7371         mode_hdr = (struct ipr_mode_page_hdr *)
7372                 (mode_pages->data + mode_pages->hdr.block_desc_len);
7373
7374         while (length) {
7375                 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7376                         if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7377                                 return mode_hdr;
7378                         break;
7379                 } else {
7380                         page_length = (sizeof(struct ipr_mode_page_hdr) +
7381                                        mode_hdr->page_length);
7382                         length -= page_length;
7383                         mode_hdr = (struct ipr_mode_page_hdr *)
7384                                 ((unsigned long)mode_hdr + page_length);
7385                 }
7386         }
7387         return NULL;
7388 }
7389
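     /*
      * Sketch of the buffer walked above (standard MODE SENSE(6) layout):
      *
      *   mode_pages->hdr        4-byte mode parameter header
      *   <block descriptors>    hdr.block_desc_len bytes, skipped
      *   page hdr | page data   sizeof(ipr_mode_page_hdr) + page_length
      *   page hdr | page data   ...repeated until `length` is consumed
      *
      * `length` counts what remains after the parameter header and block
      * descriptors; hdr.length + 1 is the total buffer size, since the
      * mode data length byte does not count itself.
      */
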
7390 /**
7391  * ipr_check_term_power - Check for term power errors
7392  * @ioa_cfg:    ioa config struct
7393  * @mode_pages: IOAFP mode pages buffer
7394  *
7395  * Check the IOAFP's mode page 28 for term power errors
7396  *
7397  * Return value:
7398  *      nothing
7399  **/
7400 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7401                                  struct ipr_mode_pages *mode_pages)
7402 {
7403         int i;
7404         int entry_length;
7405         struct ipr_dev_bus_entry *bus;
7406         struct ipr_mode_page28 *mode_page;
7407
7408         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7409                                       sizeof(struct ipr_mode_page28));
7410
             /* ipr_get_mode_page() returns NULL if the page is missing or short */
             if (!mode_page)
                     return;

7411         entry_length = mode_page->entry_length;
7412
7413         bus = mode_page->bus;
7414
7415         for (i = 0; i < mode_page->num_entries; i++) {
7416                 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7417                         dev_err(&ioa_cfg->pdev->dev,
7418                                 "Term power is absent on scsi bus %d\n",
7419                                 bus->res_addr.bus);
7420                 }
7421
7422                 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7423         }
7424 }
7425
7426 /**
7427  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7428  * @ioa_cfg:    ioa config struct
7429  *
7430  * Looks through the config table checking for SES devices. If
7431  * an SES device appears in the SES table with a maximum SCSI
7432  * bus speed, the bus is limited to that speed.
7433  *
7434  * Return value:
7435  *      none
7436  **/
7437 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7438 {
7439         u32 max_xfer_rate;
7440         int i;
7441
7442         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7443                 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7444                                                        ioa_cfg->bus_attr[i].bus_width);
7445
7446                 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7447                         ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7448         }
7449 }
7450
7451 /**
7452  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7453  * @ioa_cfg:    ioa config struct
7454  * @mode_pages: mode page 28 buffer
7455  *
7456  * Updates mode page 28 based on driver configuration
7457  *
7458  * Return value:
7459  *      none
7460  **/
7461 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7462                                           struct ipr_mode_pages *mode_pages)
7463 {
7464         int i, entry_length;
7465         struct ipr_dev_bus_entry *bus;
7466         struct ipr_bus_attributes *bus_attr;
7467         struct ipr_mode_page28 *mode_page;
7468
7469         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7470                                       sizeof(struct ipr_mode_page28));
7471
             /* guard against a missing page 28; ipr_get_mode_page() can return NULL */
             if (!mode_page)
                     return;

7472         entry_length = mode_page->entry_length;
7473
7474         /* Loop for each device bus entry */
7475         for (i = 0, bus = mode_page->bus;
7476              i < mode_page->num_entries;
7477              i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7478                 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7479                         dev_err(&ioa_cfg->pdev->dev,
7480                                 "Invalid resource address reported: 0x%08X\n",
7481                                 IPR_GET_PHYS_LOC(bus->res_addr));
7482                         continue;
7483                 }
7484
7485                 bus_attr = &ioa_cfg->bus_attr[i];
7486                 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7487                 bus->bus_width = bus_attr->bus_width;
7488                 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7489                 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7490                 if (bus_attr->qas_enabled)
7491                         bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7492                 else
7493                         bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7494         }
7495 }
7496
7497 /**
7498  * ipr_build_mode_select - Build a mode select command
7499  * @ipr_cmd:    ipr command struct
7500  * @res_handle: resource handle to send command to
7501  * @parm:               Byte 1 of the Mode Select command
7502  * @dma_addr:   DMA buffer address
7503  * @xfer_len:   data transfer length
7504  *
7505  * Return value:
7506  *      none
7507  **/
7508 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7509                                   __be32 res_handle, u8 parm,
7510                                   dma_addr_t dma_addr, u8 xfer_len)
7511 {
7512         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7513
7514         ioarcb->res_handle = res_handle;
7515         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7516         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7517         ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7518         ioarcb->cmd_pkt.cdb[1] = parm;
7519         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7520
7521         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7522 }
7523
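     /*
      * Resulting MODE SELECT(6) CDB, for reference (callers here pass
      * parm = 0x11, which reads as PF | SP in standard SCSI, although the
      * adapter's interpretation is ultimately firmware-defined):
      *
      *   cdb[0] = MODE_SELECT (0x15)
      *   cdb[1] = parm
      *   cdb[4] = xfer_len (parameter list length)
      */
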
7524 /**
7525  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7526  * @ipr_cmd:    ipr command struct
7527  *
7528  * This function sets up the SCSI bus attributes and sends
7529  * a Mode Select for Page 28 to activate them.
7530  *
7531  * Return value:
7532  *      IPR_RC_JOB_RETURN
7533  **/
7534 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7535 {
7536         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7537         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7538         int length;
7539
7540         ENTER;
7541         ipr_scsi_bus_speed_limit(ioa_cfg);
7542         ipr_check_term_power(ioa_cfg, mode_pages);
7543         ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7544         length = mode_pages->hdr.length + 1;
7545         mode_pages->hdr.length = 0;
7546
7547         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7548                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7549                               length);
7550
7551         ipr_cmd->job_step = ipr_set_supported_devs;
7552         ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7553                                     struct ipr_resource_entry, queue);
7554         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7555
7556         LEAVE;
7557         return IPR_RC_JOB_RETURN;
7558 }
7559
7560 /**
7561  * ipr_build_mode_sense - Builds a mode sense command
7562  * @ipr_cmd:    ipr command struct
7563  * @res_handle:         resource handle to send command to
7564  * @parm:               Byte 2 of mode sense command
7565  * @dma_addr:   DMA address of mode sense buffer
7566  * @xfer_len:   Size of DMA buffer
7567  *
7568  * Return value:
7569  *      none
7570  **/
7571 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7572                                  __be32 res_handle,
7573                                  u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7574 {
7575         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7576
7577         ioarcb->res_handle = res_handle;
7578         ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7579         ioarcb->cmd_pkt.cdb[2] = parm;
7580         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7581         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7582
7583         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7584 }
7585
7586 /**
7587  * ipr_reset_cmd_failed - Handle failure of IOA reset command
7588  * @ipr_cmd:    ipr command struct
7589  *
7590  * This function handles the failure of an IOA bringup command.
7591  *
7592  * Return value:
7593  *      IPR_RC_JOB_RETURN
7594  **/
7595 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7596 {
7597         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7598         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7599
7600         dev_err(&ioa_cfg->pdev->dev,
7601                 "0x%02X failed with IOASC: 0x%08X\n",
7602                 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7603
7604         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7605         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7606         return IPR_RC_JOB_RETURN;
7607 }
7608
7609 /**
7610  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7611  * @ipr_cmd:    ipr command struct
7612  *
7613  * This function handles the failure of a Mode Sense to the IOAFP.
7614  * Some adapters do not handle all mode pages.
7615  *
7616  * Return value:
7617  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7618  **/
7619 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7620 {
7621         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7622         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7623
7624         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7625                 ipr_cmd->job_step = ipr_set_supported_devs;
7626                 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7627                                             struct ipr_resource_entry, queue);
7628                 return IPR_RC_JOB_CONTINUE;
7629         }
7630
7631         return ipr_reset_cmd_failed(ipr_cmd);
7632 }
7633
7634 /**
7635  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7636  * @ipr_cmd:    ipr command struct
7637  *
7638  * This function sends a Page 28 mode sense to the IOA to
7639  * retrieve SCSI bus attributes.
7640  *
7641  * Return value:
7642  *      IPR_RC_JOB_RETURN
7643  **/
7644 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7645 {
7646         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7647
7648         ENTER;
7649         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7650                              0x28, ioa_cfg->vpd_cbs_dma +
7651                              offsetof(struct ipr_misc_cbs, mode_pages),
7652                              sizeof(struct ipr_mode_pages));
7653
7654         ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7655         ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7656
7657         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7658
7659         LEAVE;
7660         return IPR_RC_JOB_RETURN;
7661 }
7662
7663 /**
7664  * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7665  * @ipr_cmd:    ipr command struct
7666  *
7667  * This function enables dual IOA RAID support if possible.
7668  *
7669  * Return value:
7670  *      IPR_RC_JOB_RETURN
7671  **/
7672 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7673 {
7674         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7675         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7676         struct ipr_mode_page24 *mode_page;
7677         int length;
7678
7679         ENTER;
7680         mode_page = ipr_get_mode_page(mode_pages, 0x24,
7681                                       sizeof(struct ipr_mode_page24));
7682
7683         if (mode_page)
7684                 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7685
7686         length = mode_pages->hdr.length + 1;
7687         mode_pages->hdr.length = 0;
7688
7689         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7690                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7691                               length);
7692
7693         ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7694         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7695
7696         LEAVE;
7697         return IPR_RC_JOB_RETURN;
7698 }
7699
7700 /**
7701  * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7702  * @ipr_cmd:    ipr command struct
7703  *
7704  * This function handles the failure of a Mode Sense to the IOAFP.
7705  * Some adapters do not handle all mode pages.
7706  *
7707  * Return value:
7708  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7709  **/
7710 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7711 {
7712         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7713
7714         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7715                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7716                 return IPR_RC_JOB_CONTINUE;
7717         }
7718
7719         return ipr_reset_cmd_failed(ipr_cmd);
7720 }
7721
7722 /**
7723  * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7724  * @ipr_cmd:    ipr command struct
7725  *
7726  * This function sends a mode sense to the IOA to retrieve
7727  * the IOA Advanced Function Control mode page.
7728  *
7729  * Return value:
7730  *      IPR_RC_JOB_RETURN
7731  **/
7732 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7733 {
7734         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7735
7736         ENTER;
7737         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7738                              0x24, ioa_cfg->vpd_cbs_dma +
7739                              offsetof(struct ipr_misc_cbs, mode_pages),
7740                              sizeof(struct ipr_mode_pages));
7741
7742         ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7743         ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7744
7745         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7746
7747         LEAVE;
7748         return IPR_RC_JOB_RETURN;
7749 }
7750
7751 /**
7752  * ipr_init_res_table - Initialize the resource table
7753  * @ipr_cmd:    ipr command struct
7754  *
7755  * This function looks through the existing resource table, comparing
7756  * it with the config table. This function will take care of old/new
7757  * devices and schedule adding/removing them from the mid-layer
7758  * as appropriate.
7759  *
7760  * Return value:
7761  *      IPR_RC_JOB_CONTINUE
7762  **/
7763 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7764 {
7765         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7766         struct ipr_resource_entry *res, *temp;
7767         struct ipr_config_table_entry_wrapper cfgtew;
7768         int entries, found, flag, i;
7769         LIST_HEAD(old_res);
7770
7771         ENTER;
7772         if (ioa_cfg->sis64)
7773                 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7774         else
7775                 flag = ioa_cfg->u.cfg_table->hdr.flags;
7776
7777         if (flag & IPR_UCODE_DOWNLOAD_REQ)
7778                 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7779
7780         list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7781                 list_move_tail(&res->queue, &old_res);
7782
7783         if (ioa_cfg->sis64)
7784                 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7785         else
7786                 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7787
7788         for (i = 0; i < entries; i++) {
7789                 if (ioa_cfg->sis64)
7790                         cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7791                 else
7792                         cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7793                 found = 0;
7794
7795                 list_for_each_entry_safe(res, temp, &old_res, queue) {
7796                         if (ipr_is_same_device(res, &cfgtew)) {
7797                                 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7798                                 found = 1;
7799                                 break;
7800                         }
7801                 }
7802
7803                 if (!found) {
7804                         if (list_empty(&ioa_cfg->free_res_q)) {
7805                                 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7806                                 break;
7807                         }
7808
7809                         found = 1;
7810                         res = list_entry(ioa_cfg->free_res_q.next,
7811                                          struct ipr_resource_entry, queue);
7812                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7813                         ipr_init_res_entry(res, &cfgtew);
7814                         res->add_to_ml = 1;
7815                 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7816                         res->sdev->allow_restart = 1;
7817
7818                 if (found)
7819                         ipr_update_res_entry(res, &cfgtew);
7820         }
7821
7822         list_for_each_entry_safe(res, temp, &old_res, queue) {
7823                 if (res->sdev) {
7824                         res->del_from_ml = 1;
7825                         res->res_handle = IPR_INVALID_RES_HANDLE;
7826                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7827                 }
7828         }
7829
7830         list_for_each_entry_safe(res, temp, &old_res, queue) {
7831                 ipr_clear_res_target(res);
7832                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7833         }
7834
7835         if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7836                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7837         else
7838                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7839
7840         LEAVE;
7841         return IPR_RC_JOB_CONTINUE;
7842 }
7843
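     /*
      * Reconciliation summary for ipr_init_res_table() above:
      *   1. Everything on used_res_q is parked on old_res.
      *   2. Each config table entry either reclaims its match from old_res
      *      or takes a fresh entry from free_res_q, with add_to_ml set so
      *      the worker thread registers it with the SCSI mid-layer.
      *   3. Leftovers on old_res that still have an sdev are marked
      *      del_from_ml and kept on used_res_q until mid-layer removal
      *      completes; the remainder go straight back to free_res_q.
      */
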
7844 /**
7845  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7846  * @ipr_cmd:    ipr command struct
7847  *
7848  * This function sends a Query IOA Configuration command
7849  * to the adapter to retrieve the IOA configuration table.
7850  *
7851  * Return value:
7852  *      IPR_RC_JOB_RETURN
7853  **/
7854 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7855 {
7856         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7857         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7858         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7859         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7860
7861         ENTER;
7862         if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7863                 ioa_cfg->dual_raid = 1;
7864         dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7865                  ucode_vpd->major_release, ucode_vpd->card_type,
7866                  ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7867         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7868         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7869
7870         ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7871         ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7872         ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7873         ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7874
7875         ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7876                        IPR_IOADL_FLAGS_READ_LAST);
7877
7878         ipr_cmd->job_step = ipr_init_res_table;
7879
7880         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7881
7882         LEAVE;
7883         return IPR_RC_JOB_RETURN;
7884 }
7885
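     /**
      * ipr_ioa_service_action_failed - Handle failure of an IOA service action
      * @ipr_cmd:    ipr command struct
      *
      * Return value:
      *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
      **/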
7886 static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7887 {
7888         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7889
7890         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7891                 return IPR_RC_JOB_CONTINUE;
7892
7893         return ipr_reset_cmd_failed(ipr_cmd);
7894 }
7895
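     /**
      * ipr_build_ioa_service_action - Build an IOA service action command
      * @ipr_cmd:    ipr command struct
      * @res_handle: resource handle to send command to
      * @sa_code:    service action code
      *
      * Return value:
      *      none
      **/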
7896 static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7897                                          __be32 res_handle, u8 sa_code)
7898 {
7899         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7900
7901         ioarcb->res_handle = res_handle;
7902         ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7903         ioarcb->cmd_pkt.cdb[1] = sa_code;
7904         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7905 }
7906
7907 /**
7908  * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service action
7909  * @ipr_cmd:    ipr command struct
7910  *
7911  * Return value:
7912  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7913  **/
7914 static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7915 {
7916         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7917         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7918         struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7919
7920         ENTER;
7921
7922         ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7923
7924         if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7925                 ipr_build_ioa_service_action(ipr_cmd,
7926                                              cpu_to_be32(IPR_IOA_RES_HANDLE),
7927                                              IPR_IOA_SA_CHANGE_CACHE_PARAMS);
7928
7929                 ioarcb->cmd_pkt.cdb[2] = 0x40;
7930
7931                 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7932                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7933                            IPR_SET_SUP_DEVICE_TIMEOUT);
7934
7935                 LEAVE;
7936                 return IPR_RC_JOB_RETURN;
7937         }
7938
7939         LEAVE;
7940         return IPR_RC_JOB_CONTINUE;
7941 }
7942
7943 /**
7944  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7945  * @ipr_cmd:    ipr command struct
      * @flags:      inquiry flags (CDB byte 1)
      * @page:       page code (CDB byte 2)
      * @dma_addr:   DMA address of the inquiry response buffer
      * @xfer_len:   size of the response buffer
7946  *
7947  * This utility function sends an inquiry to the adapter.
7948  *
7949  * Return value:
7950  *      none
7951  **/
7952 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7953                               dma_addr_t dma_addr, u8 xfer_len)
7954 {
7955         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7956
7957         ENTER;
7958         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7959         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7960
7961         ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7962         ioarcb->cmd_pkt.cdb[1] = flags;
7963         ioarcb->cmd_pkt.cdb[2] = page;
7964         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7965
7966         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7967
7968         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7969         LEAVE;
7970 }
7971
7972 /**
7973  * ipr_inquiry_page_supported - Is the given inquiry page supported
7974  * @page0:              inquiry page 0 buffer
7975  * @page:               page code.
7976  *
7977  * This function determines if the specified inquiry page is supported.
7978  *
7979  * Return value:
7980  *      1 if page is supported / 0 if not
7981  **/
7982 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7983 {
7984         int i;
7985
7986         for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7987                 if (page0->page[i] == page)
7988                         return 1;
7989
7990         return 0;
7991 }
7992
7993 /**
7994  * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
7995  * @ipr_cmd:    ipr command struct
7996  *
7997  * This function sends a Page 0xC4 inquiry to the adapter
7998  * to retrieve software VPD information.
7999  *
8000  * Return value:
8001  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8002  **/
8003 static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
8004 {
8005         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8006         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8007         struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
8008
8009         ENTER;
8010         ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
8011         memset(pageC4, 0, sizeof(*pageC4));
8012
8013         if (ipr_inquiry_page_supported(page0, 0xC4)) {
8014                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
8015                                   (ioa_cfg->vpd_cbs_dma
8016                                    + offsetof(struct ipr_misc_cbs,
8017                                               pageC4_data)),
8018                                   sizeof(struct ipr_inquiry_pageC4));
8019                 return IPR_RC_JOB_RETURN;
8020         }
8021
8022         LEAVE;
8023         return IPR_RC_JOB_CONTINUE;
8024 }
8025
8026 /**
8027  * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
8028  * @ipr_cmd:    ipr command struct
8029  *
8030  * This function sends a Page 0xD0 inquiry to the adapter
8031  * to retrieve adapter capabilities.
8032  *
8033  * Return value:
8034  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8035  **/
8036 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
8037 {
8038         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8039         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8040         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
8041
8042         ENTER;
8043         ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
8044         memset(cap, 0, sizeof(*cap));
8045
8046         if (ipr_inquiry_page_supported(page0, 0xD0)) {
8047                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
8048                                   ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
8049                                   sizeof(struct ipr_inquiry_cap));
8050                 return IPR_RC_JOB_RETURN;
8051         }
8052
8053         LEAVE;
8054         return IPR_RC_JOB_CONTINUE;
8055 }
8056
8057 /**
8058  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
8059  * @ipr_cmd:    ipr command struct
8060  *
8061  * This function sends a Page 3 inquiry to the adapter
8062  * to retrieve software VPD information.
8063  *
8064  * Return value:
8065  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8066  **/
8067 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
8068 {
8069         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8070
8071         ENTER;
8072
8073         ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
8074
8075         ipr_ioafp_inquiry(ipr_cmd, 1, 3,
8076                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
8077                           sizeof(struct ipr_inquiry_page3));
8078
8079         LEAVE;
8080         return IPR_RC_JOB_RETURN;
8081 }
8082
8083 /**
8084  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
8085  * @ipr_cmd:    ipr command struct
8086  *
8087  * This function sends a Page 0 inquiry to the adapter
8088  * to retrieve supported inquiry pages.
8089  *
8090  * Return value:
8091  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8092  **/
8093 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
8094 {
8095         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8096         char type[5];
8097
8098         ENTER;
8099
8100         /* Grab the type out of the VPD and store it away */
8101         memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
8102         type[4] = '\0';
8103         ioa_cfg->type = simple_strtoul(type, NULL, 16);
8104
8105         if (ipr_invalid_adapter(ioa_cfg)) {
8106                 dev_err(&ioa_cfg->pdev->dev,
8107                         "Adapter not supported in this hardware configuration.\n");
8108
8109                 if (!ipr_testmode) {
8110                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
8111                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8112                         list_add_tail(&ipr_cmd->queue,
8113                                         &ioa_cfg->hrrq->hrrq_free_q);
8114                         return IPR_RC_JOB_RETURN;
8115                 }
8116         }
8117
8118         ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
8119
8120         ipr_ioafp_inquiry(ipr_cmd, 1, 0,
8121                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
8122                           sizeof(struct ipr_inquiry_page0));
8123
8124         LEAVE;
8125         return IPR_RC_JOB_RETURN;
8126 }
8127
8128 /**
8129  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
8130  * @ipr_cmd:    ipr command struct
8131  *
8132  * This function sends a standard inquiry to the adapter.
8133  *
8134  * Return value:
8135  *      IPR_RC_JOB_RETURN
8136  **/
8137 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
8138 {
8139         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8140
8141         ENTER;
8142         ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
8143
8144         ipr_ioafp_inquiry(ipr_cmd, 0, 0,
8145                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
8146                           sizeof(struct ipr_ioa_vpd));
8147
8148         LEAVE;
8149         return IPR_RC_JOB_RETURN;
8150 }
8151
8152 /**
8153  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
8154  * @ipr_cmd:    ipr command struct
8155  *
8156  * This function sends an Identify Host Request Response Queue
8157  * command to establish the HRRQ with the adapter.
8158  *
8159  * Return value:
8160  *      IPR_RC_JOB_RETURN
8161  **/
8162 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
8163 {
8164         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8165         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
8166         struct ipr_hrr_queue *hrrq;
8167
8168         ENTER;
8169         ipr_cmd->job_step = ipr_ioafp_std_inquiry;
8170         if (ioa_cfg->identify_hrrq_index == 0)
8171                 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
8172
8173         if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
8174                 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
8175
8176                 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
8177                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8178
8179                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8180                 if (ioa_cfg->sis64)
8181                         ioarcb->cmd_pkt.cdb[1] = 0x1;
8182
8183                 if (ioa_cfg->nvectors == 1)
8184                         ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
8185                 else
8186                         ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
8187
8188                 ioarcb->cmd_pkt.cdb[2] =
8189                         ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
8190                 ioarcb->cmd_pkt.cdb[3] =
8191                         ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
8192                 ioarcb->cmd_pkt.cdb[4] =
8193                         ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
8194                 ioarcb->cmd_pkt.cdb[5] =
8195                         ((u64) hrrq->host_rrq_dma) & 0xff;
8196                 ioarcb->cmd_pkt.cdb[7] =
8197                         ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
8198                 ioarcb->cmd_pkt.cdb[8] =
8199                         (sizeof(u32) * hrrq->size) & 0xff;
8200
8201                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8202                         ioarcb->cmd_pkt.cdb[9] =
8203                                         ioa_cfg->identify_hrrq_index;
8204
8205                 if (ioa_cfg->sis64) {
8206                         ioarcb->cmd_pkt.cdb[10] =
8207                                 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
8208                         ioarcb->cmd_pkt.cdb[11] =
8209                                 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
8210                         ioarcb->cmd_pkt.cdb[12] =
8211                                 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
8212                         ioarcb->cmd_pkt.cdb[13] =
8213                                 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
8214                 }
8215
8216                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8217                         ioarcb->cmd_pkt.cdb[14] =
8218                                         ioa_cfg->identify_hrrq_index;
8219
8220                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8221                            IPR_INTERNAL_TIMEOUT);
8222
8223                 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
8224                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8225
8226                 LEAVE;
8227                 return IPR_RC_JOB_RETURN;
8228         }
8229
8230         LEAVE;
8231         return IPR_RC_JOB_CONTINUE;
8232 }
8233
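     /*
      * Identify Host RRQ CDB layout, as assembled above:
      *
      *   cdb[0]       IPR_ID_HOST_RR_Q
      *   cdb[1]       0x1 on sis64; IPR_ID_HRRQ_SELE_ENABLE set when more
      *                than one interrupt vector is in use
      *   cdb[2..5]    host RRQ DMA address bits 31:0, MSB first
      *   cdb[7..8]    queue size in bytes (sizeof(u32) * hrrq->size)
      *   cdb[9], [14] HRRQ index, only when subqueue selection is enabled
      *   cdb[10..13]  DMA address bits 63:32 (sis64 only)
      */
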
8234 /**
8235  * ipr_reset_timer_done - Adapter reset timer function
8236  * @t:          Timer context used to fetch ipr command struct
8237  *
8238  * Description: This function is used in adapter reset processing
8239  * for timing events. If the reset_cmd pointer in the IOA
8240  * config struct no longer points at this command, we are doing nested
8241  * resets and fail_all_ops will take care of freeing the
8242  * command block.
8243  *
8244  * Return value:
8245  *      none
8246  **/
8247 static void ipr_reset_timer_done(struct timer_list *t)
8248 {
8249         struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
8250         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8251         unsigned long lock_flags = 0;
8252
8253         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8254
8255         if (ioa_cfg->reset_cmd == ipr_cmd) {
8256                 list_del(&ipr_cmd->queue);
8257                 ipr_cmd->done(ipr_cmd);
8258         }
8259
8260         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8261 }
8262
8263 /**
8264  * ipr_reset_start_timer - Start a timer for adapter reset job
8265  * @ipr_cmd:    ipr command struct
8266  * @timeout:    timeout value
8267  *
8268  * Description: This function is used in adapter reset processing
8269  * for timing events. If the reset_cmd pointer in the IOA
8270  * config struct no longer points at this command, we are doing nested
8271  * resets and fail_all_ops will take care of freeing the
8272  * command block.
8273  *
8274  * Return value:
8275  *      none
8276  **/
8277 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
8278                                   unsigned long timeout)
8279 {
8280
8281         ENTER;
8282         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8283         ipr_cmd->done = ipr_reset_ioa_job;
8284
8285         ipr_cmd->timer.expires = jiffies + timeout;
8286         ipr_cmd->timer.function = ipr_reset_timer_done;
8287         add_timer(&ipr_cmd->timer);
8288 }
8289
8290 /**
8291  * ipr_init_ioa_mem - Initialize ioa_cfg control block
8292  * @ioa_cfg:    ioa cfg struct
8293  *
8294  * Return value:
8295  *      nothing
8296  **/
8297 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
8298 {
8299         struct ipr_hrr_queue *hrrq;
8300
8301         for_each_hrrq(hrrq, ioa_cfg) {
8302                 spin_lock(&hrrq->_lock);
8303                 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
8304
8305                 /* Initialize Host RRQ pointers */
8306                 hrrq->hrrq_start = hrrq->host_rrq;
8307                 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8308                 hrrq->hrrq_curr = hrrq->hrrq_start;
8309                 hrrq->toggle_bit = 1;
8310                 spin_unlock(&hrrq->_lock);
8311         }
8312         wmb();
8313
8314         ioa_cfg->identify_hrrq_index = 0;
8315         if (ioa_cfg->hrrq_num == 1)
8316                 atomic_set(&ioa_cfg->hrrq_index, 0);
8317         else
8318                 atomic_set(&ioa_cfg->hrrq_index, 1);
8319
8320         /* Zero out config table */
8321         memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
8322 }
8323
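     /*
      * The toggle_bit reset above matters because response queue entries
      * are not cleared at runtime: the host tells fresh responses from
      * stale ones by a bit the adapter flips on each wrap of the queue.
      * Starting from a zeroed queue with toggle_bit = 1 makes every old
      * entry read as stale.
      */
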
8324 /**
8325  * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8326  * @ipr_cmd:    ipr command struct
8327  *
8328  * Return value:
8329  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8330  **/
8331 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8332 {
8333         unsigned long stage, stage_time;
8334         u32 feedback;
8335         volatile u32 int_reg;
8336         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8337         u64 maskval = 0;
8338
8339         feedback = readl(ioa_cfg->regs.init_feedback_reg);
8340         stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8341         stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8342
8343         ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8344
8345         /* sanity check the stage_time value */
8346         if (stage_time == 0)
8347                 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8348         else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
8349                 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8350         else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8351                 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8352
8353         if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8354                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8355                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8356                 stage_time = ioa_cfg->transop_timeout;
8357                 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8358         } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
8359                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8360                 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8361                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8362                         maskval = IPR_PCII_IPL_STAGE_CHANGE;
8363                         maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8364                         writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8365                         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8366                         return IPR_RC_JOB_CONTINUE;
8367                 }
8368         }
8369
8370         ipr_cmd->timer.expires = jiffies + stage_time * HZ;
8371         ipr_cmd->timer.function = ipr_oper_timeout;
8372         ipr_cmd->done = ipr_reset_ioa_job;
8373         add_timer(&ipr_cmd->timer);
8374
8375         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8376
8377         return IPR_RC_JOB_RETURN;
8378 }
8379
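     /*
      * The init feedback register packs the current IPL stage and a
      * per-stage timeout hint into a single 32-bit value (see
      * IPR_IPL_INIT_STAGE_MASK and IPR_IPL_INIT_STAGE_TIME_MASK); the
      * clamping above defends against a zero or implausible hint from
      * early firmware.
      */
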
8380 /**
8381  * ipr_reset_enable_ioa - Enable the IOA following a reset.
8382  * @ipr_cmd:    ipr command struct
8383  *
8384  * This function reinitializes some control blocks and
8385  * enables destructive diagnostics on the adapter.
8386  *
8387  * Return value:
8388  *      IPR_RC_JOB_RETURN
8389  **/
8390 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8391 {
8392         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8393         volatile u32 int_reg;
8394         volatile u64 maskval;
8395         int i;
8396
8397         ENTER;
8398         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8399         ipr_init_ioa_mem(ioa_cfg);
8400
8401         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8402                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8403                 ioa_cfg->hrrq[i].allow_interrupts = 1;
8404                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8405         }
8406         wmb();
8407         if (ioa_cfg->sis64) {
8408                 /* Set the adapter to the correct endian mode. */
8409                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8410                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8411         }
8412
8413         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8414
8415         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8416                 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
8417                        ioa_cfg->regs.clr_interrupt_mask_reg32);
8418                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8419                 return IPR_RC_JOB_CONTINUE;
8420         }
8421
8422         /* Enable destructive diagnostics on IOA */
8423         writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8424
8425         if (ioa_cfg->sis64) {
8426                 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8427                 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8428                 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8429         } else
8430                 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
8431
8432         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8433
8434         dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8435
8436         if (ioa_cfg->sis64) {
8437                 ipr_cmd->job_step = ipr_reset_next_stage;
8438                 return IPR_RC_JOB_CONTINUE;
8439         }
8440
8441         ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
8442         ipr_cmd->timer.function = ipr_oper_timeout;
8443         ipr_cmd->done = ipr_reset_ioa_job;
8444         add_timer(&ipr_cmd->timer);
8445         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8446
8447         LEAVE;
8448         return IPR_RC_JOB_RETURN;
8449 }
8450
8451 /**
8452  * ipr_reset_wait_for_dump - Wait for a dump to time out.
8453  * @ipr_cmd:    ipr command struct
8454  *
8455  * This function is invoked when an adapter dump has run out
8456  * of processing time.
8457  *
8458  * Return value:
8459  *      IPR_RC_JOB_CONTINUE
8460  **/
8461 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8462 {
8463         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8464
8465         if (ioa_cfg->sdt_state == GET_DUMP)
8466                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8467         else if (ioa_cfg->sdt_state == READ_DUMP)
8468                 ioa_cfg->sdt_state = ABORT_DUMP;
8469
8470         ioa_cfg->dump_timeout = 1;
8471         ipr_cmd->job_step = ipr_reset_alert;
8472
8473         return IPR_RC_JOB_CONTINUE;
8474 }
8475
8476 /**
8477  * ipr_unit_check_no_data - Log a unit check/no data error log
8478  * @ioa_cfg:            ioa config struct
8479  *
8480  * Logs an error indicating the adapter unit checked, but for some
8481  * reason, we were unable to fetch the unit check buffer.
8482  *
8483  * Return value:
8484  *      nothing
8485  **/
8486 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8487 {
8488         ioa_cfg->errors_logged++;
8489         dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8490 }
8491
8492 /**
8493  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8494  * @ioa_cfg:            ioa config struct
8495  *
8496  * Fetches the unit check buffer from the adapter by clocking the data
8497  * through the mailbox register.
8498  *
8499  * Return value:
8500  *      nothing
8501  **/
8502 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8503 {
8504         unsigned long mailbox;
8505         struct ipr_hostrcb *hostrcb;
8506         struct ipr_uc_sdt sdt;
8507         int rc, length;
8508         u32 ioasc;
8509
8510         mailbox = readl(ioa_cfg->ioa_mailbox);
8511
8512         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8513                 ipr_unit_check_no_data(ioa_cfg);
8514                 return;
8515         }
8516
8517         memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8518         rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8519                                         (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8520
8521         if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8522             ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8523             (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8524                 ipr_unit_check_no_data(ioa_cfg);
8525                 return;
8526         }
8527
8528         /* Find length of the first sdt entry (UC buffer) */
8529         if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8530                 length = be32_to_cpu(sdt.entry[0].end_token);
8531         else
8532                 length = (be32_to_cpu(sdt.entry[0].end_token) -
8533                           be32_to_cpu(sdt.entry[0].start_token)) &
8534                           IPR_FMT2_MBX_ADDR_MASK;
8535
8536         hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8537                              struct ipr_hostrcb, queue);
8538         list_del_init(&hostrcb->queue);
8539         memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8540
8541         rc = ipr_get_ldump_data_section(ioa_cfg,
8542                                         be32_to_cpu(sdt.entry[0].start_token),
8543                                         (__be32 *)&hostrcb->hcam,
8544                                         min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8545
8546         if (!rc) {
8547                 ipr_handle_log_data(ioa_cfg, hostrcb);
8548                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8549                 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8550                     ioa_cfg->sdt_state == GET_DUMP)
8551                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8552         } else
8553                 ipr_unit_check_no_data(ioa_cfg);
8554
8555         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8556 }
8557
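/*
 * Illustrative note on the copy above: the unit check buffer is clocked in
 * one __be32 at a time, and the copy is clamped so a corrupt length token
 * can never overrun the hostrcb. A minimal model of that clamping, where
 * start_addr stands for be32_to_cpu(sdt.entry[0].start_token):
 *
 *	size_t words = min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32);
 *
 *	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr,
 *					(__be32 *)&hostrcb->hcam, words);
 */
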
8558 /**
8559  * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8560  * @ipr_cmd:    ipr command struct
8561  *
8562  * Description: This function will call to get the unit check buffer.
8563  *
8564  * Return value:
8565  *      IPR_RC_JOB_RETURN
8566  **/
8567 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8568 {
8569         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8570
8571         ENTER;
8572         ioa_cfg->ioa_unit_checked = 0;
8573         ipr_get_unit_check_buffer(ioa_cfg);
8574         ipr_cmd->job_step = ipr_reset_alert;
8575         ipr_reset_start_timer(ipr_cmd, 0);
8576
8577         LEAVE;
8578         return IPR_RC_JOB_RETURN;
8579 }
8580
8581 static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
8582 {
8583         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8584
8585         ENTER;
8586
8587         if (ioa_cfg->sdt_state != GET_DUMP)
8588                 return IPR_RC_JOB_RETURN;
8589
8590         if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
8591             (readl(ioa_cfg->regs.sense_interrupt_reg) &
8592              IPR_PCII_MAILBOX_STABLE)) {
8593
8594                 if (!ipr_cmd->u.time_left)
8595                         dev_err(&ioa_cfg->pdev->dev,
8596                                 "Timed out waiting for Mailbox register.\n");
8597
8598                 ioa_cfg->sdt_state = READ_DUMP;
8599                 ioa_cfg->dump_timeout = 0;
8600                 if (ioa_cfg->sis64)
8601                         ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8602                 else
8603                         ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8604                 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8605                 schedule_work(&ioa_cfg->work_q);
8606
8607         } else {
8608                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8609                 ipr_reset_start_timer(ipr_cmd,
8610                                       IPR_CHECK_FOR_RESET_TIMEOUT);
8611         }
8612
8613         LEAVE;
8614         return IPR_RC_JOB_RETURN;
8615 }
8616
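/*
 * Illustrative sketch of the poll-with-budget pattern used above: each
 * invocation either observes the condition (or exhausts u.time_left) and
 * advances job_step, or spends one IPR_CHECK_FOR_RESET_TIMEOUT tick and
 * re-arms the timer so this same step runs again. condition_met() and
 * next_step are hypothetical placeholders:
 *
 *	if (condition_met() || !ipr_cmd->u.time_left) {
 *		ipr_cmd->job_step = next_step;
 *	} else {
 *		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
 *		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
 *	}
 *	return IPR_RC_JOB_RETURN;
 */
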
8617 /**
8618  * ipr_reset_restore_cfg_space - Restore PCI config space.
8619  * @ipr_cmd:    ipr command struct
8620  *
8621  * Description: This function restores the saved PCI config space of
8622  * the adapter, fails all outstanding ops back to the callers, and
8623  * fetches the dump/unit check if applicable to this reset.
8624  *
8625  * Return value:
8626  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8627  **/
8628 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8629 {
8630         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8631         u32 int_reg;
8632
8633         ENTER;
8634         ioa_cfg->pdev->state_saved = true;
8635         pci_restore_state(ioa_cfg->pdev);
8636
8637         if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8638                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8639                 return IPR_RC_JOB_CONTINUE;
8640         }
8641
8642         ipr_fail_all_ops(ioa_cfg);
8643
8644         if (ioa_cfg->sis64) {
8645                 /* Set the adapter to the correct endian mode. */
8646                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8647                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8648         }
8649
8650         if (ioa_cfg->ioa_unit_checked) {
8651                 if (ioa_cfg->sis64) {
8652                         ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8653                         ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8654                         return IPR_RC_JOB_RETURN;
8655                 } else {
8656                         ioa_cfg->ioa_unit_checked = 0;
8657                         ipr_get_unit_check_buffer(ioa_cfg);
8658                         ipr_cmd->job_step = ipr_reset_alert;
8659                         ipr_reset_start_timer(ipr_cmd, 0);
8660                         return IPR_RC_JOB_RETURN;
8661                 }
8662         }
8663
8664         if (ioa_cfg->in_ioa_bringdown) {
8665                 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8666         } else if (ioa_cfg->sdt_state == GET_DUMP) {
8667                 ipr_cmd->job_step = ipr_dump_mailbox_wait;
8668                 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
8669         } else {
8670                 ipr_cmd->job_step = ipr_reset_enable_ioa;
8671         }
8672
8673         LEAVE;
8674         return IPR_RC_JOB_CONTINUE;
8675 }
8676
8677 /**
8678  * ipr_reset_bist_done - BIST has completed on the adapter.
8679  * @ipr_cmd:    ipr command struct
8680  *
8681  * Description: Unblock config space and resume the reset process.
8682  *
8683  * Return value:
8684  *      IPR_RC_JOB_CONTINUE
8685  **/
8686 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8687 {
8688         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8689
8690         ENTER;
8691         if (ioa_cfg->cfg_locked)
8692                 pci_cfg_access_unlock(ioa_cfg->pdev);
8693         ioa_cfg->cfg_locked = 0;
8694         ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8695         LEAVE;
8696         return IPR_RC_JOB_CONTINUE;
8697 }
8698
8699 /**
8700  * ipr_reset_start_bist - Run BIST on the adapter.
8701  * @ipr_cmd:    ipr command struct
8702  *
8703  * Description: This function runs BIST on the adapter, then delays 2 seconds.
8704  *
8705  * Return value:
8706  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8707  **/
8708 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8709 {
8710         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8711         int rc = PCIBIOS_SUCCESSFUL;
8712
8713         ENTER;
8714         if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8715                 writel(IPR_UPROCI_SIS64_START_BIST,
8716                        ioa_cfg->regs.set_uproc_interrupt_reg32);
8717         else
8718                 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8719
8720         if (rc == PCIBIOS_SUCCESSFUL) {
8721                 ipr_cmd->job_step = ipr_reset_bist_done;
8722                 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8723                 rc = IPR_RC_JOB_RETURN;
8724         } else {
8725                 if (ioa_cfg->cfg_locked)
8726                         pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8727                 ioa_cfg->cfg_locked = 0;
8728                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8729                 rc = IPR_RC_JOB_CONTINUE;
8730         }
8731
8732         LEAVE;
8733         return rc;
8734 }
8735
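/*
 * Illustrative sketch: on chips without the MMIO doorbell, BIST is started
 * through standard PCI config space. The same step in isolation, assuming
 * only a bound struct pci_dev *pdev (PCI_BIST and PCI_BIST_START are the
 * standard definitions from pci_regs.h):
 *
 *	int rc = pci_write_config_byte(pdev, PCI_BIST, PCI_BIST_START);
 *
 * A return other than PCIBIOS_SUCCESSFUL means BIST never started, which
 * is why the error leg above unwinds (unlock config access, fail the job
 * step) instead of arming the two-second wait.
 */
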
8736 /**
8737  * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8738  * @ipr_cmd:    ipr command struct
8739  *
8740  * Description: This clears PCI reset to the adapter and delays two seconds.
8741  *
8742  * Return value:
8743  *      IPR_RC_JOB_RETURN
8744  **/
8745 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8746 {
8747         ENTER;
8748         ipr_cmd->job_step = ipr_reset_bist_done;
8749         ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8750         LEAVE;
8751         return IPR_RC_JOB_RETURN;
8752 }
8753
8754 /**
8755  * ipr_reset_reset_work - Pulse a PCIe fundamental reset
8756  * @work:       work struct
8757  *
8758  * Description: This pulses a warm reset to the slot.
8759  *
8760  **/
8761 static void ipr_reset_reset_work(struct work_struct *work)
8762 {
8763         struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8764         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8765         struct pci_dev *pdev = ioa_cfg->pdev;
8766         unsigned long lock_flags = 0;
8767
8768         ENTER;
8769         pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8770         msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8771         pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8772
8773         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8774         if (ioa_cfg->reset_cmd == ipr_cmd)
8775                 ipr_reset_ioa_job(ipr_cmd);
8776         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8777         LEAVE;
8778 }
8779
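/*
 * Illustrative sketch of the deferred-work pattern above: msleep() cannot
 * be called from the timer/IRQ contexts the reset job normally runs in,
 * so the slot reset is pushed to a workqueue. The handler recovers its
 * owning command with container_of() and re-takes the host lock before
 * touching shared reset state:
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		struct ipr_cmnd *cmd = container_of(work, struct ipr_cmnd, work);
 *		...
 *	}
 *
 * my_work_fn is a hypothetical name; the driver's version is
 * ipr_reset_reset_work() above.
 */
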
8780 /**
8781  * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8782  * @ipr_cmd:    ipr command struct
8783  *
8784  * Description: This asserts PCI reset to the adapter.
8785  *
8786  * Return value:
8787  *      IPR_RC_JOB_RETURN
8788  **/
8789 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8790 {
8791         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8792
8793         ENTER;
8794         INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8795         queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
8796         ipr_cmd->job_step = ipr_reset_slot_reset_done;
8797         LEAVE;
8798         return IPR_RC_JOB_RETURN;
8799 }
8800
8801 /**
8802  * ipr_reset_block_config_access_wait - Wait for permission to block config access
8803  * @ipr_cmd:    ipr command struct
8804  *
8805  * Description: This attempts to block config access to the IOA.
8806  *
8807  * Return value:
8808  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8809  **/
8810 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8811 {
8812         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8813         int rc = IPR_RC_JOB_CONTINUE;
8814
8815         if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8816                 ioa_cfg->cfg_locked = 1;
8817                 ipr_cmd->job_step = ioa_cfg->reset;
8818         } else {
8819                 if (ipr_cmd->u.time_left) {
8820                         rc = IPR_RC_JOB_RETURN;
8821                         ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8822                         ipr_reset_start_timer(ipr_cmd,
8823                                               IPR_CHECK_FOR_RESET_TIMEOUT);
8824                 } else {
8825                         ipr_cmd->job_step = ioa_cfg->reset;
8826                         dev_err(&ioa_cfg->pdev->dev,
8827                                 "Timed out waiting to lock config access. Resetting anyway.\n");
8828                 }
8829         }
8830
8831         return rc;
8832 }
8833
8834 /**
8835  * ipr_reset_block_config_access - Block config access to the IOA
8836  * @ipr_cmd:    ipr command struct
8837  *
8838  * Description: This attempts to block config access to the IOA
8839  *
8840  * Return value:
8841  *      IPR_RC_JOB_CONTINUE
8842  **/
8843 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8844 {
8845         ipr_cmd->ioa_cfg->cfg_locked = 0;
8846         ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8847         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8848         return IPR_RC_JOB_CONTINUE;
8849 }
8850
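/*
 * Illustrative sketch: pci_cfg_access_trylock() is the non-sleeping
 * variant of pci_cfg_access_lock(); it returns true on success and must
 * be paired with pci_cfg_access_unlock(). The two steps above retry it on
 * a timer rather than blocking, because they run under the host lock:
 *
 *	if (pci_cfg_access_trylock(pdev)) {
 *		... config space access by others is now blocked ...
 *		pci_cfg_access_unlock(pdev);
 *	}
 */
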
8851 /**
8852  * ipr_reset_allowed - Query whether or not IOA can be reset
8853  * @ioa_cfg:    ioa config struct
8854  *
8855  * Return value:
8856  *      0 if reset not allowed / non-zero if reset is allowed
8857  **/
8858 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8859 {
8860         volatile u32 temp_reg;
8861
8862         temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8863         return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8864 }
8865
8866 /**
8867  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8868  * @ipr_cmd:    ipr command struct
8869  *
8870  * Description: This function waits for adapter permission to run BIST,
8871  * then runs BIST. If the adapter does not give permission after a
8872  * reasonable time, we will reset the adapter anyway. The impact of
8873  * resetting the adapter without warning the adapter is the risk of
8874  * losing the persistent error log on the adapter. If the adapter is
8875  * reset while it is writing to the flash on the adapter, the flash
8876  * segment will have bad ECC and be zeroed.
8877  *
8878  * Return value:
8879  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8880  **/
8881 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8882 {
8883         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8884         int rc = IPR_RC_JOB_RETURN;
8885
8886         if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8887                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8888                 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8889         } else {
8890                 ipr_cmd->job_step = ipr_reset_block_config_access;
8891                 rc = IPR_RC_JOB_CONTINUE;
8892         }
8893
8894         return rc;
8895 }
8896
8897 /**
8898  * ipr_reset_alert - Alert the adapter of a pending reset
8899  * @ipr_cmd:    ipr command struct
8900  *
8901  * Description: This function alerts the adapter that it will be reset.
8902  * If memory space is not currently enabled, proceed directly
8903  * to running BIST on the adapter. The timer must always be started
8904  * so we guarantee we do not run BIST from ipr_isr.
8905  *
8906  * Return value:
8907  *      IPR_RC_JOB_RETURN
8908  **/
8909 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8910 {
8911         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8912         u16 cmd_reg;
8913         int rc;
8914
8915         ENTER;
8916         rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8917
8918         if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8919                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8920                 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8921                 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8922         } else {
8923                 ipr_cmd->job_step = ipr_reset_block_config_access;
8924         }
8925
8926         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8927         ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8928
8929         LEAVE;
8930         return IPR_RC_JOB_RETURN;
8931 }
8932
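/*
 * Illustrative sketch: the memory-space test in ipr_reset_alert() is a
 * plain check of the PCI command register; if PCI_COMMAND_MEMORY is clear,
 * MMIO writes such as the reset-alert doorbell would be discarded, so the
 * job skips straight to blocking config access:
 *
 *	u16 cmd;
 *
 *	if (pci_read_config_word(pdev, PCI_COMMAND, &cmd) == PCIBIOS_SUCCESSFUL &&
 *	    (cmd & PCI_COMMAND_MEMORY))
 *		... the MMIO path is usable ...
 */
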
8933 /**
8934  * ipr_reset_quiesce_done - Complete IOA disconnect
8935  * @ipr_cmd:    ipr command struct
8936  *
8937  * Description: Freeze the adapter to complete quiesce processing
8938  *
8939  * Return value:
8940  *      IPR_RC_JOB_CONTINUE
8941  **/
8942 static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8943 {
8944         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8945
8946         ENTER;
8947         ipr_cmd->job_step = ipr_ioa_bringdown_done;
8948         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8949         LEAVE;
8950         return IPR_RC_JOB_CONTINUE;
8951 }
8952
8953 /**
8954  * ipr_reset_cancel_hcam_done - Check for outstanding commands
8955  * @ipr_cmd:    ipr command struct
8956  *
8957  * Description: Ensure nothing is outstanding to the IOA and
8958  * proceed with IOA disconnect. Otherwise reset the IOA.
8959  *
8960  * Return value:
8961  *      IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
8962  **/
8963 static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
8964 {
8965         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8966         struct ipr_cmnd *loop_cmd;
8967         struct ipr_hrr_queue *hrrq;
8968         int rc = IPR_RC_JOB_CONTINUE;
8969         int count = 0;
8970
8971         ENTER;
8972         ipr_cmd->job_step = ipr_reset_quiesce_done;
8973
8974         for_each_hrrq(hrrq, ioa_cfg) {
8975                 spin_lock(&hrrq->_lock);
8976                 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
8977                         count++;
8978                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8979                         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
8980                         rc = IPR_RC_JOB_RETURN;
8981                         break;
8982                 }
8983                 spin_unlock(&hrrq->_lock);
8984
8985                 if (count)
8986                         break;
8987         }
8988
8989         LEAVE;
8990         return rc;
8991 }
8992
8993 /**
8994  * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
8995  * @ipr_cmd:    ipr command struct
8996  *
8997  * Description: Cancel any outstanding HCAMs to the IOA.
8998  *
8999  * Return value:
9000  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9001  **/
9002 static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
9003 {
9004         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9005         int rc = IPR_RC_JOB_CONTINUE;
9006         struct ipr_cmd_pkt *cmd_pkt;
9007         struct ipr_cmnd *hcam_cmd;
9008         struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
9009
9010         ENTER;
9011         ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
9012
9013         if (!hrrq->ioa_is_dead) {
9014                 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
9015                         list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
9016                                 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
9017                                         continue;
9018
9019                                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9020                                 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
9021                                 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
9023                                 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
9024                                 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
9025                                 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
9026                                 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
9027                                 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
9028                                 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
9029                                 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
9030                                 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
9031                                 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
9032                                 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
9033
9034                                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9035                                            IPR_CANCEL_TIMEOUT);
9036
9037                                 rc = IPR_RC_JOB_RETURN;
9038                                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9039                                 break;
9040                         }
9041                 }
9042         } else
9043                 ipr_cmd->job_step = ipr_reset_alert;
9044
9045         LEAVE;
9046         return rc;
9047 }
9048
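/*
 * Illustrative sketch of the cancel CDB built above: the 64-bit IOARCB
 * address of the HCAM being cancelled is stored big-endian, high word in
 * cdb[10..13] and low word in cdb[2..5]. A hypothetical helper expressing
 * the same packing:
 *
 *	static void pack_cancel_addr(u8 *cdb, u64 addr)
 *	{
 *		int i;
 *
 *		for (i = 0; i < 4; i++) {
 *			cdb[10 + i] = (addr >> (56 - 8 * i)) & 0xff;
 *			cdb[2 + i] = (addr >> (24 - 8 * i)) & 0xff;
 *		}
 *	}
 */
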
9049 /**
9050  * ipr_reset_ucode_download_done - Microcode download completion
9051  * @ipr_cmd:    ipr command struct
9052  *
9053  * Description: This function unmaps the microcode download buffer.
9054  *
9055  * Return value:
9056  *      IPR_RC_JOB_CONTINUE
9057  **/
9058 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
9059 {
9060         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9061         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9062
9063         dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
9064                      sglist->num_sg, DMA_TO_DEVICE);
9065
9066         ipr_cmd->job_step = ipr_reset_alert;
9067         return IPR_RC_JOB_CONTINUE;
9068 }
9069
9070 /**
9071  * ipr_reset_ucode_download - Download microcode to the adapter
9072  * @ipr_cmd:    ipr command struct
9073  *
9074  * Description: This function checks to see if there is microcode
9075  * to download to the adapter. If there is, a download is performed.
9076  *
9077  * Return value:
9078  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9079  **/
9080 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
9081 {
9082         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9083         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9084
9085         ENTER;
9086         ipr_cmd->job_step = ipr_reset_alert;
9087
9088         if (!sglist)
9089                 return IPR_RC_JOB_CONTINUE;
9090
9091         ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9092         ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
9093         ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
9094         ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
9095         ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
9096         ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
9097         ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
9098
9099         if (ioa_cfg->sis64)
9100                 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
9101         else
9102                 ipr_build_ucode_ioadl(ipr_cmd, sglist);
9103         ipr_cmd->job_step = ipr_reset_ucode_download_done;
9104
9105         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9106                    IPR_WRITE_BUFFER_TIMEOUT);
9107
9108         LEAVE;
9109         return IPR_RC_JOB_RETURN;
9110 }
9111
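/*
 * Illustrative note: WRITE BUFFER carries a 24-bit parameter list length
 * in CDB bytes 6..8, most significant byte first, which is exactly what
 * the shifts above implement. With len standing for sglist->buffer_len:
 *
 *	cdb[6] = (len >> 16) & 0xff;
 *	cdb[7] = (len >> 8) & 0xff;
 *	cdb[8] = len & 0xff;
 */
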
9112 /**
9113  * ipr_reset_shutdown_ioa - Shutdown the adapter
9114  * @ipr_cmd:    ipr command struct
9115  *
9116  * Description: This function issues an adapter shutdown of the
9117  * specified type to the specified adapter as part of the
9118  * adapter reset job.
9119  *
9120  * Return value:
9121  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9122  **/
9123 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
9124 {
9125         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9126         enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
9127         unsigned long timeout;
9128         int rc = IPR_RC_JOB_CONTINUE;
9129
9130         ENTER;
9131         if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
9132                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9133         else if (shutdown_type != IPR_SHUTDOWN_NONE &&
9134                         !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
9135                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9136                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9137                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9138                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
9139
9140                 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
9141                         timeout = IPR_SHUTDOWN_TIMEOUT;
9142                 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
9143                         timeout = IPR_INTERNAL_TIMEOUT;
9144                 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
9145                         timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
9146                 else
9147                         timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
9148
9149                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
9150
9151                 rc = IPR_RC_JOB_RETURN;
9152                 ipr_cmd->job_step = ipr_reset_ucode_download;
9153         } else
9154                 ipr_cmd->job_step = ipr_reset_alert;
9155
9156         LEAVE;
9157         return rc;
9158 }
9159
9160 /**
9161  * ipr_reset_ioa_job - Adapter reset job
9162  * @ipr_cmd:    ipr command struct
9163  *
9164  * Description: This function is the job router for the adapter reset job.
9165  *
9166  * Return value:
9167  *      none
9168  **/
9169 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
9170 {
9171         u32 rc, ioasc;
9172         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9173
9174         do {
9175                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
9176
9177                 if (ioa_cfg->reset_cmd != ipr_cmd) {
9178                         /*
9179                          * We are doing nested adapter resets and this is
9180                          * not the current reset job.
9181                          */
9182                         list_add_tail(&ipr_cmd->queue,
9183                                         &ipr_cmd->hrrq->hrrq_free_q);
9184                         return;
9185                 }
9186
9187                 if (IPR_IOASC_SENSE_KEY(ioasc)) {
9188                         rc = ipr_cmd->job_step_failed(ipr_cmd);
9189                         if (rc == IPR_RC_JOB_RETURN)
9190                                 return;
9191                 }
9192
9193                 ipr_reinit_ipr_cmnd(ipr_cmd);
9194                 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
9195                 rc = ipr_cmd->job_step(ipr_cmd);
9196         } while (rc == IPR_RC_JOB_CONTINUE);
9197 }
9198
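/*
 * Illustrative sketch of the state machine ipr_reset_ioa_job() drives: a
 * job step returns IPR_RC_JOB_CONTINUE to have the next step executed
 * synchronously by the loop above, or IPR_RC_JOB_RETURN after arranging
 * (via timer, workqueue, or command completion) for ipr_reset_ioa_job()
 * to be re-entered later. A hypothetical step following that contract:
 *
 *	static int example_step(struct ipr_cmnd *ipr_cmd)
 *	{
 *		ipr_cmd->job_step = example_next_step;
 *		if (can_finish_now)
 *			return IPR_RC_JOB_CONTINUE;
 *		ipr_reset_start_timer(ipr_cmd, HZ);
 *		return IPR_RC_JOB_RETURN;
 *	}
 */
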
9199 /**
9200  * _ipr_initiate_ioa_reset - Initiate an adapter reset
9201  * @ioa_cfg:            ioa config struct
9202  * @job_step:           first job step of reset job
9203  * @shutdown_type:      shutdown type
9204  *
9205  * Description: This function will initiate the reset of the given adapter
9206  * starting at the selected job step.
9207  * If the caller needs to wait on the completion of the reset,
9208  * the caller must sleep on the reset_wait_q.
9209  *
9210  * Return value:
9211  *      none
9212  **/
9213 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9214                                     int (*job_step) (struct ipr_cmnd *),
9215                                     enum ipr_shutdown_type shutdown_type)
9216 {
9217         struct ipr_cmnd *ipr_cmd;
9218         int i;
9219
9220         ioa_cfg->in_reset_reload = 1;
9221         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9222                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9223                 ioa_cfg->hrrq[i].allow_cmds = 0;
9224                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9225         }
9226         wmb();
9227         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9228                 ioa_cfg->scsi_unblock = 0;
9229                 ioa_cfg->scsi_blocked = 1;
9230                 scsi_block_requests(ioa_cfg->host);
9231         }
9232
9233         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9234         ioa_cfg->reset_cmd = ipr_cmd;
9235         ipr_cmd->job_step = job_step;
9236         ipr_cmd->u.shutdown_type = shutdown_type;
9237
9238         ipr_reset_ioa_job(ipr_cmd);
9239 }
9240
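/*
 * Illustrative usage sketch (per the locking rules described above): start
 * a reset and sleep until it completes, from a context that may sleep:
 *
 *	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
 *	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
 *	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 *	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 */
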
9241 /**
9242  * ipr_initiate_ioa_reset - Initiate an adapter reset
9243  * @ioa_cfg:            ioa config struct
9244  * @shutdown_type:      shutdown type
9245  *
9246  * Description: This function will initiate the reset of the given adapter.
9247  * If the caller needs to wait on the completion of the reset,
9248  * the caller must sleep on the reset_wait_q.
9249  *
9250  * Return value:
9251  *      none
9252  **/
9253 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9254                                    enum ipr_shutdown_type shutdown_type)
9255 {
9256         int i;
9257
9258         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
9259                 return;
9260
9261         if (ioa_cfg->in_reset_reload) {
9262                 if (ioa_cfg->sdt_state == GET_DUMP)
9263                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9264                 else if (ioa_cfg->sdt_state == READ_DUMP)
9265                         ioa_cfg->sdt_state = ABORT_DUMP;
9266         }
9267
9268         if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
9269                 dev_err(&ioa_cfg->pdev->dev,
9270                         "IOA taken offline - error recovery failed\n");
9271
9272                 ioa_cfg->reset_retries = 0;
9273                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9274                         spin_lock(&ioa_cfg->hrrq[i]._lock);
9275                         ioa_cfg->hrrq[i].ioa_is_dead = 1;
9276                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
9277                 }
9278                 wmb();
9279
9280                 if (ioa_cfg->in_ioa_bringdown) {
9281                         ioa_cfg->reset_cmd = NULL;
9282                         ioa_cfg->in_reset_reload = 0;
9283                         ipr_fail_all_ops(ioa_cfg);
9284                         wake_up_all(&ioa_cfg->reset_wait_q);
9285
9286                         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9287                                 ioa_cfg->scsi_unblock = 1;
9288                                 schedule_work(&ioa_cfg->work_q);
9289                         }
9290                         return;
9291                 } else {
9292                         ioa_cfg->in_ioa_bringdown = 1;
9293                         shutdown_type = IPR_SHUTDOWN_NONE;
9294                 }
9295         }
9296
9297         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
9298                                 shutdown_type);
9299 }
9300
9301 /**
9302  * ipr_reset_freeze - Hold off all I/O activity
9303  * @ipr_cmd:    ipr command struct
9304  *
9305  * Description: If the PCI slot is frozen, hold off all I/O
9306  * activity; then, as soon as the slot is available again,
9307  * initiate an adapter reset.
9308  */
9309 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
9310 {
9311         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9312         int i;
9313
9314         /* Disallow new interrupts, avoid loop */
9315         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9316                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9317                 ioa_cfg->hrrq[i].allow_interrupts = 0;
9318                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9319         }
9320         wmb();
9321         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
9322         ipr_cmd->done = ipr_reset_ioa_job;
9323         return IPR_RC_JOB_RETURN;
9324 }
9325
9326 /**
9327  * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9328  * @pdev:       PCI device struct
9329  *
9330  * Description: This routine is called to tell us that the MMIO
9331  * access to the IOA has been restored
9332  */
9333 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
9334 {
9335         unsigned long flags = 0;
9336         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9337
9338         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9339         if (!ioa_cfg->probe_done)
9340                 pci_save_state(pdev);
9341         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9342         return PCI_ERS_RESULT_NEED_RESET;
9343 }
9344
9345 /**
9346  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9347  * @pdev:       PCI device struct
9348  *
9349  * Description: This routine is called to tell us that the PCI bus
9350  * is down. Can't do anything here, except put the device driver
9351  * into a holding pattern, waiting for the PCI bus to come back.
9352  */
9353 static void ipr_pci_frozen(struct pci_dev *pdev)
9354 {
9355         unsigned long flags = 0;
9356         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9357
9358         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9359         if (ioa_cfg->probe_done)
9360                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
9361         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9362 }
9363
9364 /**
9365  * ipr_pci_slot_reset - Called when PCI slot has been reset.
9366  * @pdev:       PCI device struct
9367  *
9368  * Description: This routine is called by the pci error recovery
9369  * code after the PCI slot has been reset, just before we
9370  * should resume normal operations.
9371  */
9372 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
9373 {
9374         unsigned long flags = 0;
9375         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9376
9377         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9378         if (ioa_cfg->probe_done) {
9379                 if (ioa_cfg->needs_warm_reset)
9380                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9381                 else
9382                         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9383                                                 IPR_SHUTDOWN_NONE);
9384         } else
9385                 wake_up_all(&ioa_cfg->eeh_wait_q);
9386         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9387         return PCI_ERS_RESULT_RECOVERED;
9388 }
9389
9390 /**
9391  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9392  * @pdev:       PCI device struct
9393  *
9394  * Description: This routine is called when the PCI bus has
9395  * permanently failed.
9396  */
9397 static void ipr_pci_perm_failure(struct pci_dev *pdev)
9398 {
9399         unsigned long flags = 0;
9400         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9401         int i;
9402
9403         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9404         if (ioa_cfg->probe_done) {
9405                 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9406                         ioa_cfg->sdt_state = ABORT_DUMP;
9407                 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9408                 ioa_cfg->in_ioa_bringdown = 1;
9409                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9410                         spin_lock(&ioa_cfg->hrrq[i]._lock);
9411                         ioa_cfg->hrrq[i].allow_cmds = 0;
9412                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
9413                 }
9414                 wmb();
9415                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9416         } else
9417                 wake_up_all(&ioa_cfg->eeh_wait_q);
9418         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9419 }
9420
9421 /**
9422  * ipr_pci_error_detected - Called when a PCI error is detected.
9423  * @pdev:       PCI device struct
9424  * @state:      PCI channel state
9425  *
9426  * Description: Called when a PCI error is detected.
9427  *
9428  * Return value:
9429  *      PCI_ERS_RESULT_CAN_RECOVER, PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9430  */
9431 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9432                                                pci_channel_state_t state)
9433 {
9434         switch (state) {
9435         case pci_channel_io_frozen:
9436                 ipr_pci_frozen(pdev);
9437                 return PCI_ERS_RESULT_CAN_RECOVER;
9438         case pci_channel_io_perm_failure:
9439                 ipr_pci_perm_failure(pdev);
9440                 return PCI_ERS_RESULT_DISCONNECT;
9442         default:
9443                 break;
9444         }
9445         return PCI_ERS_RESULT_NEED_RESET;
9446 }
9447
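/*
 * Illustrative sketch: these callbacks plug into the PCI error recovery
 * core through a struct pci_error_handlers referenced from the driver's
 * struct pci_driver (field names per <linux/pci.h>):
 *
 *	static const struct pci_error_handlers ipr_err_handler = {
 *		.error_detected = ipr_pci_error_detected,
 *		.mmio_enabled = ipr_pci_mmio_enabled,
 *		.slot_reset = ipr_pci_slot_reset,
 *	};
 */
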
9448 /**
9449  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9450  * @ioa_cfg:    ioa cfg struct
9451  *
9452  * Description: This is the second phase of adapter initialization
9453  * This function takes care of initilizing the adapter to the point
9454  * where it can accept new commands.
9455
9456  * Return value:
9457  *      0 on success / -EIO on failure
9458  *      0 on success
9459 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
9460 {
9461         int rc = 0;
9462         unsigned long host_lock_flags = 0;
9463
9464         ENTER;
9465         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9466         dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
9467         ioa_cfg->probe_done = 1;
9468         if (ioa_cfg->needs_hard_reset) {
9469                 ioa_cfg->needs_hard_reset = 0;
9470                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9471         } else
9472                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9473                                         IPR_SHUTDOWN_NONE);
9474         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9475
9476         LEAVE;
9477         return rc;
9478 }
9479
9480 /**
9481  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9482  * @ioa_cfg:    ioa config struct
9483  *
9484  * Return value:
9485  *      none
9486  **/
9487 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9488 {
9489         int i;
9490
9491         if (ioa_cfg->ipr_cmnd_list) {
9492                 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9493                         if (ioa_cfg->ipr_cmnd_list[i])
9494                                 dma_pool_free(ioa_cfg->ipr_cmd_pool,
9495                                               ioa_cfg->ipr_cmnd_list[i],
9496                                               ioa_cfg->ipr_cmnd_list_dma[i]);
9497
9498                         ioa_cfg->ipr_cmnd_list[i] = NULL;
9499                 }
9500         }
9501
9502         dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
9504
9505         kfree(ioa_cfg->ipr_cmnd_list);
9506         kfree(ioa_cfg->ipr_cmnd_list_dma);
9507         ioa_cfg->ipr_cmnd_list = NULL;
9508         ioa_cfg->ipr_cmnd_list_dma = NULL;
9509         ioa_cfg->ipr_cmd_pool = NULL;
9510 }
9511
9512 /**
9513  * ipr_free_mem - Frees memory allocated for an adapter
9514  * @ioa_cfg:    ioa cfg struct
9515  *
9516  * Return value:
9517  *      nothing
9518  **/
9519 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9520 {
9521         int i;
9522
9523         kfree(ioa_cfg->res_entries);
9524         dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9525                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9526         ipr_free_cmd_blks(ioa_cfg);
9527
9528         for (i = 0; i < ioa_cfg->hrrq_num; i++)
9529                 dma_free_coherent(&ioa_cfg->pdev->dev,
9530                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9531                                   ioa_cfg->hrrq[i].host_rrq,
9532                                   ioa_cfg->hrrq[i].host_rrq_dma);
9533
9534         dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9535                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9536
9537         for (i = 0; i < IPR_MAX_HCAMS; i++) {
9538                 dma_free_coherent(&ioa_cfg->pdev->dev,
9539                                   sizeof(struct ipr_hostrcb),
9540                                   ioa_cfg->hostrcb[i],
9541                                   ioa_cfg->hostrcb_dma[i]);
9542         }
9543
9544         ipr_free_dump(ioa_cfg);
9545         kfree(ioa_cfg->trace);
9546 }
9547
9548 /**
9549  * ipr_free_irqs - Free all allocated IRQs for the adapter.
9550  * @ioa_cfg:    ipr cfg struct
9551  *
9552  * This function frees all allocated IRQs for the
9553  * specified adapter.
9554  *
9555  * Return value:
9556  *      none
9557  **/
9558 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9559 {
9560         struct pci_dev *pdev = ioa_cfg->pdev;
9561         int i;
9562
9563         for (i = 0; i < ioa_cfg->nvectors; i++)
9564                 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
9565         pci_free_irq_vectors(pdev);
9566 }
9567
9568 /**
9569  * ipr_free_all_resources - Free all allocated resources for an adapter.
9570  * @ioa_cfg:    ioa config struct
9571  *
9572  * This function frees all allocated resources for the
9573  * specified adapter.
9574  *
9575  * Return value:
9576  *      none
9577  **/
9578 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9579 {
9580         struct pci_dev *pdev = ioa_cfg->pdev;
9581
9582         ENTER;
9583         ipr_free_irqs(ioa_cfg);
9584         if (ioa_cfg->reset_work_q)
9585                 destroy_workqueue(ioa_cfg->reset_work_q);
9586         iounmap(ioa_cfg->hdw_dma_regs);
9587         pci_release_regions(pdev);
9588         ipr_free_mem(ioa_cfg);
9589         scsi_host_put(ioa_cfg->host);
9590         pci_disable_device(pdev);
9591         LEAVE;
9592 }
9593
9594 /**
9595  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9596  * @ioa_cfg:    ioa config struct
9597  *
9598  * Return value:
9599  *      0 on success / -ENOMEM on allocation failure
9600  **/
9601 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9602 {
9603         struct ipr_cmnd *ipr_cmd;
9604         struct ipr_ioarcb *ioarcb;
9605         dma_addr_t dma_addr;
9606         int i, entries_each_hrrq, hrrq_id = 0;
9607
9608         ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
9609                                                 sizeof(struct ipr_cmnd), 512, 0);
9610
9611         if (!ioa_cfg->ipr_cmd_pool)
9612                 return -ENOMEM;
9613
9614         ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9615         ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9616
9617         if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9618                 ipr_free_cmd_blks(ioa_cfg);
9619                 return -ENOMEM;
9620         }
9621
9622         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9623                 if (ioa_cfg->hrrq_num > 1) {
9624                         if (i == 0) {
9625                                 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9626                                 ioa_cfg->hrrq[i].min_cmd_id = 0;
9627                                 ioa_cfg->hrrq[i].max_cmd_id =
9628                                         (entries_each_hrrq - 1);
9629                         } else {
9630                                 entries_each_hrrq =
9631                                         IPR_NUM_BASE_CMD_BLKS/
9632                                         (ioa_cfg->hrrq_num - 1);
9633                                 ioa_cfg->hrrq[i].min_cmd_id =
9634                                         IPR_NUM_INTERNAL_CMD_BLKS +
9635                                         (i - 1) * entries_each_hrrq;
9636                                 ioa_cfg->hrrq[i].max_cmd_id =
9637                                         (IPR_NUM_INTERNAL_CMD_BLKS +
9638                                         i * entries_each_hrrq - 1);
9639                         }
9640                 } else {
9641                         entries_each_hrrq = IPR_NUM_CMD_BLKS;
9642                         ioa_cfg->hrrq[i].min_cmd_id = 0;
9643                         ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9644                 }
9645                 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9646         }
9647
9648         BUG_ON(ioa_cfg->hrrq_num == 0);
9649
9650         i = IPR_NUM_CMD_BLKS -
9651                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9652         if (i > 0) {
9653                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9654                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9655         }
9656
9657         for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9658                 ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool,
9659                                 GFP_KERNEL, &dma_addr);
9660
9661                 if (!ipr_cmd) {
9662                         ipr_free_cmd_blks(ioa_cfg);
9663                         return -ENOMEM;
9664                 }
9665
9666                 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9667                 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9668
9669                 ioarcb = &ipr_cmd->ioarcb;
9670                 ipr_cmd->dma_addr = dma_addr;
9671                 if (ioa_cfg->sis64)
9672                         ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9673                 else
9674                         ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9675
9676                 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9677                 if (ioa_cfg->sis64) {
9678                         ioarcb->u.sis64_addr_data.data_ioadl_addr =
9679                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9680                         ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9681                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9682                 } else {
9683                         ioarcb->write_ioadl_addr =
9684                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9685                         ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9686                         ioarcb->ioasa_host_pci_addr =
9687                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9688                 }
9689                 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9690                 ipr_cmd->cmd_index = i;
9691                 ipr_cmd->ioa_cfg = ioa_cfg;
9692                 ipr_cmd->sense_buffer_dma = dma_addr +
9693                         offsetof(struct ipr_cmnd, sense_buffer);
9694
9695                 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9696                 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9697                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9698                 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9699                         hrrq_id++;
9700         }
9701
9702         return 0;
9703 }
9704
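/*
 * Illustrative sketch of the dma_pool lifecycle above: one pool per
 * adapter, created with 512-byte alignment so every IOARCB the hardware
 * fetches is naturally aligned; blocks come back with their bus address,
 * and teardown (ipr_free_cmd_blks()) returns them before destroying the
 * pool:
 *
 *	pool = dma_pool_create(IPR_NAME, &pdev->dev, size, 512, 0);
 *	blk = dma_pool_zalloc(pool, GFP_KERNEL, &dma_addr);
 *	...
 *	dma_pool_free(pool, blk, dma_addr);
 *	dma_pool_destroy(pool);
 */
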
9705 /**
9706  * ipr_alloc_mem - Allocate memory for an adapter
9707  * @ioa_cfg:    ioa config struct
9708  *
9709  * Return value:
9710  *      0 on success / non-zero for error
9711  **/
9712 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9713 {
9714         struct pci_dev *pdev = ioa_cfg->pdev;
9715         int i, rc = -ENOMEM;
9716
9717         ENTER;
9718         ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported,
9719                                        sizeof(struct ipr_resource_entry),
9720                                        GFP_KERNEL);
9721
9722         if (!ioa_cfg->res_entries)
9723                 goto out;
9724
9725         for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9726                 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9727                 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9728         }
9729
9730         ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9731                                               sizeof(struct ipr_misc_cbs),
9732                                               &ioa_cfg->vpd_cbs_dma,
9733                                               GFP_KERNEL);
9734
9735         if (!ioa_cfg->vpd_cbs)
9736                 goto out_free_res_entries;
9737
9738         if (ipr_alloc_cmd_blks(ioa_cfg))
9739                 goto out_free_vpd_cbs;
9740
9741         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9742                 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9743                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9744                                         &ioa_cfg->hrrq[i].host_rrq_dma,
9745                                         GFP_KERNEL);
9746
9747                 if (!ioa_cfg->hrrq[i].host_rrq)  {
9748                         while (--i >= 0)
9749                                 dma_free_coherent(&pdev->dev,
9750                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9751                                         ioa_cfg->hrrq[i].host_rrq,
9752                                         ioa_cfg->hrrq[i].host_rrq_dma);
9753                         goto out_ipr_free_cmd_blocks;
9754                 }
9755                 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9756         }
9757
9758         ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9759                                                   ioa_cfg->cfg_table_size,
9760                                                   &ioa_cfg->cfg_table_dma,
9761                                                   GFP_KERNEL);
9762
9763         if (!ioa_cfg->u.cfg_table)
9764                 goto out_free_host_rrq;
9765
9766         for (i = 0; i < IPR_MAX_HCAMS; i++) {
9767                 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9768                                                          sizeof(struct ipr_hostrcb),
9769                                                          &ioa_cfg->hostrcb_dma[i],
9770                                                          GFP_KERNEL);
9771
9772                 if (!ioa_cfg->hostrcb[i])
9773                         goto out_free_hostrcb_dma;
9774
9775                 ioa_cfg->hostrcb[i]->hostrcb_dma =
9776                         ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9777                 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9778                 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9779         }
9780
9781         ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES,
9782                                  sizeof(struct ipr_trace_entry),
9783                                  GFP_KERNEL);
9784
9785         if (!ioa_cfg->trace)
9786                 goto out_free_hostrcb_dma;
9787
9788         rc = 0;
9789 out:
9790         LEAVE;
9791         return rc;
9792
9793 out_free_hostrcb_dma:
9794         while (i-- > 0) {
9795                 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9796                                   ioa_cfg->hostrcb[i],
9797                                   ioa_cfg->hostrcb_dma[i]);
9798         }
9799         dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9800                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9801 out_free_host_rrq:
9802         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9803                 dma_free_coherent(&pdev->dev,
9804                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9805                                   ioa_cfg->hrrq[i].host_rrq,
9806                                   ioa_cfg->hrrq[i].host_rrq_dma);
9807         }
9808 out_ipr_free_cmd_blocks:
9809         ipr_free_cmd_blks(ioa_cfg);
9810 out_free_vpd_cbs:
9811         dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9812                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9813 out_free_res_entries:
9814         kfree(ioa_cfg->res_entries);
9815         goto out;
9816 }
9817
9818 /**
9819  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9820  * @ioa_cfg:    ioa config struct
9821  *
9822  * Return value:
9823  *      none
9824  **/
9825 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9826 {
9827         int i;
9828
9829         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9830                 ioa_cfg->bus_attr[i].bus = i;
9831                 ioa_cfg->bus_attr[i].qas_enabled = 0;
9832                 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9833                 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9834                         ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9835                 else
9836                         ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9837         }
9838 }

/**
 * ipr_init_regs - Initialize IOA registers
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	none
 **/
static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
{
	const struct ipr_interrupt_offsets *p;
	struct ipr_interrupts *t;
	void __iomem *base;

	p = &ioa_cfg->chip_cfg->regs;
	t = &ioa_cfg->regs;
	base = ioa_cfg->hdw_dma_regs;

	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
	t->ioarrin_reg = base + p->ioarrin_reg;
	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;

	if (ioa_cfg->sis64) {
		t->init_feedback_reg = base + p->init_feedback_reg;
		t->dump_addr_reg = base + p->dump_addr_reg;
		t->dump_data_reg = base + p->dump_data_reg;
		t->endian_swap_reg = base + p->endian_swap_reg;
	}
}
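
/*
 * Once ipr_init_regs() has run, interrupt registers are accessed through
 * the precomputed virtual addresses rather than by re-adding chip offsets,
 * e.g. as done throughout the rest of this driver:
 *
 *	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
 */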

/**
 * ipr_init_ioa_cfg - Initialize IOA config struct
 * @ioa_cfg:	ioa config struct
 * @host:	scsi host struct
 * @pdev:	PCI dev struct
 *
 * Return value:
 *	none
 **/
static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
			     struct Scsi_Host *host, struct pci_dev *pdev)
{
	int i;

	ioa_cfg->host = host;
	ioa_cfg->pdev = pdev;
	ioa_cfg->log_level = ipr_log_level;
	ioa_cfg->doorbell = IPR_DOORBELL;
	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);

	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
	init_waitqueue_head(&ioa_cfg->reset_wait_q);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	init_waitqueue_head(&ioa_cfg->eeh_wait_q);
	ioa_cfg->sdt_state = INACTIVE;

	ipr_initialize_bus_attr(ioa_cfg);
	ioa_cfg->max_devs_supported = ipr_max_devs;

	if (ioa_cfg->sis64) {
		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
					   + ((sizeof(struct ipr_config_table_entry64)
					       * ioa_cfg->max_devs_supported)));
	} else {
		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
					   + ((sizeof(struct ipr_config_table_entry)
					       * ioa_cfg->max_devs_supported)));
	}

	host->max_channel = IPR_VSET_BUS;
	host->unique_id = host->host_no;
	host->max_cmd_len = IPR_MAX_CDB_LEN;
	host->can_queue = ioa_cfg->max_cmds;
	pci_set_drvdata(pdev, ioa_cfg);

	for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
		spin_lock_init(&ioa_cfg->hrrq[i]._lock);
		if (i == 0)
			ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
		else
			ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
	}
}
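
/*
 * Worked example of the cfg_table_size arithmetic above. The numbers here
 * are illustrative only; the real sizes come from the struct definitions in
 * ipr.h. With a hypothetical 16-byte header and 64-byte entries, supporting
 * 1024 devices would require:
 *
 *	16 + (64 * 1024) = 65552 bytes for the SIS64 config table
 */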

/**
 * ipr_get_chip_info - Find adapter chip information
 * @dev_id:	PCI device id struct
 *
 * Return value:
 *	ptr to chip information on success / NULL on failure
 **/
static const struct ipr_chip_t *
ipr_get_chip_info(const struct pci_device_id *dev_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
		if (ipr_chip[i].vendor == dev_id->vendor &&
		    ipr_chip[i].device == dev_id->device)
			return &ipr_chip[i];
	return NULL;
}

/**
 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
 *				   during probe time
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	none
 **/
static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	if (pci_channel_offline(pdev)) {
		wait_event_timeout(ioa_cfg->eeh_wait_q,
				   !pci_channel_offline(pdev),
				   IPR_PCI_ERROR_RECOVERY_TIMEOUT);
		pci_restore_state(pdev);
	}
}

/**
 * name_msi_vectors - Name the MSI/MSI-X vectors for this adapter
 * @ioa_cfg:	ioa config struct
 *
 * Each vector is labeled "host<host_no>-<vector index>".
 *
 * Return value:
 *	none
 **/
static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
{
	int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;

	for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
		snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
			 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
		ioa_cfg->vectors_info[vec_idx].desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
	}
}

/**
 * ipr_request_other_msi_irqs - Request IRQs for MSI/MSI-X vectors 1 and up
 * @ioa_cfg:	ioa config struct
 * @pdev:	PCI device struct
 *
 * Vector 0 is requested separately by the caller. On failure, all
 * previously requested vectors, including vector 0, are freed again
 * before returning.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
		struct pci_dev *pdev)
{
	int i, rc;

	for (i = 1; i < ioa_cfg->nvectors; i++) {
		rc = request_irq(pci_irq_vector(pdev, i),
			ipr_isr_mhrrq,
			0,
			ioa_cfg->vectors_info[i].desc,
			&ioa_cfg->hrrq[i]);
		if (rc) {
			while (--i >= 0)
				free_irq(pci_irq_vector(pdev, i),
					&ioa_cfg->hrrq[i]);
			return rc;
		}
	}
	return 0;
}
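
/*
 * The desc strings built by name_msi_vectors() become the names passed to
 * request_irq() above, so each vector shows up individually in
 * /proc/interrupts. For example (the host number is hypothetical):
 *
 *	host2-0		<- vector 0, requested in ipr_probe_ioa()
 *	host2-1		<- vector 1, requested here
 */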

/**
 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
 * @irq:	interrupt number
 * @devp:	pointer to ioa config struct
 *
 * Description: Simply set the msi_received flag to 1 indicating that
 * Message Signaled Interrupts are supported.
 *
 * Return value:
 *	IRQ_HANDLED
 **/
static irqreturn_t ipr_test_intr(int irq, void *devp)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
	unsigned long lock_flags = 0;
	irqreturn_t rc = IRQ_HANDLED;

	dev_info(&ioa_cfg->pdev->dev, "Received IRQ: %d\n", irq);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->msi_received = 1;
	wake_up(&ioa_cfg->msi_wait_q);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return rc;
}

/**
 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 * @ioa_cfg:	ioa config struct
 * @pdev:	PCI device struct
 *
 * Description: This routine sets up and initiates a test interrupt to determine
 * if the interrupt is received via the ipr_test_intr() service routine.
 * If the test fails, the driver will fall back to LSI.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
{
	int rc;
	volatile u32 int_reg;
	unsigned long lock_flags = 0;
	int irq = pci_irq_vector(pdev, 0);

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	ioa_cfg->msi_received = 0;
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
	if (rc) {
		dev_err(&pdev->dev, "Cannot assign irq %d\n", irq);
		return rc;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);

	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);

	if (!ioa_cfg->msi_received) {
		/* MSI test failed */
		dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
		rc = -EOPNOTSUPP;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "MSI test succeeded.\n");

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	free_irq(irq, ioa_cfg);

	LEAVE;

	return rc;
}
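
/*
 * Sketch of how the result of ipr_test_msi() is consumed by its caller
 * (see ipr_probe_ioa() below): on -EOPNOTSUPP the vectors are released and
 * the driver drops back to a single legacy interrupt.
 *
 *	rc = ipr_test_msi(ioa_cfg, pdev);
 *	if (rc == -EOPNOTSUPP) {
 *		pci_free_irq_vectors(pdev);
 *		ioa_cfg->nvectors = 1;
 *	}
 */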

/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_probe_ioa(struct pci_dev *pdev,
			 const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct Scsi_Host *host;
	unsigned long ipr_regs_pci;
	void __iomem *ipr_regs;
	int rc = PCIBIOS_SUCCESSFUL;
	volatile u32 mask, uproc, interrupts;
	unsigned long lock_flags, driver_lock_flags;
	unsigned int irq_flag;

	ENTER;

	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

	if (!host) {
		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
		rc = -ENOMEM;
		goto out;
	}

	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
	ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);

	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);

	if (!ioa_cfg->ipr_chip) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		rc = -ENODEV;
		goto out_scsi_host_put;
	}

	/* set SIS 32 or SIS 64 */
	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
	ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
	ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;

	if (ipr_transop_timeout)
		ioa_cfg->transop_timeout = ipr_transop_timeout;
	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
	else
		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;

	ioa_cfg->revid = pdev->revision;

	ipr_init_ioa_cfg(ioa_cfg, host, pdev);

	ipr_regs_pci = pci_resource_start(pdev, 0);

	rc = pci_request_regions(pdev, IPR_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't register memory range of registers\n");
		goto out_scsi_host_put;
	}

	rc = pci_enable_device(pdev);

	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(&pdev->dev, "Cannot enable adapter\n");
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			goto out_release_regions;
		}
	}

	ipr_regs = pci_ioremap_bar(pdev, 0);

	if (!ipr_regs) {
		dev_err(&pdev->dev,
			"Couldn't map memory range of registers\n");
		rc = -ENOMEM;
		goto out_disable;
	}

	ioa_cfg->hdw_dma_regs = ipr_regs;
	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

	ipr_init_regs(ioa_cfg);

	if (ioa_cfg->sis64) {
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		if (rc < 0) {
			dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
			rc = dma_set_mask_and_coherent(&pdev->dev,
						       DMA_BIT_MASK(32));
		}
	} else
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));

	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set DMA mask\n");
		goto cleanup_nomem;
	}

	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				   ioa_cfg->chip_cfg->cache_line_size);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Write of cache line size failed\n");
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		rc = -EIO;
		goto cleanup_nomem;
	}

	/* Issue MMIO read to ensure card is not in EEH */
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
	ipr_wait_for_pci_err_recovery(ioa_cfg);

	if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
		dev_err(&pdev->dev, "The max number of MSIX is %d\n",
			IPR_MAX_MSIX_VECTORS);
		ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
	}

	irq_flag = PCI_IRQ_LEGACY;
	if (ioa_cfg->ipr_chip->has_msi)
		irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
	rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
	if (rc < 0) {
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		goto cleanup_nomem;
	}
	ioa_cfg->nvectors = rc;

	if (!pdev->msi_enabled && !pdev->msix_enabled)
		ioa_cfg->clear_isr = 1;

	pci_set_master(pdev);

	if (pci_channel_offline(pdev)) {
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		pci_set_master(pdev);
		if (pci_channel_offline(pdev)) {
			rc = -EIO;
			goto out_msi_disable;
		}
	}

	if (pdev->msi_enabled || pdev->msix_enabled) {
		rc = ipr_test_msi(ioa_cfg, pdev);
		switch (rc) {
		case 0:
			dev_info(&pdev->dev,
				"Request for %d MSI%ss succeeded.\n",
				ioa_cfg->nvectors,
				pdev->msix_enabled ? "-X" : "");
			break;
		case -EOPNOTSUPP:
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			pci_free_irq_vectors(pdev);

			ioa_cfg->nvectors = 1;
			ioa_cfg->clear_isr = 1;
			break;
		default:
			goto out_msi_disable;
		}
	}

	ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
				(unsigned int)num_online_cpus(),
				(unsigned int)IPR_MAX_HRRQ_NUM);

	rc = ipr_save_pcix_cmd_reg(ioa_cfg);
	if (rc)
		goto out_msi_disable;

	rc = ipr_set_pcix_cmd_reg(ioa_cfg);
	if (rc)
		goto out_msi_disable;

	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto out_msi_disable;
	}

	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto cleanup_nolog;
	}

	/*
	 * If HRRQ updated interrupt is not masked, or reset alert is set,
	 * the card is in an unknown state and needs a hard reset
	 */
	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
		ioa_cfg->needs_hard_reset = 1;
	if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
		ioa_cfg->needs_hard_reset = 1;
	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
		ioa_cfg->ioa_unit_checked = 1;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (pdev->msi_enabled || pdev->msix_enabled) {
		name_msi_vectors(ioa_cfg);
		rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
			ioa_cfg->vectors_info[0].desc,
			&ioa_cfg->hrrq[0]);
		if (!rc)
			rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
	} else {
		rc = request_irq(pdev->irq, ipr_isr,
			 IRQF_SHARED,
			 IPR_NAME, &ioa_cfg->hrrq[0]);
	}
	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}

	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
		ioa_cfg->needs_warm_reset = 1;
		ioa_cfg->reset = ipr_reset_slot_reset;

		ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
								WQ_MEM_RECLAIM, host->host_no);

		if (!ioa_cfg->reset_work_q) {
			dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
			rc = -ENOMEM;
			goto out_free_irq;
		}
	} else
		ioa_cfg->reset = ipr_reset_start_bist;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	LEAVE;
out:
	return rc;

out_free_irq:
	ipr_free_irqs(ioa_cfg);
cleanup_nolog:
	ipr_free_mem(ioa_cfg);
out_msi_disable:
	ipr_wait_for_pci_err_recovery(ioa_cfg);
	pci_free_irq_vectors(pdev);
cleanup_nomem:
	iounmap(ipr_regs);
out_disable:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
	goto out;
}
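
/*
 * Probing is split in two stages: ipr_probe_ioa() above allocates resources
 * and registers interrupt handlers, while ipr_probe_ioa_part2(), invoked
 * from ipr_probe() below, brings the adapter itself to an operational
 * state. A caller therefore always pairs them:
 *
 *	rc = ipr_probe_ioa(pdev, dev_id);
 *	if (!rc)
 *		rc = ipr_probe_ioa_part2(pci_get_drvdata(pdev));
 */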

/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 *	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	ENTER;
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	LEAVE;
}
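
/*
 * Callers that must block until the bringdown finishes follow the pattern
 * used by __ipr_remove() and ipr_shutdown() below: initiate under the host
 * lock, then sleep on reset_wait_q with the lock dropped.
 *
 *	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
 *	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
 *	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 *	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 */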

/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	int i;
	unsigned long driver_lock_flags;
	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].removing_ioa = 1;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_work(&ioa_cfg->work_q);
	if (ioa_cfg->reset_work_q)
		flush_workqueue(ioa_cfg->reset_work_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
	list_del(&ioa_cfg->queue);
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}

/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *	none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
			     &ipr_dump_attr);
	sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
			&ipr_ioa_async_err_log);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}

/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags;
	int rc, i;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
			&ipr_ioa_async_err_log);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				&ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_dump_attr);

	if (rc) {
		sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_ioa_async_err_log);
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}
	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	ioa_cfg->scan_enabled = 1;
	schedule_work(&ioa_cfg->work_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);

	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;

	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
			irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
					ioa_cfg->iopoll_weight, ipr_iopoll);
		}
	}

	scsi_scan_host(ioa_cfg->host);

	return 0;
}

/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 *	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;
	enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
	int i;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		ioa_cfg->iopoll_weight = 0;
		for (i = 1; i < ioa_cfg->hrrq_num; i++)
			irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
	}

	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
		shutdown_type = IPR_SHUTDOWN_QUIESCE;

	ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
		ipr_free_irqs(ioa_cfg);
		pci_disable_device(ioa_cfg->pdev);
	}
}

static const struct pci_device_id ipr_pci_table[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
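
/*
 * Each entry above matches on vendor/device plus IBM subsystem IDs; the
 * final field is driver_data, read back in ipr_probe_ioa() to tune
 * per-board behavior, e.g.:
 *
 *	if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
 *		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
 */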

static const struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.mmio_enabled = ipr_pci_mmio_enabled,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};

/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		notifier block
 * @event:	notifier event (SYS_RESTART, SYS_HALT, or SYS_POWER_OFF)
 * @buf:	unused
 *
 * Return value:
 *	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0, driver_lock_flags;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
		    (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	return NOTIFY_OK;
}
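
/*
 * ipr_halt() runs as a reboot notifier; it is hooked up via the
 * ipr_notifier block below and registered from ipr_init() with:
 *
 *	register_reboot_notifier(&ipr_notifier);
 */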

static struct notifier_block ipr_notifier = {
	.notifier_call = ipr_halt,
};

/**
 * ipr_init - Module entry point
 *
 * Return value:
 *	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 *	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);
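
/*
 * Example module load with a few of the parameters referenced in this file
 * (the values are illustrative only):
 *
 *	modprobe ipr ipr_debug=1 ipr_transop_timeout=0 ipr_max_speed=1
 */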