/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *      - Ultra 320 SCSI controller
 *      - PCI-X host interface
 *      - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *      - Non-Volatile Write Cache
 *      - Supports attachment of non-RAID disks, tape, and optical devices
 *      - RAID Levels 0, 5, 10
 *      - Hot spare
 *      - Background Parity Checking
 *      - Background Data Scrubbing
 *      - Ability to increase the capacity of an existing RAID 5 disk array
 *              by adding disks
 *
 * Driver Features:
 *      - Tagged command queuing
 *      - Adapter microcode download
 *      - PCI hot plug
 *      - SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 16;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
        { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
                .mailbox = 0x0042C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
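                /* interrupt and I/O register offsets for this chip family */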
                {
                        .set_interrupt_mask_reg = 0x0022C,
                        .clr_interrupt_mask_reg = 0x00230,
                        .clr_interrupt_mask_reg32 = 0x00230,
                        .sense_interrupt_mask_reg = 0x0022C,
                        .sense_interrupt_mask_reg32 = 0x0022C,
                        .clr_interrupt_reg = 0x00228,
                        .clr_interrupt_reg32 = 0x00228,
                        .sense_interrupt_reg = 0x00224,
                        .sense_interrupt_reg32 = 0x00224,
                        .ioarrin_reg = 0x00404,
                        .sense_uproc_interrupt_reg = 0x00214,
                        .sense_uproc_interrupt_reg32 = 0x00214,
                        .set_uproc_interrupt_reg = 0x00214,
                        .set_uproc_interrupt_reg32 = 0x00214,
                        .clr_uproc_interrupt_reg = 0x00218,
                        .clr_uproc_interrupt_reg32 = 0x00218
                }
        },
        { /* Snipe and Scamp */
                .mailbox = 0x0052C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x00288,
                        .clr_interrupt_mask_reg = 0x0028C,
                        .clr_interrupt_mask_reg32 = 0x0028C,
                        .sense_interrupt_mask_reg = 0x00288,
                        .sense_interrupt_mask_reg32 = 0x00288,
                        .clr_interrupt_reg = 0x00284,
                        .clr_interrupt_reg32 = 0x00284,
                        .sense_interrupt_reg = 0x00280,
                        .sense_interrupt_reg32 = 0x00280,
                        .ioarrin_reg = 0x00504,
                        .sense_uproc_interrupt_reg = 0x00290,
                        .sense_uproc_interrupt_reg32 = 0x00290,
                        .set_uproc_interrupt_reg = 0x00290,
                        .set_uproc_interrupt_reg32 = 0x00290,
                        .clr_uproc_interrupt_reg = 0x00294,
                        .clr_uproc_interrupt_reg32 = 0x00294
                }
        },
        { /* CRoC */
                .mailbox = 0x00044,
                .max_cmds = 1000,
                .cache_line_size = 0x20,
                .clear_isr = 0,
                .iopoll_weight = 64,
                {
                        .set_interrupt_mask_reg = 0x00010,
                        .clr_interrupt_mask_reg = 0x00018,
                        .clr_interrupt_mask_reg32 = 0x0001C,
                        .sense_interrupt_mask_reg = 0x00010,
                        .sense_interrupt_mask_reg32 = 0x00014,
                        .clr_interrupt_reg = 0x00008,
                        .clr_interrupt_reg32 = 0x0000C,
                        .sense_interrupt_reg = 0x00000,
                        .sense_interrupt_reg32 = 0x00004,
                        .ioarrin_reg = 0x00070,
                        .sense_uproc_interrupt_reg = 0x00020,
                        .sense_uproc_interrupt_reg32 = 0x00024,
                        .set_uproc_interrupt_reg = 0x00020,
                        .set_uproc_interrupt_reg32 = 0x00024,
                        .clr_uproc_interrupt_reg = 0x00028,
                        .clr_uproc_interrupt_reg32 = 0x0002C,
                        .init_feedback_reg = 0x0005C,
                        .dump_addr_reg = 0x00064,
                        .dump_data_reg = 0x00068,
                        .endian_swap_reg = 0x00084
                }
        },
};

static const struct ipr_chip_t ipr_chip[] = {
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
        IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
                 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16).  (default:16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
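
/*
 * Illustrative examples (not part of the original source) of setting the
 * parameters above via the standard module parameter mechanism:
 *
 *   modprobe ipr fastfail=1 number_of_msix=8
 *
 * or, with the driver built in, on the kernel command line:
 *
 *   ipr.fastfail=1 ipr.number_of_msix=8
 */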

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
        {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8155: An unknown error was received"},
        {0x00330000, 0, 0,
        "Soft underlength error"},
        {0x005A0000, 0, 0,
        "Command to be cancelled not found"},
        {0x00808000, 0, 0,
        "Qualified success"},
        {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Soft device bus error recovered by the IOA"},
        {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4101: Soft device bus fabric error"},
        {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block guard error recovered by the device"},
        {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block reference tag error recovered by the device"},
        {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered scatter list tag / sequence number error"},
        {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
        {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered logical block sequence number error on IOA to Host transfer"},
        {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Recovered logical block reference tag error detected by the IOA"},
        {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Logical block guard error recovered by the IOA"},
        {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Device sector reassign successful"},
        {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by device rewrite procedures"},
        {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
        "7001: IOA sector reassignment successful"},
        {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Soft media error. Sector reassignment recommended"},
        {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by IOA rewrite procedures"},
        {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft PCI bus error recovered by the IOA"},
        {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the IOA"},
        {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the device"},
        {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft IOA error recovered by the IOA"},
        {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFA: Undefined device response recovered by the IOA"},
        {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device bus error, message or command phase"},
        {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Task Management Function failed"},
        {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Failure prediction threshold exceeded"},
        {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8009: Impending cache battery pack failure"},
        {0x02040100, 0, 0,
        "Logical Unit in process of becoming ready"},
        {0x02040200, 0, 0,
        "Initializing command required"},
        {0x02040400, 0, 0,
        "34FF: Disk device format in progress"},
        {0x02040C00, 0, 0,
        "Logical unit not accessible, target port in unavailable state"},
        {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9070: IOA requested reset"},
        {0x023F0000, 0, 0,
        "Synchronization required"},
        {0x02408500, 0, 0,
        "IOA microcode download required"},
        {0x02408600, 0, 0,
        "Device bus connection is prohibited by host"},
        {0x024E0000, 0, 0,
        "Not ready, IOA shutdown"},
        {0x025A0000, 0, 0,
        "Not ready, IOA has been shutdown"},
        {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: Storage subsystem configuration error"},
        {0x03110B00, 0, 0,
        "FFF5: Medium error, data unreadable, recommend reassign"},
        {0x03110C00, 0, 0,
        "7000: Medium error, data unreadable, do not reassign"},
        {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF3: Disk media format bad"},
        {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3002: Addressed device failed to respond to selection"},
        {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3100: Device bus error"},
        {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3109: IOA timed out a device command"},
        {0x04088000, 0, 0,
        "3120: SCSI bus is not operational"},
        {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4100: Hard device bus fabric error"},
        {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block guard error detected by the device"},
        {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block reference tag error detected by the device"},
        {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Scatter list tag / sequence number error"},
        {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Logical block CRC error on IOA to Host transfer"},
        {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Logical block sequence number error on IOA to Host transfer"},
        {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block reference tag error detected by the IOA"},
        {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block guard error detected by the IOA"},
        {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9000: IOA reserved area data check"},
        {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9001: IOA reserved area invalid data pattern"},
        {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9002: IOA reserved area LRC error"},
        {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
        "Hardware Error, IOA metadata access error"},
        {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
        "102E: Out of alternate sectors for disk storage"},
        {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer underlength error"},
        {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer overlength error"},
        {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3400: Logical unit failure"},
        {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Device microcode is corrupt"},
        {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: PCI bus error"},
        {0x04430000, 1, 0,
        "Unsupported device bus message received"},
        {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Disk device problem"},
        {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Permanent IOA failure"},
        {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3010: Disk device returned wrong response to IOA"},
        {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
        "8151: IOA microcode error"},
        {0x04448500, 0, 0,
        "Device bus status error"},
        {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
        "8157: IOA error requiring IOA reset to recover"},
        {0x04448700, 0, 0,
        "ATA device status error"},
        {0x04490000, 0, 0,
        "Message reject received from the device"},
        {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8008: A permanent cache battery pack failure occurred"},
        {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9090: Disk unit has been modified after the last known status"},
        {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9081: IOA detected device error"},
        {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
        "9082: IOA detected device error"},
        {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: Device bus error, message or command phase"},
        {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: SAS Command / Task Management Function failed"},
        {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9091: Incorrect hardware configuration change has been detected"},
        {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9073: Invalid multi-adapter configuration"},
        {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4010: Incorrect connection between cascaded expanders"},
        {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4020: Connections exceed IOA design limits"},
        {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4030: Incorrect multipath connection"},
        {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4110: Unsupported enclosure function"},
        {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
        "4120: SAS cable VPD cannot be read"},
        {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Command to logical unit failed"},
        {0x05240000, 1, 0,
        "Illegal request, invalid request type or request packet"},
        {0x05250000, 0, 0,
        "Illegal request, invalid resource handle"},
        {0x05258000, 0, 0,
        "Illegal request, commands not allowed to this device"},
        {0x05258100, 0, 0,
        "Illegal request, command not allowed to a secondary adapter"},
        {0x05258200, 0, 0,
        "Illegal request, command not allowed to a non-optimized resource"},
        {0x05260000, 0, 0,
        "Illegal request, invalid field in parameter list"},
        {0x05260100, 0, 0,
        "Illegal request, parameter not supported"},
        {0x05260200, 0, 0,
        "Illegal request, parameter value invalid"},
        {0x052C0000, 0, 0,
        "Illegal request, command sequence error"},
        {0x052C8000, 1, 0,
        "Illegal request, dual adapter support not enabled"},
        {0x052C8100, 1, 0,
        "Illegal request, another cable connector was physically disabled"},
        {0x054E8000, 1, 0,
        "Illegal request, inconsistent group id/group count"},
        {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9031: Array protection temporarily suspended, protection resuming"},
        {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9040: Array protection temporarily suspended, protection resuming"},
        {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4080: IOA exceeded maximum operating temperature"},
        {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4085: Service required"},
        {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3140: Device bus not ready to ready transition"},
        {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset"},
        {0x06290500, 0, 0,
        "FFFE: SCSI bus transition to single ended"},
        {0x06290600, 0, 0,
        "FFFE: SCSI bus transition to LVD"},
        {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset by another initiator"},
        {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3029: A device replacement has occurred"},
        {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4102: Device bus fabric performance degradation"},
        {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9051: IOA cache data exists for a missing or failed device"},
        {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
        {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9025: Disk unit is not supported at its physical location"},
        {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: IOA detected a SCSI bus configuration error"},
        {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3150: SCSI bus configuration error"},
        {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9074: Asymmetric advanced function disk configuration"},
        {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4040: Incomplete multipath connection between IOA and enclosure"},
        {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
        "4041: Incomplete multipath connection between enclosure and device"},
        {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9075: Incomplete multipath connection between IOA and remote IOA"},
        {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9076: Configuration error, missing remote IOA"},
        {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4050: Enclosure does not support a required multipath function"},
        {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
        "4121: Configuration error, required cable is missing"},
        {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
        "4122: Cable is not plugged into the correct location on remote IOA"},
        {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "4123: Configuration error, invalid cable vital product data"},
        {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "4124: Configuration error, both cable ends are plugged into the same IOA"},
        {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4070: Logically bad block written on device"},
        {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9041: Array protection temporarily suspended"},
        {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9042: Corrupt array parity detected on specified device"},
        {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9030: Array no longer protected due to missing or failed disk unit"},
        {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9071: Link operational transition"},
        {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9072: Link not operational transition"},
        {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9032: Array exposed but still protected"},
        {0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
        "70DD: Device forced failed by disrupt device command"},
        {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4061: Multipath redundancy level got better"},
        {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4060: Multipath redundancy level got worse"},
        {0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
        "9083: Device raw mode enabled"},
        {0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
        "9084: Device raw mode disabled"},
        {0x07270000, 0, 0,
        "Failure due to other device"},
        {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9008: IOA does not support functions expected by devices"},
        {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9010: Cache data associated with attached devices cannot be found"},
        {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9011: Cache data belongs to devices other than those attached"},
        {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9020: Array missing 2 or more devices with only 1 device present"},
        {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9021: Array missing 2 or more devices with 2 or more devices present"},
        {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9022: Exposed array is missing a required device"},
        {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9023: Array member(s) not at required physical locations"},
        {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9024: Array not functional due to present hardware configuration"},
        {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9026: Array not functional due to present hardware configuration"},
        {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9027: Array is missing a device and parity is out of sync"},
        {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9028: Maximum number of arrays already exist"},
        {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9050: Required cache data cannot be located for a disk unit"},
        {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9052: Cache data exists for a device that has been modified"},
        {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9054: IOA resources not available due to previous problems"},
        {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9092: Disk unit requires initialization before use"},
        {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9029: Incorrect hardware configuration change has been detected"},
        {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9060: One or more disk pairs are missing from an array"},
        {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9061: One or more disks are missing from an array"},
        {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9062: One or more disks are missing from an array"},
        {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9063: Maximum number of functional arrays has been exceeded"},
        {0x07279A00, 0, 0,
        "Data protect, other volume set problem"},
        {0x0B260000, 0, 0,
        "Aborted command, invalid descriptor"},
        {0x0B3F9000, 0, 0,
        "Target operating conditions have changed, dual adapter takeover"},
        {0x0B530200, 0, 0,
        "Aborted command, medium removal prevented"},
        {0x0B5A0000, 0, 0,
        "Command terminated by host"},
        {0x0B5B8000, 0, 0,
        "Aborted command, command terminated by host"}
};

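/* Enclosure matching table: SCSI product ID, a per-byte compare mask
 * ('X' = byte must match, any other character = don't care, per the
 * lookup logic used elsewhere in this driver), and the maximum bus
 * speed (MB/s) allowed with that enclosure. */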
static const struct ipr_ses_table_entry ipr_ses_table[] = {
        { "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
        { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
        { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
        { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
        { "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
                                   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:    ipr command struct
 * @type:               trace type
 * @add_data:   additional data
 *
 * Return value:
 *      none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
                         u8 type, u32 add_data)
{
        struct ipr_trace_entry *trace_entry;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        unsigned int trace_index;

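        /* atomically claim the next slot in the circular trace buffer */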
        trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
        trace_entry = &ioa_cfg->trace[trace_index];
        trace_entry->time = jiffies;
        trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
        trace_entry->type = type;
        if (ipr_cmd->ioa_cfg->sis64)
                trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
        else
                trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
        trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
        trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
        trace_entry->u.add_data = add_data;
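        /* ensure the trace entry is fully written before it can be observed */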
        wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
        unsigned long lock_flags;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        ipr_cmd->done(ipr_cmd);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
        struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
        struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
        dma_addr_t dma_addr = ipr_cmd->dma_addr;
        int hrrq_id;

        hrrq_id = ioarcb->cmd_pkt.hrrq_id;
        memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
        ioarcb->cmd_pkt.hrrq_id = hrrq_id;
        ioarcb->data_transfer_length = 0;
        ioarcb->read_data_transfer_length = 0;
        ioarcb->ioadl_len = 0;
        ioarcb->read_ioadl_len = 0;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioarcb->u.sis64_addr_data.data_ioadl_addr =
                        cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
                ioasa64->u.gata.status = 0;
        } else {
                ioarcb->write_ioadl_addr =
                        cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
                ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
                ioasa->u.gata.status = 0;
        }

        ioasa->hdr.ioasc = 0;
        ioasa->hdr.residual_data_len = 0;
        ipr_cmd->scsi_cmd = NULL;
        ipr_cmd->qc = NULL;
        ipr_cmd->sense_buffer[0] = 0;
        ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:    ipr command struct
 * @fast_done:  fast done function call-back
 *
 * Return value:
 *      none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
                              void (*fast_done) (struct ipr_cmnd *))
{
        ipr_reinit_ipr_cmnd(ipr_cmd);
        ipr_cmd->u.scratch = 0;
        ipr_cmd->sibling = NULL;
        ipr_cmd->eh_comp = NULL;
        ipr_cmd->fast_done = fast_done;
        init_timer(&ipr_cmd->timer);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:       hrr queue
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
        struct ipr_cmnd *ipr_cmd = NULL;

        if (likely(!list_empty(&hrrq->hrrq_free_q))) {
                ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
                        struct ipr_cmnd, queue);
                list_del(&ipr_cmd->queue);
        }

        return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd =
                __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
        ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
        return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:    ioa config struct
 * @clr_ints:     interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *      none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
                                          u32 clr_ints)
{
        volatile u32 int_reg;
        int i;

        /* Stop new interrupts */
        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
                spin_lock(&ioa_cfg->hrrq[i]._lock);
                ioa_cfg->hrrq[i].allow_interrupts = 0;
                spin_unlock(&ioa_cfg->hrrq[i]._lock);
        }
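        /* order the allow_interrupts stores before the mask register writes below */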
        wmb();

        /* Set interrupt mask to stop all new interrupts */
        if (ioa_cfg->sis64)
                writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
        else
                writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

        /* Clear any pending interrupts */
        if (ioa_cfg->sis64)
                writel(~0, ioa_cfg->regs.clr_interrupt_reg);
        writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

        if (pcix_cmd_reg == 0)
                return 0;

        if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
                dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
                return -EIO;
        }

        ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
        return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

        if (pcix_cmd_reg) {
                if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                          ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
                        dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
                        return -EIO;
                }
        }

        return 0;
}

/**
 * __ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *      none
 **/
static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct ata_queued_cmd *qc = ipr_cmd->qc;
        struct ipr_sata_port *sata_port = qc->ap->private_data;

        qc->err_mask |= AC_ERR_OTHER;
        sata_port->ioasa.status |= ATA_BUSY;
        ata_qc_complete(qc);
        if (ipr_cmd->eh_comp)
                complete(ipr_cmd->eh_comp);
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
        unsigned long hrrq_flags;

        spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
        __ipr_sata_eh_done(ipr_cmd);
        spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *      none
 **/
static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

        scsi_cmd->result |= (DID_ERROR << 16);

        scsi_dma_unmap(ipr_cmd->scsi_cmd);
        scsi_cmd->scsi_done(scsi_cmd);
        if (ipr_cmd->eh_comp)
                complete(ipr_cmd->eh_comp);
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
        unsigned long hrrq_flags;
        struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;

        spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
        __ipr_scsi_eh_done(ipr_cmd);
        spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:    ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *      none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd, *temp;
        struct ipr_hrr_queue *hrrq;

        ENTER;
        for_each_hrrq(hrrq, ioa_cfg) {
                spin_lock(&hrrq->_lock);
                list_for_each_entry_safe(ipr_cmd,
                                        temp, &hrrq->hrrq_pending_q, queue) {
                        list_del(&ipr_cmd->queue);

                        ipr_cmd->s.ioasa.hdr.ioasc =
                                cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
                        ipr_cmd->s.ioasa.hdr.ilid =
                                cpu_to_be32(IPR_DRIVER_ILID);

                        if (ipr_cmd->scsi_cmd)
                                ipr_cmd->done = __ipr_scsi_eh_done;
                        else if (ipr_cmd->qc)
                                ipr_cmd->done = __ipr_sata_eh_done;

                        ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
                                     IPR_IOASC_IOA_WAS_RESET);
                        del_timer(&ipr_cmd->timer);
                        ipr_cmd->done(ipr_cmd);
                }
                spin_unlock(&hrrq->_lock);
        }
        LEAVE;
}

/**
 * ipr_send_command -  Send driver initiated requests.
 * @ipr_cmd:            ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 *      none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

        if (ioa_cfg->sis64) {
                /* The default size is 256 bytes */
                send_dma_addr |= 0x1;

                /* If the number of ioadls * size of ioadl > 128 bytes,
                   then use a 512 byte ioarcb */
                if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
                        send_dma_addr |= 0x4;
                writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
        } else
                writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:            ipr command struct
 * @done:                       done function
 * @timeout_func:       timeout function
 * @timeout:            timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *      none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
                       void (*done) (struct ipr_cmnd *),
                       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

        ipr_cmd->done = done;

        ipr_cmd->timer.data = (unsigned long) ipr_cmd;
        ipr_cmd->timer.expires = jiffies + timeout;
        ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

        add_timer(&ipr_cmd->timer);

        ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

        ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:    ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *      none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
        if (ipr_cmd->sibling)
                ipr_cmd->sibling = NULL;
        else
                complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:    ipr command struct
 * @dma_addr:   dma address
 * @len:        transfer length
 * @flags:      ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 *      nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
                           u32 len, int flags)
{
        struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
        struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

        ipr_cmd->dma_use_sg = 1;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioadl64->flags = cpu_to_be32(flags);
                ioadl64->data_len = cpu_to_be32(len);
                ioadl64->address = cpu_to_be64(dma_addr);

                ipr_cmd->ioarcb.ioadl_len =
                        cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
                ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
        } else {
                ioadl->flags_and_data_len = cpu_to_be32(flags | len);
                ioadl->address = cpu_to_be32(dma_addr);

                if (flags == IPR_IOADL_FLAGS_READ_LAST) {
                        ipr_cmd->ioarcb.read_ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
                } else {
                        ipr_cmd->ioarcb.ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
                }
        }
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:    ipr command struct
 * @timeout_func:       function to invoke if command times out
 * @timeout:    timeout
 *
 * Return value:
 *      none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
                                  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
                                  u32 timeout)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        init_completion(&ipr_cmd->completion);
        ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

        spin_unlock_irq(ioa_cfg->host->host_lock);
        wait_for_completion(&ipr_cmd->completion);
        spin_lock_irq(ioa_cfg->host->host_lock);
}

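/**
 * ipr_get_hrrq_index - Select an HRRQ index for a new command
 * @ioa_cfg:    ioa config struct
 *
 * With a single queue configured, index 0 is always used; otherwise
 * this round-robins across HRRQs 1 .. hrrq_num - 1, skipping HRRQ 0.
 *
 * Return value:
 *      hrrq index
 **/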
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
        unsigned int hrrq;

        if (ioa_cfg->hrrq_num == 1)
                hrrq = 0;
        else {
                hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
                hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
        }
        return hrrq;
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:    ioa config struct
 * @type:               HCAM type
 * @hostrcb:    hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *      none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
                          struct ipr_hostrcb *hostrcb)
{
        struct ipr_cmnd *ipr_cmd;
        struct ipr_ioarcb *ioarcb;

        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
                ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
                list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

                ipr_cmd->u.hostrcb = hostrcb;
                ioarcb = &ipr_cmd->ioarcb;

                ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
                ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
                ioarcb->cmd_pkt.cdb[1] = type;
                ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
                ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

                ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
                               sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

                if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
                        ipr_cmd->done = ipr_process_ccn;
                else
                        ipr_cmd->done = ipr_process_error;

                ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

                ipr_send_command(ipr_cmd);
        } else {
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
        }
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:        resource entry struct
 * @proto:      cfgte device bus protocol value
 *
 * Return value:
 *      none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
        switch (proto) {
        case IPR_PROTO_SATA:
        case IPR_PROTO_SAS_STP:
                res->ata_class = ATA_DEV_ATA;
                break;
        case IPR_PROTO_SATA_ATAPI:
        case IPR_PROTO_SAS_STP_ATAPI:
                res->ata_class = ATA_DEV_ATAPI;
                break;
        default:
                res->ata_class = ATA_DEV_UNKNOWN;
                break;
        }
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
                               struct ipr_config_table_entry_wrapper *cfgtew)
{
        int found = 0;
        unsigned int proto;
        struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
        struct ipr_resource_entry *gscsi_res = NULL;

        res->needs_sync_complete = 0;
        res->in_erp = 0;
        res->add_to_ml = 0;
        res->del_from_ml = 0;
        res->resetting_device = 0;
        res->reset_occurred = 0;
        res->sdev = NULL;
        res->sata_port = NULL;

        if (ioa_cfg->sis64) {
                proto = cfgtew->u.cfgte64->proto;
                res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
                res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
                res->qmodel = IPR_QUEUEING_MODEL64(res);
                res->type = cfgtew->u.cfgte64->res_type;

                memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
                        sizeof(res->res_path));

                res->bus = 0;
                memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                        sizeof(res->dev_lun.scsi_lun));
                res->lun = scsilun_to_int(&res->dev_lun);

                if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
                        list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
                                if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
                                        found = 1;
                                        res->target = gscsi_res->target;
                                        break;
                                }
                        }
                        if (!found) {
                                res->target = find_first_zero_bit(ioa_cfg->target_ids,
                                                                  ioa_cfg->max_devs_supported);
                                set_bit(res->target, ioa_cfg->target_ids);
                        }
                } else if (res->type == IPR_RES_TYPE_IOAFP) {
                        res->bus = IPR_IOAFP_VIRTUAL_BUS;
                        res->target = 0;
                } else if (res->type == IPR_RES_TYPE_ARRAY) {
                        res->bus = IPR_ARRAY_VIRTUAL_BUS;
                        res->target = find_first_zero_bit(ioa_cfg->array_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->array_ids);
                } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
                        res->bus = IPR_VSET_VIRTUAL_BUS;
                        res->target = find_first_zero_bit(ioa_cfg->vset_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->vset_ids);
                } else {
                        res->target = find_first_zero_bit(ioa_cfg->target_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->target_ids);
                }
        } else {
                proto = cfgtew->u.cfgte->proto;
                res->qmodel = IPR_QUEUEING_MODEL(res);
                res->flags = cfgtew->u.cfgte->flags;
                if (res->flags & IPR_IS_IOA_RESOURCE)
                        res->type = IPR_RES_TYPE_IOAFP;
                else
                        res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

                res->bus = cfgtew->u.cfgte->res_addr.bus;
                res->target = cfgtew->u.cfgte->res_addr.target;
                res->lun = cfgtew->u.cfgte->res_addr.lun;
                res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
        }

        ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
                              struct ipr_config_table_entry_wrapper *cfgtew)
{
        if (res->ioa_cfg->sis64) {
                if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
                                        sizeof(cfgtew->u.cfgte64->dev_id)) &&
                        !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                                        sizeof(cfgtew->u.cfgte64->lun))) {
                        return 1;
                }
        } else {
                if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
                    res->target == cfgtew->u.cfgte->res_addr.target &&
                    res->lun == cfgtew->u.cfgte->res_addr.lun)
                        return 1;
        }

        return 0;
}

/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:   resource path
1304  * @buffer:     buffer
1305  * @len:        length of buffer provided
1306  *
1307  * Return value:
1308  *      pointer to buffer
1309  **/
1310 static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
1311 {
1312         int i;
1313         char *p = buffer;
1314
1315         *p = '\0';
1316         p += scnprintf(p, buffer + len - p, "%02X", res_path[0]);
1317         for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1318                 p += scnprintf(p, buffer + len - p, "-%02X", res_path[i]);
1319
1320         return buffer;
1321 }
1322
1323 /**
1324  * ipr_format_res_path - Format the resource path for printing.
1325  * @ioa_cfg:    ioa config struct
1326  * @res_path:   resource path
1327  * @buffer:     buffer
1328  * @len:        length of buffer provided
1329  *
1330  * Return value:
1331  *      pointer to buffer
1332  **/
1333 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1334                                  u8 *res_path, char *buffer, int len)
1335 {
1336         char *p = buffer;
1337
1338         *p = '\0';
1339         p += scnprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1340         __ipr_format_res_path(res_path, p, len - (p - buffer));
1341         return buffer;
1342 }
1343
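/*
 * Worked example (illustrative, made-up values): for a SIS64 resource
 * path of { 0x00, 0x02, 0x0a, 0xff, ... } on host 2, the two helpers
 * above produce:
 *
 *	char buf[IPR_MAX_RES_PATH_LENGTH];
 *
 *	__ipr_format_res_path(path, buf, sizeof(buf));
 *		buf == "00-02-0A"
 *	ipr_format_res_path(ioa_cfg, path, buf, sizeof(buf));
 *		buf == "2/00-02-0A"
 *
 * The first element is printed bare, each following element is prefixed
 * with '-', and the walk stops at the 0xff terminator or the length bound.
 */
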
1344 /**
1345  * ipr_update_res_entry - Update the resource entry.
1346  * @res:        resource entry struct
1347  * @cfgtew:     config table entry wrapper struct
1348  *
1349  * Return value:
1350  *      none
1351  **/
1352 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1353                                  struct ipr_config_table_entry_wrapper *cfgtew)
1354 {
1355         char buffer[IPR_MAX_RES_PATH_LENGTH];
1356         unsigned int proto;
1357         int new_path = 0;
1358
1359         if (res->ioa_cfg->sis64) {
1360                 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1361                 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1362                 res->type = cfgtew->u.cfgte64->res_type;
1363
1364                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1365                         sizeof(struct ipr_std_inq_data));
1366
1367                 res->qmodel = IPR_QUEUEING_MODEL64(res);
1368                 proto = cfgtew->u.cfgte64->proto;
1369                 res->res_handle = cfgtew->u.cfgte64->res_handle;
1370                 res->dev_id = cfgtew->u.cfgte64->dev_id;
1371
1372                 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1373                         sizeof(res->dev_lun.scsi_lun));
1374
1375                 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1376                                         sizeof(res->res_path))) {
1377                         memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1378                                 sizeof(res->res_path));
1379                         new_path = 1;
1380                 }
1381
1382                 if (res->sdev && new_path)
1383                         sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1384                                     ipr_format_res_path(res->ioa_cfg,
1385                                         res->res_path, buffer, sizeof(buffer)));
1386         } else {
1387                 res->flags = cfgtew->u.cfgte->flags;
1388                 if (res->flags & IPR_IS_IOA_RESOURCE)
1389                         res->type = IPR_RES_TYPE_IOAFP;
1390                 else
1391                         res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1392
1393                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1394                         sizeof(struct ipr_std_inq_data));
1395
1396                 res->qmodel = IPR_QUEUEING_MODEL(res);
1397                 proto = cfgtew->u.cfgte->proto;
1398                 res->res_handle = cfgtew->u.cfgte->res_handle;
1399         }
1400
1401         ipr_update_ata_class(res, proto);
1402 }
1403
1404 /**
1405  * ipr_clear_res_target - Clear the bit in the bit map representing the target
1406  *                        for the resource.
1407  * @res:        resource entry struct
1409  *
1410  * Return value:
1411  *      none
1412  **/
1413 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1414 {
1415         struct ipr_resource_entry *gscsi_res = NULL;
1416         struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1417
1418         if (!ioa_cfg->sis64)
1419                 return;
1420
1421         if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1422                 clear_bit(res->target, ioa_cfg->array_ids);
1423         else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1424                 clear_bit(res->target, ioa_cfg->vset_ids);
1425         else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1426                 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1427                         if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1428                                 return;
1429                 clear_bit(res->target, ioa_cfg->target_ids);
1430
1431         } else if (res->bus == 0)
1432                 clear_bit(res->target, ioa_cfg->target_ids);
1433 }
1434
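/*
 * Illustrative sketch of the id bitmap lifecycle paired with
 * ipr_init_res_entry() above: virtual bus targets are plain kernel
 * bitmaps, claimed on hot add and released here on hot remove:
 *
 *	target = find_first_zero_bit(ioa_cfg->vset_ids,
 *				     ioa_cfg->max_devs_supported);
 *	set_bit(target, ioa_cfg->vset_ids);	(claim on add)
 *	...
 *	clear_bit(target, ioa_cfg->vset_ids);	(release on remove)
 *
 * Generic SCSI devices are the exception: multiple LUNs can share one
 * dev_id and thus one target bit, so the bit is only cleared once no
 * other resource entry with the same dev_id remains (the list walk above).
 */
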
1435 /**
1436  * ipr_handle_config_change - Handle a config change from the adapter
1437  * @ioa_cfg:    ioa config struct
1438  * @hostrcb:    hostrcb
1439  *
1440  * Return value:
1441  *      none
1442  **/
1443 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1444                                      struct ipr_hostrcb *hostrcb)
1445 {
1446         struct ipr_resource_entry *res = NULL;
1447         struct ipr_config_table_entry_wrapper cfgtew;
1448         __be32 cc_res_handle;
1450         u32 is_ndn = 1;
1451
1452         if (ioa_cfg->sis64) {
1453                 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1454                 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1455         } else {
1456                 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1457                 cc_res_handle = cfgtew.u.cfgte->res_handle;
1458         }
1459
1460         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1461                 if (res->res_handle == cc_res_handle) {
1462                         is_ndn = 0;
1463                         break;
1464                 }
1465         }
1466
1467         if (is_ndn) {
1468                 if (list_empty(&ioa_cfg->free_res_q)) {
1469                         ipr_send_hcam(ioa_cfg,
1470                                       IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1471                                       hostrcb);
1472                         return;
1473                 }
1474
1475                 res = list_entry(ioa_cfg->free_res_q.next,
1476                                  struct ipr_resource_entry, queue);
1477
1478                 list_del(&res->queue);
1479                 ipr_init_res_entry(res, &cfgtew);
1480                 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1481         }
1482
1483         ipr_update_res_entry(res, &cfgtew);
1484
1485         if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1486                 if (res->sdev) {
1487                         res->del_from_ml = 1;
1488                         res->res_handle = IPR_INVALID_RES_HANDLE;
1489                         schedule_work(&ioa_cfg->work_q);
1490                 } else {
1491                         ipr_clear_res_target(res);
1492                         list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1493                 }
1494         } else if (!res->sdev || res->del_from_ml) {
1495                 res->add_to_ml = 1;
1496                 schedule_work(&ioa_cfg->work_q);
1497         }
1498
1499         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1500 }
1501
1502 /**
1503  * ipr_process_ccn - Op done function for a CCN.
1504  * @ipr_cmd:    ipr command struct
1505  *
1506  * This function is the op done function for a configuration change
1507  * notification host controlled async message (HCAM) from the adapter.
1508  *
1509  * Return value:
1510  *      none
1511  **/
1512 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1513 {
1514         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1515         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1516         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1517
1518         list_del_init(&hostrcb->queue);
1519         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1520
1521         if (ioasc) {
1522                 if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1523                     ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
1524                         dev_err(&ioa_cfg->pdev->dev,
1525                                 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1526
1527                 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1528         } else {
1529                 ipr_handle_config_change(ioa_cfg, hostrcb);
1530         }
1531 }
1532
1533 /**
1534  * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1535  * @i:          index into buffer
1536  * @buf:                string to modify
1537  *
1538  * This function will strip all trailing whitespace, pad the end
1539  * of the string with a single space, and NULL terminate the string.
1540  *
1541  * Return value:
1542  *      new length of string
1543  **/
1544 static int strip_and_pad_whitespace(int i, char *buf)
1545 {
1546         while (i && buf[i] == ' ')
1547                 i--;
1548         buf[i+1] = ' ';
1549         buf[i+2] = '\0';
1550         return i + 2;
1551 }
1552
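/*
 * Worked example (illustrative): with buf = "IBM     " and i = 7, the
 * loop backs up over the trailing spaces to the 'M' at index 2, writes
 * a single pad space at index 3 and a terminator at index 4, leaving
 * buf = "IBM " and returning 4, the index at which the next field
 * should be appended.
 */
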
1553 /**
1554  * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1555  * @prefix:             string to print at start of printk
1556  * @hostrcb:    hostrcb pointer
1557  * @vpd:                vendor/product id/sn struct
1558  *
1559  * Return value:
1560  *      none
1561  **/
1562 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1563                                 struct ipr_vpd *vpd)
1564 {
1565         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1566         int i = 0;
1567
1568         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1569         i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1570
1571         memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1572         i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1573
1574         memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1575         buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1576
1577         ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1578 }
1579
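/*
 * Worked example (illustrative, made-up values): for vendor "IBM",
 * product "57B3001SISIOA" and serial "02E4FDE1", the buffer assembled
 * above reads "IBM 57B3001SISIOA 02E4FDE1" and is logged as, e.g.:
 *
 *	Remote IOA VPID/SN: IBM 57B3001SISIOA 02E4FDE1
 */
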
1580 /**
1581  * ipr_log_vpd - Log the passed VPD to the error log.
1582  * @vpd:                vendor/product id/sn struct
1583  *
1584  * Return value:
1585  *      none
1586  **/
1587 static void ipr_log_vpd(struct ipr_vpd *vpd)
1588 {
1589         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1590                     + IPR_SERIAL_NUM_LEN];
1591
1592         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1593         memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1594                IPR_PROD_ID_LEN);
1595         buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1596         ipr_err("Vendor/Product ID: %s\n", buffer);
1597
1598         memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1599         buffer[IPR_SERIAL_NUM_LEN] = '\0';
1600         ipr_err("    Serial Number: %s\n", buffer);
1601 }
1602
1603 /**
1604  * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1605  * @prefix:             string to print at start of printk
1606  * @hostrcb:    hostrcb pointer
1607  * @vpd:                vendor/product id/sn/wwn struct
1608  *
1609  * Return value:
1610  *      none
1611  **/
1612 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1613                                     struct ipr_ext_vpd *vpd)
1614 {
1615         ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1616         ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1617                      be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1618 }
1619
1620 /**
1621  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1622  * @vpd:                vendor/product id/sn/wwn struct
1623  *
1624  * Return value:
1625  *      none
1626  **/
1627 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1628 {
1629         ipr_log_vpd(&vpd->vpd);
1630         ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1631                 be32_to_cpu(vpd->wwid[1]));
1632 }
1633
1634 /**
1635  * ipr_log_enhanced_cache_error - Log a cache error.
1636  * @ioa_cfg:    ioa config struct
1637  * @hostrcb:    hostrcb struct
1638  *
1639  * Return value:
1640  *      none
1641  **/
1642 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1643                                          struct ipr_hostrcb *hostrcb)
1644 {
1645         struct ipr_hostrcb_type_12_error *error;
1646
1647         if (ioa_cfg->sis64)
1648                 error = &hostrcb->hcam.u.error64.u.type_12_error;
1649         else
1650                 error = &hostrcb->hcam.u.error.u.type_12_error;
1651
1652         ipr_err("-----Current Configuration-----\n");
1653         ipr_err("Cache Directory Card Information:\n");
1654         ipr_log_ext_vpd(&error->ioa_vpd);
1655         ipr_err("Adapter Card Information:\n");
1656         ipr_log_ext_vpd(&error->cfc_vpd);
1657
1658         ipr_err("-----Expected Configuration-----\n");
1659         ipr_err("Cache Directory Card Information:\n");
1660         ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1661         ipr_err("Adapter Card Information:\n");
1662         ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1663
1664         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1665                      be32_to_cpu(error->ioa_data[0]),
1666                      be32_to_cpu(error->ioa_data[1]),
1667                      be32_to_cpu(error->ioa_data[2]));
1668 }
1669
1670 /**
1671  * ipr_log_cache_error - Log a cache error.
1672  * @ioa_cfg:    ioa config struct
1673  * @hostrcb:    hostrcb struct
1674  *
1675  * Return value:
1676  *      none
1677  **/
1678 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1679                                 struct ipr_hostrcb *hostrcb)
1680 {
1681         struct ipr_hostrcb_type_02_error *error =
1682                 &hostrcb->hcam.u.error.u.type_02_error;
1683
1684         ipr_err("-----Current Configuration-----\n");
1685         ipr_err("Cache Directory Card Information:\n");
1686         ipr_log_vpd(&error->ioa_vpd);
1687         ipr_err("Adapter Card Information:\n");
1688         ipr_log_vpd(&error->cfc_vpd);
1689
1690         ipr_err("-----Expected Configuration-----\n");
1691         ipr_err("Cache Directory Card Information:\n");
1692         ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1693         ipr_err("Adapter Card Information:\n");
1694         ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1695
1696         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1697                      be32_to_cpu(error->ioa_data[0]),
1698                      be32_to_cpu(error->ioa_data[1]),
1699                      be32_to_cpu(error->ioa_data[2]));
1700 }
1701
1702 /**
1703  * ipr_log_enhanced_config_error - Log a configuration error.
1704  * @ioa_cfg:    ioa config struct
1705  * @hostrcb:    hostrcb struct
1706  *
1707  * Return value:
1708  *      none
1709  **/
1710 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1711                                           struct ipr_hostrcb *hostrcb)
1712 {
1713         int errors_logged, i;
1714         struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1715         struct ipr_hostrcb_type_13_error *error;
1716
1717         error = &hostrcb->hcam.u.error.u.type_13_error;
1718         errors_logged = be32_to_cpu(error->errors_logged);
1719
1720         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1721                 be32_to_cpu(error->errors_detected), errors_logged);
1722
1723         dev_entry = error->dev;
1724
1725         for (i = 0; i < errors_logged; i++, dev_entry++) {
1726                 ipr_err_separator;
1727
1728                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1729                 ipr_log_ext_vpd(&dev_entry->vpd);
1730
1731                 ipr_err("-----New Device Information-----\n");
1732                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1733
1734                 ipr_err("Cache Directory Card Information:\n");
1735                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1736
1737                 ipr_err("Adapter Card Information:\n");
1738                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1739         }
1740 }
1741
1742 /**
1743  * ipr_log_sis64_config_error - Log a sis64 configuration error.
1744  * @ioa_cfg:    ioa config struct
1745  * @hostrcb:    hostrcb struct
1746  *
1747  * Return value:
1748  *      none
1749  **/
1750 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1751                                        struct ipr_hostrcb *hostrcb)
1752 {
1753         int errors_logged, i;
1754         struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1755         struct ipr_hostrcb_type_23_error *error;
1756         char buffer[IPR_MAX_RES_PATH_LENGTH];
1757
1758         error = &hostrcb->hcam.u.error64.u.type_23_error;
1759         errors_logged = be32_to_cpu(error->errors_logged);
1760
1761         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1762                 be32_to_cpu(error->errors_detected), errors_logged);
1763
1764         dev_entry = error->dev;
1765
1766         for (i = 0; i < errors_logged; i++, dev_entry++) {
1767                 ipr_err_separator;
1768
1769                 ipr_err("Device %d : %s", i + 1,
1770                         __ipr_format_res_path(dev_entry->res_path,
1771                                               buffer, sizeof(buffer)));
1772                 ipr_log_ext_vpd(&dev_entry->vpd);
1773
1774                 ipr_err("-----New Device Information-----\n");
1775                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1776
1777                 ipr_err("Cache Directory Card Information:\n");
1778                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1779
1780                 ipr_err("Adapter Card Information:\n");
1781                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1782         }
1783 }
1784
1785 /**
1786  * ipr_log_config_error - Log a configuration error.
1787  * @ioa_cfg:    ioa config struct
1788  * @hostrcb:    hostrcb struct
1789  *
1790  * Return value:
1791  *      none
1792  **/
1793 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1794                                  struct ipr_hostrcb *hostrcb)
1795 {
1796         int errors_logged, i;
1797         struct ipr_hostrcb_device_data_entry *dev_entry;
1798         struct ipr_hostrcb_type_03_error *error;
1799
1800         error = &hostrcb->hcam.u.error.u.type_03_error;
1801         errors_logged = be32_to_cpu(error->errors_logged);
1802
1803         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1804                 be32_to_cpu(error->errors_detected), errors_logged);
1805
1806         dev_entry = error->dev;
1807
1808         for (i = 0; i < errors_logged; i++, dev_entry++) {
1809                 ipr_err_separator;
1810
1811                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1812                 ipr_log_vpd(&dev_entry->vpd);
1813
1814                 ipr_err("-----New Device Information-----\n");
1815                 ipr_log_vpd(&dev_entry->new_vpd);
1816
1817                 ipr_err("Cache Directory Card Information:\n");
1818                 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1819
1820                 ipr_err("Adapter Card Information:\n");
1821                 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1822
1823                 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1824                         be32_to_cpu(dev_entry->ioa_data[0]),
1825                         be32_to_cpu(dev_entry->ioa_data[1]),
1826                         be32_to_cpu(dev_entry->ioa_data[2]),
1827                         be32_to_cpu(dev_entry->ioa_data[3]),
1828                         be32_to_cpu(dev_entry->ioa_data[4]));
1829         }
1830 }
1831
1832 /**
1833  * ipr_log_enhanced_array_error - Log an array configuration error.
1834  * @ioa_cfg:    ioa config struct
1835  * @hostrcb:    hostrcb struct
1836  *
1837  * Return value:
1838  *      none
1839  **/
1840 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1841                                          struct ipr_hostrcb *hostrcb)
1842 {
1843         int i, num_entries;
1844         struct ipr_hostrcb_type_14_error *error;
1845         struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1846         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1847
1848         error = &hostrcb->hcam.u.error.u.type_14_error;
1849
1850         ipr_err_separator;
1851
1852         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1853                 error->protection_level,
1854                 ioa_cfg->host->host_no,
1855                 error->last_func_vset_res_addr.bus,
1856                 error->last_func_vset_res_addr.target,
1857                 error->last_func_vset_res_addr.lun);
1858
1859         ipr_err_separator;
1860
1861         array_entry = error->array_member;
1862         num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1863                             ARRAY_SIZE(error->array_member));
1864
1865         for (i = 0; i < num_entries; i++, array_entry++) {
1866                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1867                         continue;
1868
1869                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1870                         ipr_err("Exposed Array Member %d:\n", i);
1871                 else
1872                         ipr_err("Array Member %d:\n", i);
1873
1874                 ipr_log_ext_vpd(&array_entry->vpd);
1875                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1876                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1877                                  "Expected Location");
1878
1879                 ipr_err_separator;
1880         }
1881 }
1882
1883 /**
1884  * ipr_log_array_error - Log an array configuration error.
1885  * @ioa_cfg:    ioa config struct
1886  * @hostrcb:    hostrcb struct
1887  *
1888  * Return value:
1889  *      none
1890  **/
1891 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1892                                 struct ipr_hostrcb *hostrcb)
1893 {
1894         int i;
1895         struct ipr_hostrcb_type_04_error *error;
1896         struct ipr_hostrcb_array_data_entry *array_entry;
1897         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1898
1899         error = &hostrcb->hcam.u.error.u.type_04_error;
1900
1901         ipr_err_separator;
1902
1903         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1904                 error->protection_level,
1905                 ioa_cfg->host->host_no,
1906                 error->last_func_vset_res_addr.bus,
1907                 error->last_func_vset_res_addr.target,
1908                 error->last_func_vset_res_addr.lun);
1909
1910         ipr_err_separator;
1911
1912         array_entry = error->array_member;
1913
1914         for (i = 0; i < 18; i++) {
1915                 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1916                         continue;
1917
1918                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1919                         ipr_err("Exposed Array Member %d:\n", i);
1920                 else
1921                         ipr_err("Array Member %d:\n", i);
1922
1923                 ipr_log_vpd(&array_entry->vpd);
1924
1925                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1926                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1927                                  "Expected Location");
1928
1929                 ipr_err_separator;
1930
1931                 if (i == 9)
1932                         array_entry = error->array_member2;
1933                 else
1934                         array_entry++;
1935         }
1936 }
1937
1938 /**
1939  * ipr_log_hex_data - Log additional hex IOA error data.
1940  * @ioa_cfg:    ioa config struct
1941  * @data:               IOA error data
1942  * @len:                data length
1943  *
1944  * Return value:
1945  *      none
1946  **/
1947 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
1948 {
1949         int i;
1950
1951         if (len == 0)
1952                 return;
1953
1954         if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1955                 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1956
1957         for (i = 0; i < len / 4; i += 4) {
1958                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1959                         be32_to_cpu(data[i]),
1960                         be32_to_cpu(data[i+1]),
1961                         be32_to_cpu(data[i+2]),
1962                         be32_to_cpu(data[i+3]));
1963         }
1964 }
1965
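/*
 * Illustrative output format: len is in bytes and the data is walked as
 * big-endian 32-bit words, four per line, each line headed by the byte
 * offset of its first word. A 32-byte buffer therefore dumps as:
 *
 *	00000000: XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX
 *	00000010: XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX
 *
 * At the default log level the dump is capped at
 * IPR_DEFAULT_MAX_ERROR_DUMP bytes.
 */
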
1966 /**
1967  * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1968  * @ioa_cfg:    ioa config struct
1969  * @hostrcb:    hostrcb struct
1970  *
1971  * Return value:
1972  *      none
1973  **/
1974 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1975                                             struct ipr_hostrcb *hostrcb)
1976 {
1977         struct ipr_hostrcb_type_17_error *error;
1978
1979         if (ioa_cfg->sis64)
1980                 error = &hostrcb->hcam.u.error64.u.type_17_error;
1981         else
1982                 error = &hostrcb->hcam.u.error.u.type_17_error;
1983
1984         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1985         strim(error->failure_reason);
1986
1987         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1988                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1989         ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1990         ipr_log_hex_data(ioa_cfg, error->data,
1991                          be32_to_cpu(hostrcb->hcam.length) -
1992                          (offsetof(struct ipr_hostrcb_error, u) +
1993                           offsetof(struct ipr_hostrcb_type_17_error, data)));
1994 }
1995
1996 /**
1997  * ipr_log_dual_ioa_error - Log a dual adapter error.
1998  * @ioa_cfg:    ioa config struct
1999  * @hostrcb:    hostrcb struct
2000  *
2001  * Return value:
2002  *      none
2003  **/
2004 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
2005                                    struct ipr_hostrcb *hostrcb)
2006 {
2007         struct ipr_hostrcb_type_07_error *error;
2008
2009         error = &hostrcb->hcam.u.error.u.type_07_error;
2010         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2011         strim(error->failure_reason);
2012
2013         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
2014                      be32_to_cpu(hostrcb->hcam.u.error.prc));
2015         ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
2016         ipr_log_hex_data(ioa_cfg, error->data,
2017                          be32_to_cpu(hostrcb->hcam.length) -
2018                          (offsetof(struct ipr_hostrcb_error, u) +
2019                           offsetof(struct ipr_hostrcb_type_07_error, data)));
2020 }
2021
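/*
 * Illustrative note: both dual IOA loggers size the trailing hex dump
 * from the HCAM's own length field, subtracting everything that
 * precedes the variable data, e.g. for the type 07 overlay:
 *
 *	data_len = be32_to_cpu(hostrcb->hcam.length)
 *		 - offsetof(struct ipr_hostrcb_error, u)
 *		 - offsetof(struct ipr_hostrcb_type_07_error, data);
 *
 * so only the adapter-supplied words that follow the fixed fields are
 * dumped.
 */
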
2022 static const struct {
2023         u8 active;
2024         char *desc;
2025 } path_active_desc[] = {
2026         { IPR_PATH_NO_INFO, "Path" },
2027         { IPR_PATH_ACTIVE, "Active path" },
2028         { IPR_PATH_NOT_ACTIVE, "Inactive path" }
2029 };
2030
2031 static const struct {
2032         u8 state;
2033         char *desc;
2034 } path_state_desc[] = {
2035         { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
2036         { IPR_PATH_HEALTHY, "is healthy" },
2037         { IPR_PATH_DEGRADED, "is degraded" },
2038         { IPR_PATH_FAILED, "is failed" }
2039 };
2040
2041 /**
2042  * ipr_log_fabric_path - Log a fabric path error
2043  * @hostrcb:    hostrcb struct
2044  * @fabric:             fabric descriptor
2045  *
2046  * Return value:
2047  *      none
2048  **/
2049 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2050                                 struct ipr_hostrcb_fabric_desc *fabric)
2051 {
2052         int i, j;
2053         u8 path_state = fabric->path_state;
2054         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2055         u8 state = path_state & IPR_PATH_STATE_MASK;
2056
2057         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2058                 if (path_active_desc[i].active != active)
2059                         continue;
2060
2061                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2062                         if (path_state_desc[j].state != state)
2063                                 continue;
2064
2065                         if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2066                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2067                                              path_active_desc[i].desc, path_state_desc[j].desc,
2068                                              fabric->ioa_port);
2069                         } else if (fabric->cascaded_expander == 0xff) {
2070                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2071                                              path_active_desc[i].desc, path_state_desc[j].desc,
2072                                              fabric->ioa_port, fabric->phy);
2073                         } else if (fabric->phy == 0xff) {
2074                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2075                                              path_active_desc[i].desc, path_state_desc[j].desc,
2076                                              fabric->ioa_port, fabric->cascaded_expander);
2077                         } else {
2078                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2079                                              path_active_desc[i].desc, path_state_desc[j].desc,
2080                                              fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2081                         }
2082                         return;
2083                 }
2084         }
2085
2086         ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2087                 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2088 }
2089
2090 /**
2091  * ipr_log64_fabric_path - Log a fabric path error
2092  * @hostrcb:    hostrcb struct
2093  * @fabric:             fabric descriptor
2094  *
2095  * Return value:
2096  *      none
2097  **/
2098 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2099                                   struct ipr_hostrcb64_fabric_desc *fabric)
2100 {
2101         int i, j;
2102         u8 path_state = fabric->path_state;
2103         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2104         u8 state = path_state & IPR_PATH_STATE_MASK;
2105         char buffer[IPR_MAX_RES_PATH_LENGTH];
2106
2107         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2108                 if (path_active_desc[i].active != active)
2109                         continue;
2110
2111                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2112                         if (path_state_desc[j].state != state)
2113                                 continue;
2114
2115                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2116                                      path_active_desc[i].desc, path_state_desc[j].desc,
2117                                      ipr_format_res_path(hostrcb->ioa_cfg,
2118                                                 fabric->res_path,
2119                                                 buffer, sizeof(buffer)));
2120                         return;
2121                 }
2122         }
2123
2124         ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2125                 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2126                                     buffer, sizeof(buffer)));
2127 }
2128
2129 static const struct {
2130         u8 type;
2131         char *desc;
2132 } path_type_desc[] = {
2133         { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2134         { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2135         { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2136         { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2137 };
2138
2139 static const struct {
2140         u8 status;
2141         char *desc;
2142 } path_status_desc[] = {
2143         { IPR_PATH_CFG_NO_PROB, "Functional" },
2144         { IPR_PATH_CFG_DEGRADED, "Degraded" },
2145         { IPR_PATH_CFG_FAILED, "Failed" },
2146         { IPR_PATH_CFG_SUSPECT, "Suspect" },
2147         { IPR_PATH_NOT_DETECTED, "Missing" },
2148         { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2149 };
2150
2151 static const char *link_rate[] = {
2152         "unknown",
2153         "disabled",
2154         "phy reset problem",
2155         "spinup hold",
2156         "port selector",
2157         "unknown",
2158         "unknown",
2159         "unknown",
2160         "1.5Gbps",
2161         "3.0Gbps",
2162         "unknown",
2163         "unknown",
2164         "unknown",
2165         "unknown",
2166         "unknown",
2167         "unknown"
2168 };
2169
2170 /**
2171  * ipr_log_path_elem - Log a fabric path element.
2172  * @hostrcb:    hostrcb struct
2173  * @cfg:                fabric path element struct
2174  *
2175  * Return value:
2176  *      none
2177  **/
2178 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2179                               struct ipr_hostrcb_config_element *cfg)
2180 {
2181         int i, j;
2182         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2183         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2184
2185         if (type == IPR_PATH_CFG_NOT_EXIST)
2186                 return;
2187
2188         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2189                 if (path_type_desc[i].type != type)
2190                         continue;
2191
2192                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2193                         if (path_status_desc[j].status != status)
2194                                 continue;
2195
2196                         if (type == IPR_PATH_CFG_IOA_PORT) {
2197                                 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2198                                              path_status_desc[j].desc, path_type_desc[i].desc,
2199                                              cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2200                                              be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2201                         } else {
2202                                 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2203                                         ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2204                                                      path_status_desc[j].desc, path_type_desc[i].desc,
2205                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2206                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2207                                 } else if (cfg->cascaded_expander == 0xff) {
2208                                         ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2209                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2210                                                      path_type_desc[i].desc, cfg->phy,
2211                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2212                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2213                                 } else if (cfg->phy == 0xff) {
2214                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2215                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2216                                                      path_type_desc[i].desc, cfg->cascaded_expander,
2217                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2218                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2219                                 } else {
2220                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2221                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2222                                                      path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2223                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2224                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2225                                 }
2226                         }
2227                         return;
2228                 }
2229         }
2230
2231         ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2232                      "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2233                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2234                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2235 }
2236
2237 /**
2238  * ipr_log64_path_elem - Log a fabric path element.
2239  * @hostrcb:    hostrcb struct
2240  * @cfg:                fabric path element struct
2241  *
2242  * Return value:
2243  *      none
2244  **/
2245 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2246                                 struct ipr_hostrcb64_config_element *cfg)
2247 {
2248         int i, j;
2249         u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2250         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2251         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2252         char buffer[IPR_MAX_RES_PATH_LENGTH];
2253
2254         if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2255                 return;
2256
2257         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2258                 if (path_type_desc[i].type != type)
2259                         continue;
2260
2261                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2262                         if (path_status_desc[j].status != status)
2263                                 continue;
2264
2265                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2266                                      path_status_desc[j].desc, path_type_desc[i].desc,
2267                                      ipr_format_res_path(hostrcb->ioa_cfg,
2268                                         cfg->res_path, buffer, sizeof(buffer)),
2269                                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2270                                         be32_to_cpu(cfg->wwid[0]),
2271                                         be32_to_cpu(cfg->wwid[1]));
2272                         return;
2273                 }
2274         }
2275         ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2276                      "WWN=%08X%08X\n", cfg->type_status,
2277                      ipr_format_res_path(hostrcb->ioa_cfg,
2278                         cfg->res_path, buffer, sizeof(buffer)),
2279                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2280                         be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2281 }
2282
2283 /**
2284  * ipr_log_fabric_error - Log a fabric error.
2285  * @ioa_cfg:    ioa config struct
2286  * @hostrcb:    hostrcb struct
2287  *
2288  * Return value:
2289  *      none
2290  **/
2291 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2292                                  struct ipr_hostrcb *hostrcb)
2293 {
2294         struct ipr_hostrcb_type_20_error *error;
2295         struct ipr_hostrcb_fabric_desc *fabric;
2296         struct ipr_hostrcb_config_element *cfg;
2297         int i, add_len;
2298
2299         error = &hostrcb->hcam.u.error.u.type_20_error;
2300         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2301         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2302
2303         add_len = be32_to_cpu(hostrcb->hcam.length) -
2304                 (offsetof(struct ipr_hostrcb_error, u) +
2305                  offsetof(struct ipr_hostrcb_type_20_error, desc));
2306
2307         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2308                 ipr_log_fabric_path(hostrcb, fabric);
2309                 for_each_fabric_cfg(fabric, cfg)
2310                         ipr_log_path_elem(hostrcb, cfg);
2311
2312                 add_len -= be16_to_cpu(fabric->length);
2313                 fabric = (struct ipr_hostrcb_fabric_desc *)
2314                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2315         }
2316
2317         ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2318 }
2319
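/*
 * Illustrative note: fabric descriptors are variable length, so the
 * walk above advances by each descriptor's own length field rather
 * than by sizeof(*fabric):
 *
 *	fabric = (struct ipr_hostrcb_fabric_desc *)
 *		((unsigned long)fabric + be16_to_cpu(fabric->length));
 *
 * add_len tracks what remains; whatever trails the last descriptor is
 * dumped raw via ipr_log_hex_data().
 */
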
2320 /**
2321  * ipr_log_sis64_array_error - Log a sis64 array error.
2322  * @ioa_cfg:    ioa config struct
2323  * @hostrcb:    hostrcb struct
2324  *
2325  * Return value:
2326  *      none
2327  **/
2328 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2329                                       struct ipr_hostrcb *hostrcb)
2330 {
2331         int i, num_entries;
2332         struct ipr_hostrcb_type_24_error *error;
2333         struct ipr_hostrcb64_array_data_entry *array_entry;
2334         char buffer[IPR_MAX_RES_PATH_LENGTH];
2335         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2336
2337         error = &hostrcb->hcam.u.error64.u.type_24_error;
2338
2339         ipr_err_separator;
2340
2341         ipr_err("RAID %s Array Configuration: %s\n",
2342                 error->protection_level,
2343                 ipr_format_res_path(ioa_cfg, error->last_res_path,
2344                         buffer, sizeof(buffer)));
2345
2346         ipr_err_separator;
2347
2348         array_entry = error->array_member;
2349         num_entries = min_t(u32, error->num_entries,
2350                             ARRAY_SIZE(error->array_member));
2351
2352         for (i = 0; i < num_entries; i++, array_entry++) {
2353
2354                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2355                         continue;
2356
2357                 if (error->exposed_mode_adn == i)
2358                         ipr_err("Exposed Array Member %d:\n", i);
2359                 else
2360                         ipr_err("Array Member %d:\n", i);
2361
2363                 ipr_log_ext_vpd(&array_entry->vpd);
2364                 ipr_err("Current Location: %s\n",
2365                          ipr_format_res_path(ioa_cfg, array_entry->res_path,
2366                                 buffer, sizeof(buffer)));
2367                 ipr_err("Expected Location: %s\n",
2368                          ipr_format_res_path(ioa_cfg,
2369                                 array_entry->expected_res_path,
2370                                 buffer, sizeof(buffer)));
2371
2372                 ipr_err_separator;
2373         }
2374 }
2375
2376 /**
2377  * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2378  * @ioa_cfg:    ioa config struct
2379  * @hostrcb:    hostrcb struct
2380  *
2381  * Return value:
2382  *      none
2383  **/
2384 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2385                                        struct ipr_hostrcb *hostrcb)
2386 {
2387         struct ipr_hostrcb_type_30_error *error;
2388         struct ipr_hostrcb64_fabric_desc *fabric;
2389         struct ipr_hostrcb64_config_element *cfg;
2390         int i, add_len;
2391
2392         error = &hostrcb->hcam.u.error64.u.type_30_error;
2393
2394         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2395         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2396
2397         add_len = be32_to_cpu(hostrcb->hcam.length) -
2398                 (offsetof(struct ipr_hostrcb64_error, u) +
2399                  offsetof(struct ipr_hostrcb_type_30_error, desc));
2400
2401         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2402                 ipr_log64_fabric_path(hostrcb, fabric);
2403                 for_each_fabric_cfg(fabric, cfg)
2404                         ipr_log64_path_elem(hostrcb, cfg);
2405
2406                 add_len -= be16_to_cpu(fabric->length);
2407                 fabric = (struct ipr_hostrcb64_fabric_desc *)
2408                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2409         }
2410
2411         ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2412 }
2413
2414 /**
2415  * ipr_log_generic_error - Log an adapter error.
2416  * @ioa_cfg:    ioa config struct
2417  * @hostrcb:    hostrcb struct
2418  *
2419  * Return value:
2420  *      none
2421  **/
2422 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2423                                   struct ipr_hostrcb *hostrcb)
2424 {
2425         ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2426                          be32_to_cpu(hostrcb->hcam.length));
2427 }
2428
2429 /**
2430  * ipr_log_sis64_device_error - Log a sis64 device error.
2431  * @ioa_cfg:    ioa config struct
2432  * @hostrcb:    hostrcb struct
2433  *
2434  * Return value:
2435  *      none
2436  **/
2437 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2438                                          struct ipr_hostrcb *hostrcb)
2439 {
2440         struct ipr_hostrcb_type_21_error *error;
2441         char buffer[IPR_MAX_RES_PATH_LENGTH];
2442
2443         error = &hostrcb->hcam.u.error64.u.type_21_error;
2444
2445         ipr_err("-----Failing Device Information-----\n");
2446         ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2447                 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2448                 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2449         ipr_err("Device Resource Path: %s\n",
2450                 __ipr_format_res_path(error->res_path,
2451                                       buffer, sizeof(buffer)));
2452         error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2453         error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2454         ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2455         ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
2456         ipr_err("SCSI Sense Data:\n");
2457         ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2458         ipr_err("SCSI Command Descriptor Block:\n");
2459         ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2460
2461         ipr_err("Additional IOA Data:\n");
2462         ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2463 }
2464
2465 /**
2466  * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2467  * @ioasc:      IOASC
2468  *
2469  * This function will return the index into the ipr_error_table
2470  * for the specified IOASC. If the IOASC is not in the table,
2471  * 0 will be returned, which points to the entry used for unknown errors.
2472  *
2473  * Return value:
2474  *      index into the ipr_error_table
2475  **/
2476 static u32 ipr_get_error(u32 ioasc)
2477 {
2478         int i;
2479
2480         for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2481                 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2482                         return i;
2483
2484         return 0;
2485 }
2486
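/*
 * Typical lookup (illustrative): the IOASC is masked inside the helper,
 * so callers pass the raw value straight from the IOASA:
 *
 *	error_index = ipr_get_error(ioasc);
 *	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
 *
 * Index 0 doubles as the catch-all entry for IOASCs not in the table.
 */
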
2487 /**
2488  * ipr_handle_log_data - Log an adapter error.
2489  * @ioa_cfg:    ioa config struct
2490  * @hostrcb:    hostrcb struct
2491  *
2492  * This function logs an adapter error to the system.
2493  *
2494  * Return value:
2495  *      none
2496  **/
2497 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2498                                 struct ipr_hostrcb *hostrcb)
2499 {
2500         u32 ioasc;
2501         int error_index;
2502         struct ipr_hostrcb_type_21_error *error;
2503
2504         if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2505                 return;
2506
2507         if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2508                 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2509
2510         if (ioa_cfg->sis64)
2511                 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2512         else
2513                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2514
2515         if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2516             ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2517                 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2518                 scsi_report_bus_reset(ioa_cfg->host,
2519                                       hostrcb->hcam.u.error.fd_res_addr.bus);
2520         }
2521
2522         error_index = ipr_get_error(ioasc);
2523
2524         if (!ipr_error_table[error_index].log_hcam)
2525                 return;
2526
2527         if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2528             hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2529                 error = &hostrcb->hcam.u.error64.u.type_21_error;
2530
2531                 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2532                         ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2533                                 return;
2534         }
2535
2536         ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2537
2538         /* Set indication we have logged an error */
2539         ioa_cfg->errors_logged++;
2540
2541         if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2542                 return;
2543         if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2544                 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2545
2546         switch (hostrcb->hcam.overlay_id) {
2547         case IPR_HOST_RCB_OVERLAY_ID_2:
2548                 ipr_log_cache_error(ioa_cfg, hostrcb);
2549                 break;
2550         case IPR_HOST_RCB_OVERLAY_ID_3:
2551                 ipr_log_config_error(ioa_cfg, hostrcb);
2552                 break;
2553         case IPR_HOST_RCB_OVERLAY_ID_4:
2554         case IPR_HOST_RCB_OVERLAY_ID_6:
2555                 ipr_log_array_error(ioa_cfg, hostrcb);
2556                 break;
2557         case IPR_HOST_RCB_OVERLAY_ID_7:
2558                 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2559                 break;
2560         case IPR_HOST_RCB_OVERLAY_ID_12:
2561                 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2562                 break;
2563         case IPR_HOST_RCB_OVERLAY_ID_13:
2564                 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2565                 break;
2566         case IPR_HOST_RCB_OVERLAY_ID_14:
2567         case IPR_HOST_RCB_OVERLAY_ID_16:
2568                 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2569                 break;
2570         case IPR_HOST_RCB_OVERLAY_ID_17:
2571                 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2572                 break;
2573         case IPR_HOST_RCB_OVERLAY_ID_20:
2574                 ipr_log_fabric_error(ioa_cfg, hostrcb);
2575                 break;
2576         case IPR_HOST_RCB_OVERLAY_ID_21:
2577                 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2578                 break;
2579         case IPR_HOST_RCB_OVERLAY_ID_23:
2580                 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2581                 break;
2582         case IPR_HOST_RCB_OVERLAY_ID_24:
2583         case IPR_HOST_RCB_OVERLAY_ID_26:
2584                 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2585                 break;
2586         case IPR_HOST_RCB_OVERLAY_ID_30:
2587                 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2588                 break;
2589         case IPR_HOST_RCB_OVERLAY_ID_1:
2590         case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2591         default:
2592                 ipr_log_generic_error(ioa_cfg, hostrcb);
2593                 break;
2594         }
2595 }
2596
2597 static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
2598 {
2599         struct ipr_hostrcb *hostrcb;
2600
2601         hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
2602                                         struct ipr_hostrcb, queue);
2603
2604         if (unlikely(!hostrcb)) {
2605                 dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.\n");
2606                 hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
2607                                                 struct ipr_hostrcb, queue);
2608         }
2609
2610         list_del_init(&hostrcb->queue);
2611         return hostrcb;
2612 }
2613
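/*
 * Note (assumption, not guaranteed by the code above): if
 * hostrcb_report_q were empty as well, list_del_init() would
 * dereference a NULL hostrcb; the driver appears to rely on more
 * hostrcbs being allocated than can ever be outstanding at once.
 */
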
2614 /**
2615  * ipr_process_error - Op done function for an adapter error log.
2616  * @ipr_cmd:    ipr command struct
2617  *
2618  * This function is the op done function for an error log host
2619  * controlled async message (HCAM) from the adapter. It will log
2620  * the error and send the HCAM back to the adapter.
2621  *
2622  * Return value:
2623  *      none
2624  **/
2625 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2626 {
2627         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2628         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2629         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2630         u32 fd_ioasc;
2631
2632         if (ioa_cfg->sis64)
2633                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2634         else
2635                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2636
2637         list_del_init(&hostrcb->queue);
2638         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2639
2640         if (!ioasc) {
2641                 ipr_handle_log_data(ioa_cfg, hostrcb);
2642                 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2643                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2644         } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2645                    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
2646                 dev_err(&ioa_cfg->pdev->dev,
2647                         "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2648         }
2649
2650         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
2651         schedule_work(&ioa_cfg->work_q);
2652         hostrcb = ipr_get_free_hostrcb(ioa_cfg);
2653
2654         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2655 }
2656
2657 /**
2658  * ipr_timeout -  An internally generated op has timed out.
2659  * @ipr_cmd:    ipr command struct
2660  *
2661  * This function blocks host requests and initiates an
2662  * adapter reset.
2663  *
2664  * Return value:
2665  *      none
2666  **/
2667 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2668 {
2669         unsigned long lock_flags = 0;
2670         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2671
2672         ENTER;
2673         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2674
2675         ioa_cfg->errors_logged++;
2676         dev_err(&ioa_cfg->pdev->dev,
2677                 "Adapter being reset due to command timeout.\n");
2678
2679         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2680                 ioa_cfg->sdt_state = GET_DUMP;
2681
2682         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2683                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2684
2685         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2686         LEAVE;
2687 }
2688
2689 /**
2690  * ipr_oper_timeout -  Adapter timed out transitioning to operational
2691  * @ipr_cmd:    ipr command struct
2692  *
2693  * This function blocks host requests and initiates an
2694  * adapter reset.
2695  *
2696  * Return value:
2697  *      none
2698  **/
2699 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2700 {
2701         unsigned long lock_flags = 0;
2702         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2703
2704         ENTER;
2705         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2706
2707         ioa_cfg->errors_logged++;
2708         dev_err(&ioa_cfg->pdev->dev,
2709                 "Adapter timed out transitioning to operational.\n");
2710
2711         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2712                 ioa_cfg->sdt_state = GET_DUMP;
2713
2714         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2715                 if (ipr_fastfail)
2716                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2717                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2718         }
2719
2720         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2721         LEAVE;
2722 }
2723
2724 /**
2725  * ipr_find_ses_entry - Find matching SES in SES table
2726  * @res:        resource entry struct of SES
2727  *
2728  * Return value:
2729  *      pointer to SES table entry / NULL on failure
2730  **/
2731 static const struct ipr_ses_table_entry *
2732 ipr_find_ses_entry(struct ipr_resource_entry *res)
2733 {
2734         int i, j, matches;
2735         struct ipr_std_inq_vpids *vpids;
2736         const struct ipr_ses_table_entry *ste = ipr_ses_table;
2737
2738         for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2739                 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2740                         if (ste->compare_product_id_byte[j] == 'X') {
2741                                 vpids = &res->std_inq_data.vpids;
2742                                 if (vpids->product_id[j] == ste->product_id[j])
2743                                         matches++;
2744                                 else
2745                                         break;
2746                         } else
2747                                 matches++;
2748                 }
2749
2750                 if (matches == IPR_PROD_ID_LEN)
2751                         return ste;
2752         }
2753
2754         return NULL;
2755 }
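
/*
 * Matching sketch (hypothetical entry, not taken from the real
 * ipr_ses_table): an 'X' in compare_product_id_byte marks a product ID
 * byte that must match; any other byte is a "don't care".
 *
 *	{ .compare_product_id_byte = "XXXXX           ",
 *	  .product_id              = "HSBP05M         ", ... }
 *
 * would match every SES device whose inquiry product ID begins with
 * "HSBP0", regardless of the remaining bytes.
 */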
2756
2757 /**
2758  * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2759  * @ioa_cfg:    ioa config struct
2760  * @bus:                SCSI bus
2761  * @bus_width:  bus width
2762  *
2763  * Return value:
2764  *      SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2765  *      SCSI bus speed in units of 100KHz; 1600 means 160 MHz.
2766  *      For a 2-byte (wide) SCSI bus, the maximum transfer rate in
2767  *      MB/sec is twice the clock rate (e.g. a wide bus clocked at
2768  *      160 MHz moves up to 320 MB/sec).
2769 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2770 {
2771         struct ipr_resource_entry *res;
2772         const struct ipr_ses_table_entry *ste;
2773         u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2774
2775         /* Loop through each config table entry in the config table buffer */
2776         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2777                 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2778                         continue;
2779
2780                 if (bus != res->bus)
2781                         continue;
2782
2783                 if (!(ste = ipr_find_ses_entry(res)))
2784                         continue;
2785
2786                 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2787         }
2788
2789         return max_xfer_rate;
2790 }
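
/*
 * Worked example (assuming max_bus_speed_limit is expressed in MB/sec,
 * consistent with the units documented above): a SES entry limiting a
 * 16-bit wide bus to 320 MB/sec yields
 *
 *	max_xfer_rate = (320 * 10) / (16 / 8) = 1600
 *
 * i.e. 1600 units of 100KHz, or a 160 MHz clock.
 */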
2791
2792 /**
2793  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2794  * @ioa_cfg:            ioa config struct
2795  * @max_delay:          max delay in micro-seconds to wait
2796  *
2797  * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2798  *
2799  * Return value:
2800  *      0 on success / other on failure
2801  **/
2802 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2803 {
2804         volatile u32 pcii_reg;
2805         int delay = 1;
2806
2807         /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2808         while (delay < max_delay) {
2809                 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2810
2811                 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2812                         return 0;
2813
2814                 /* udelay cannot be used if delay is more than a few milliseconds */
2815                 if ((delay / 1000) > MAX_UDELAY_MS)
2816                         mdelay(delay / 1000);
2817                 else
2818                         udelay(delay);
2819
2820                 delay += delay;
2821         }
2822         return -EIO;
2823 }
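
/*
 * The polling delay above doubles each iteration (1, 2, 4, ...
 * microseconds), so the total time spent busy-waiting before giving up
 * is bounded by roughly 2 * max_delay microseconds.
 */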
2824
2825 /**
2826  * ipr_get_sis64_dump_data_section - Dump IOA memory
2827  * @ioa_cfg:                    ioa config struct
2828  * @start_addr:                 adapter address to dump
2829  * @dest:                       destination kernel buffer
2830  * @length_in_words:            length to dump in 4 byte words
2831  *
2832  * Return value:
2833  *      0 on success
2834  **/
2835 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2836                                            u32 start_addr,
2837                                            __be32 *dest, u32 length_in_words)
2838 {
2839         int i;
2840
2841         for (i = 0; i < length_in_words; i++) {
2842                 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2843                 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2844                 dest++;
2845         }
2846
2847         return 0;
2848 }
2849
2850 /**
2851  * ipr_get_ldump_data_section - Dump IOA memory
2852  * @ioa_cfg:                    ioa config struct
2853  * @start_addr:                 adapter address to dump
2854  * @dest:                       destination kernel buffer
2855  * @length_in_words:            length to dump in 4 byte words
2856  *
2857  * Return value:
2858  *      0 on success / -EIO on failure
2859  **/
2860 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2861                                       u32 start_addr,
2862                                       __be32 *dest, u32 length_in_words)
2863 {
2864         volatile u32 temp_pcii_reg;
2865         int i, delay = 0;
2866
2867         if (ioa_cfg->sis64)
2868                 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2869                                                        dest, length_in_words);
2870
2871         /* Write IOA interrupt reg starting LDUMP state  */
2872         writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2873                ioa_cfg->regs.set_uproc_interrupt_reg32);
2874
2875         /* Wait for IO debug acknowledge */
2876         if (ipr_wait_iodbg_ack(ioa_cfg,
2877                                IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2878                 dev_err(&ioa_cfg->pdev->dev,
2879                         "IOA dump long data transfer timeout\n");
2880                 return -EIO;
2881         }
2882
2883         /* Signal LDUMP interlocked - clear IO debug ack */
2884         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2885                ioa_cfg->regs.clr_interrupt_reg);
2886
2887         /* Write Mailbox with starting address */
2888         writel(start_addr, ioa_cfg->ioa_mailbox);
2889
2890         /* Signal address valid - clear IOA Reset alert */
2891         writel(IPR_UPROCI_RESET_ALERT,
2892                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2893
2894         for (i = 0; i < length_in_words; i++) {
2895                 /* Wait for IO debug acknowledge */
2896                 if (ipr_wait_iodbg_ack(ioa_cfg,
2897                                        IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2898                         dev_err(&ioa_cfg->pdev->dev,
2899                                 "IOA dump short data transfer timeout\n");
2900                         return -EIO;
2901                 }
2902
2903                 /* Read data from mailbox and increment destination pointer */
2904                 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2905                 dest++;
2906
2907                 /* For all but the last word of data, signal data received */
2908                 if (i < (length_in_words - 1)) {
2909                         /* Signal dump data received - Clear IO debug Ack */
2910                         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2911                                ioa_cfg->regs.clr_interrupt_reg);
2912                 }
2913         }
2914
2915         /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2916         writel(IPR_UPROCI_RESET_ALERT,
2917                ioa_cfg->regs.set_uproc_interrupt_reg32);
2918
2919         writel(IPR_UPROCI_IO_DEBUG_ALERT,
2920                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2921
2922         /* Signal dump data received - Clear IO debug Ack */
2923         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2924                ioa_cfg->regs.clr_interrupt_reg);
2925
2926         /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2927         while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2928                 temp_pcii_reg =
2929                     readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2930
2931                 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2932                         return 0;
2933
2934                 udelay(10);
2935                 delay += 10;
2936         }
2937
2938         return 0;
2939 }
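
/*
 * LDUMP handshake summary (the 32-bit path implemented above):
 *
 *	1. Host raises RESET_ALERT | IO_DEBUG_ALERT and waits for the
 *	   IOA's IO debug acknowledge.
 *	2. Host writes the dump address to the mailbox and clears
 *	   RESET_ALERT to mark the address valid.
 *	3. For each word: wait for the ack, read the word from the
 *	   mailbox, then clear the ack to request the next word.
 *	4. Host signals end of transfer and polls for the IOA to drop
 *	   RESET_ALERT, indicating LDUMP exit.
 */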
2940
2941 #ifdef CONFIG_SCSI_IPR_DUMP
2942 /**
2943  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2944  * @ioa_cfg:            ioa config struct
2945  * @pci_address:        adapter address
2946  * @length:             length of data to copy
2947  *
2948  * Copy data from PCI adapter to kernel buffer.
2949  * Note: length MUST be a 4 byte multiple
2950  * Return value:
2951  *      0 on success / other on failure
2952  **/
2953 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2954                         unsigned long pci_address, u32 length)
2955 {
2956         int bytes_copied = 0;
2957         int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2958         __be32 *page;
2959         unsigned long lock_flags = 0;
2960         struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2961
2962         if (ioa_cfg->sis64)
2963                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2964         else
2965                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2966
2967         while (bytes_copied < length &&
2968                (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2969                 if (ioa_dump->page_offset >= PAGE_SIZE ||
2970                     ioa_dump->page_offset == 0) {
2971                         page = (__be32 *)__get_free_page(GFP_ATOMIC);
2972
2973                         if (!page) {
2974                                 ipr_trace;
2975                                 return bytes_copied;
2976                         }
2977
2978                         ioa_dump->page_offset = 0;
2979                         ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2980                         ioa_dump->next_page_index++;
2981                 } else
2982                         page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2983
2984                 rem_len = length - bytes_copied;
2985                 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2986                 cur_len = min(rem_len, rem_page_len);
2987
2988                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2989                 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2990                         rc = -EIO;
2991                 } else {
2992                         rc = ipr_get_ldump_data_section(ioa_cfg,
2993                                                         pci_address + bytes_copied,
2994                                                         &page[ioa_dump->page_offset / 4],
2995                                                         (cur_len / sizeof(u32)));
2996                 }
2997                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2998
2999                 if (!rc) {
3000                         ioa_dump->page_offset += cur_len;
3001                         bytes_copied += cur_len;
3002                 } else {
3003                         ipr_trace;
3004                         break;
3005                 }
3006                 schedule();
3007         }
3008
3009         return bytes_copied;
3010 }
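
/*
 * Page accounting sketch: with 4 KiB pages, copying length = 10000
 * bytes allocates three pages; each pass copies
 * cur_len = min(rem_len, rem_page_len) bytes, so the transfers are
 * 4096, 4096 and 1808 bytes, with page_offset tracking the fill level
 * of the current page.
 */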
3011
3012 /**
3013  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
3014  * @hdr:        dump entry header struct
3015  *
3016  * Return value:
3017  *      nothing
3018  **/
3019 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
3020 {
3021         hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
3022         hdr->num_elems = 1;
3023         hdr->offset = sizeof(*hdr);
3024         hdr->status = IPR_DUMP_STATUS_SUCCESS;
3025 }
3026
3027 /**
3028  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
3029  * @ioa_cfg:    ioa config struct
3030  * @driver_dump:        driver dump struct
3031  *
3032  * Return value:
3033  *      nothing
3034  **/
3035 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
3036                                    struct ipr_driver_dump *driver_dump)
3037 {
3038         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3039
3040         ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
3041         driver_dump->ioa_type_entry.hdr.len =
3042                 sizeof(struct ipr_dump_ioa_type_entry) -
3043                 sizeof(struct ipr_dump_entry_header);
3044         driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3045         driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
3046         driver_dump->ioa_type_entry.type = ioa_cfg->type;
3047         driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
3048                 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
3049                 ucode_vpd->minor_release[1];
3050         driver_dump->hdr.num_entries++;
3051 }
3052
3053 /**
3054  * ipr_dump_version_data - Fill in the driver version in the dump.
3055  * @ioa_cfg:    ioa config struct
3056  * @driver_dump:        driver dump struct
3057  *
3058  * Return value:
3059  *      nothing
3060  **/
3061 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
3062                                   struct ipr_driver_dump *driver_dump)
3063 {
3064         ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
3065         driver_dump->version_entry.hdr.len =
3066                 sizeof(struct ipr_dump_version_entry) -
3067                 sizeof(struct ipr_dump_entry_header);
3068         driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3069         driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
3070         strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3071         driver_dump->hdr.num_entries++;
3072 }
3073
3074 /**
3075  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3076  * @ioa_cfg:    ioa config struct
3077  * @driver_dump:        driver dump struct
3078  *
3079  * Return value:
3080  *      nothing
3081  **/
3082 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3083                                    struct ipr_driver_dump *driver_dump)
3084 {
3085         ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3086         driver_dump->trace_entry.hdr.len =
3087                 sizeof(struct ipr_dump_trace_entry) -
3088                 sizeof(struct ipr_dump_entry_header);
3089         driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3090         driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3091         memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3092         driver_dump->hdr.num_entries++;
3093 }
3094
3095 /**
3096  * ipr_dump_location_data - Fill in the IOA location in the dump.
3097  * @ioa_cfg:    ioa config struct
3098  * @driver_dump:        driver dump struct
3099  *
3100  * Return value:
3101  *      nothing
3102  **/
3103 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3104                                    struct ipr_driver_dump *driver_dump)
3105 {
3106         ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3107         driver_dump->location_entry.hdr.len =
3108                 sizeof(struct ipr_dump_location_entry) -
3109                 sizeof(struct ipr_dump_entry_header);
3110         driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3111         driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3112         strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3113         driver_dump->hdr.num_entries++;
3114 }
3115
3116 /**
3117  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3118  * @ioa_cfg:    ioa config struct
3119  * @dump:               dump struct
3120  *
3121  * Return value:
3122  *      nothing
3123  **/
3124 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3125 {
3126         unsigned long start_addr, sdt_word;
3127         unsigned long lock_flags = 0;
3128         struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3129         struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3130         u32 num_entries, max_num_entries, start_off, end_off;
3131         u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3132         struct ipr_sdt *sdt;
3133         int valid = 1;
3134         int i;
3135
3136         ENTER;
3137
3138         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3139
3140         if (ioa_cfg->sdt_state != READ_DUMP) {
3141                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3142                 return;
3143         }
3144
3145         if (ioa_cfg->sis64) {
3146                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3147                 ssleep(IPR_DUMP_DELAY_SECONDS);
3148                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3149         }
3150
3151         start_addr = readl(ioa_cfg->ioa_mailbox);
3152
3153         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3154                 dev_err(&ioa_cfg->pdev->dev,
3155                         "Invalid dump table format: %lx\n", start_addr);
3156                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3157                 return;
3158         }
3159
3160         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3161
3162         driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3163
3164         /* Initialize the overall dump header */
3165         driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3166         driver_dump->hdr.num_entries = 1;
3167         driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3168         driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3169         driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3170         driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3171
3172         ipr_dump_version_data(ioa_cfg, driver_dump);
3173         ipr_dump_location_data(ioa_cfg, driver_dump);
3174         ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3175         ipr_dump_trace_data(ioa_cfg, driver_dump);
3176
3177         /* Update dump_header */
3178         driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3179
3180         /* IOA Dump entry */
3181         ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3182         ioa_dump->hdr.len = 0;
3183         ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3184         ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3185
3186         /* First entries in sdt are actually a list of dump addresses and
3187          * lengths to gather the real dump data.  sdt represents the pointer
3188          * to the IOA generated dump table.  Dump data will be extracted based
3189          * on entries in this table. */
3190         sdt = &ioa_dump->sdt;
3191
3192         if (ioa_cfg->sis64) {
3193                 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3194                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3195         } else {
3196                 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3197                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3198         }
3199
3200         bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3201                         (max_num_entries * sizeof(struct ipr_sdt_entry));
3202         rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3203                                         bytes_to_copy / sizeof(__be32));
3204
3205         /* Smart Dump table is ready to use and the first entry is valid */
3206         if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3207             (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3208                 dev_err(&ioa_cfg->pdev->dev,
3209                         "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3210                         rc, be32_to_cpu(sdt->hdr.state));
3211                 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3212                 ioa_cfg->sdt_state = DUMP_OBTAINED;
3213                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3214                 return;
3215         }
3216
3217         num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3218
3219         if (num_entries > max_num_entries)
3220                 num_entries = max_num_entries;
3221
3222         /* Update dump length to the actual data to be copied */
3223         dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3224         if (ioa_cfg->sis64)
3225                 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3226         else
3227                 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3228
3229         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3230
3231         for (i = 0; i < num_entries; i++) {
3232                 if (ioa_dump->hdr.len > max_dump_size) {
3233                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3234                         break;
3235                 }
3236
3237                 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3238                         sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3239                         if (ioa_cfg->sis64)
3240                                 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3241                         else {
3242                                 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3243                                 end_off = be32_to_cpu(sdt->entry[i].end_token);
3244
3245                                 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3246                                         bytes_to_copy = end_off - start_off;
3247                                 else
3248                                         valid = 0;
3249                         }
3250                         if (valid) {
3251                                 if (bytes_to_copy > max_dump_size) {
3252                                         sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3253                                         continue;
3254                                 }
3255
3256                                 /* Copy data from adapter to driver buffers */
3257                                 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3258                                                             bytes_to_copy);
3259
3260                                 ioa_dump->hdr.len += bytes_copied;
3261
3262                                 if (bytes_copied != bytes_to_copy) {
3263                                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3264                                         break;
3265                                 }
3266                         }
3267                 }
3268         }
3269
3270         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3271
3272         /* Update dump_header */
3273         driver_dump->hdr.len += ioa_dump->hdr.len;
3274         wmb();
3275         ioa_cfg->sdt_state = DUMP_OBTAINED;
3276         LEAVE;
3277 }
3278
3279 #else
3280 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3281 #endif
3282
3283 /**
3284  * ipr_release_dump - Free adapter dump memory
3285  * @kref:       kref struct
3286  *
3287  * Return value:
3288  *      nothing
3289  **/
3290 static void ipr_release_dump(struct kref *kref)
3291 {
3292         struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3293         struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3294         unsigned long lock_flags = 0;
3295         int i;
3296
3297         ENTER;
3298         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3299         ioa_cfg->dump = NULL;
3300         ioa_cfg->sdt_state = INACTIVE;
3301         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3302
3303         for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3304                 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3305
3306         vfree(dump->ioa_dump.ioa_data);
3307         kfree(dump);
3308         LEAVE;
3309 }
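
/*
 * Reference counting sketch (this is the usage pattern visible in
 * ipr_worker_thread below; kref_init() happens where the dump is
 * allocated, outside this excerpt):
 *
 *	kref_get(&dump->kref);                    // pin while unlocked
 *	ipr_get_ioa_dump(ioa_cfg, dump);
 *	kref_put(&dump->kref, ipr_release_dump);  // freed on last put
 */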
3310
3311 /**
3312  * ipr_worker_thread - Worker thread
3313  * @work:               ioa config struct
3314  *
3315  * Called at task level from a work thread. This function takes care
3316  * of adding and removing device from the mid-layer as configuration
3317  * changes are detected by the adapter.
3318  *
3319  * Return value:
3320  *      nothing
3321  **/
3322 static void ipr_worker_thread(struct work_struct *work)
3323 {
3324         unsigned long lock_flags;
3325         struct ipr_resource_entry *res;
3326         struct scsi_device *sdev;
3327         struct ipr_dump *dump;
3328         struct ipr_ioa_cfg *ioa_cfg =
3329                 container_of(work, struct ipr_ioa_cfg, work_q);
3330         u8 bus, target, lun;
3331         int did_work;
3332
3333         ENTER;
3334         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3335
3336         if (ioa_cfg->sdt_state == READ_DUMP) {
3337                 dump = ioa_cfg->dump;
3338                 if (!dump) {
3339                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3340                         return;
3341                 }
3342                 kref_get(&dump->kref);
3343                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3344                 ipr_get_ioa_dump(ioa_cfg, dump);
3345                 kref_put(&dump->kref, ipr_release_dump);
3346
3347                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3348                 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3349                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3350                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3351                 return;
3352         }
3353
3354         if (ioa_cfg->scsi_unblock) {
3355                 ioa_cfg->scsi_unblock = 0;
3356                 ioa_cfg->scsi_blocked = 0;
3357                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3358                 scsi_unblock_requests(ioa_cfg->host);
3359                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3360                 if (ioa_cfg->scsi_blocked)
3361                         scsi_block_requests(ioa_cfg->host);
3362         }
3363
3364         if (!ioa_cfg->scan_enabled) {
3365                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3366                 return;
3367         }
3368
3369 restart:
3370         do {
3371                 did_work = 0;
3372                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3373                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3374                         return;
3375                 }
3376
3377                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3378                         if (res->del_from_ml && res->sdev) {
3379                                 did_work = 1;
3380                                 sdev = res->sdev;
3381                                 if (!scsi_device_get(sdev)) {
3382                                         if (!res->add_to_ml)
3383                                                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3384                                         else
3385                                                 res->del_from_ml = 0;
3386                                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3387                                         scsi_remove_device(sdev);
3388                                         scsi_device_put(sdev);
3389                                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3390                                 }
3391                                 break;
3392                         }
3393                 }
3394         } while (did_work);
3395
3396         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3397                 if (res->add_to_ml) {
3398                         bus = res->bus;
3399                         target = res->target;
3400                         lun = res->lun;
3401                         res->add_to_ml = 0;
3402                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3403                         scsi_add_device(ioa_cfg->host, bus, target, lun);
3404                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3405                         goto restart;
3406                 }
3407         }
3408
3409         ioa_cfg->scan_done = 1;
3410         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3411         kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3412         LEAVE;
3413 }
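
/*
 * Note the locking pattern above: host_lock is dropped around
 * scsi_add_device()/scsi_remove_device() because those mid-layer calls
 * can sleep, and the used_res_q walk is restarted afterwards since the
 * list may have changed while the lock was released.
 */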
3414
3415 #ifdef CONFIG_SCSI_IPR_TRACE
3416 /**
3417  * ipr_read_trace - Dump the adapter trace
3418  * @filp:               open sysfs file
3419  * @kobj:               kobject struct
3420  * @bin_attr:           bin_attribute struct
3421  * @buf:                buffer
3422  * @off:                offset
3423  * @count:              buffer size
3424  *
3425  * Return value:
3426  *      number of bytes printed to buffer
3427  **/
3428 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3429                               struct bin_attribute *bin_attr,
3430                               char *buf, loff_t off, size_t count)
3431 {
3432         struct device *dev = container_of(kobj, struct device, kobj);
3433         struct Scsi_Host *shost = class_to_shost(dev);
3434         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3435         unsigned long lock_flags = 0;
3436         ssize_t ret;
3437
3438         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3439         ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3440                                 IPR_TRACE_SIZE);
3441         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3442
3443         return ret;
3444 }
3445
3446 static struct bin_attribute ipr_trace_attr = {
3447         .attr = {
3448                 .name = "trace",
3449                 .mode = S_IRUGO,
3450         },
3451         .size = 0,
3452         .read = ipr_read_trace,
3453 };
3454 #endif
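
/*
 * Userspace sketch (a minimal reader; assumes the attribute appears
 * under /sys/class/scsi_host/hostN/ once the driver registers it):
 *
 *	int fd = open("/sys/class/scsi_host/host0/trace", O_RDONLY);
 *	char buf[4096];
 *	ssize_t n = read(fd, buf, sizeof(buf));  // raw trace entries
 */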
3455
3456 /**
3457  * ipr_show_fw_version - Show the firmware version
3458  * @dev:        class device struct
3459  * @buf:        buffer
3460  *
3461  * Return value:
3462  *      number of bytes printed to buffer
3463  **/
3464 static ssize_t ipr_show_fw_version(struct device *dev,
3465                                    struct device_attribute *attr, char *buf)
3466 {
3467         struct Scsi_Host *shost = class_to_shost(dev);
3468         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3469         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3470         unsigned long lock_flags = 0;
3471         int len;
3472
3473         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3474         len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3475                        ucode_vpd->major_release, ucode_vpd->card_type,
3476                        ucode_vpd->minor_release[0],
3477                        ucode_vpd->minor_release[1]);
3478         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3479         return len;
3480 }
3481
3482 static struct device_attribute ipr_fw_version_attr = {
3483         .attr = {
3484                 .name =         "fw_version",
3485                 .mode =         S_IRUGO,
3486         },
3487         .show = ipr_show_fw_version,
3488 };
3489
3490 /**
3491  * ipr_show_log_level - Show the adapter's error logging level
3492  * @dev:        class device struct
3493  * @buf:        buffer
3494  *
3495  * Return value:
3496  *      number of bytes printed to buffer
3497  **/
3498 static ssize_t ipr_show_log_level(struct device *dev,
3499                                    struct device_attribute *attr, char *buf)
3500 {
3501         struct Scsi_Host *shost = class_to_shost(dev);
3502         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3503         unsigned long lock_flags = 0;
3504         int len;
3505
3506         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3507         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3508         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3509         return len;
3510 }
3511
3512 /**
3513  * ipr_store_log_level - Change the adapter's error logging level
3514  * @dev:        class device struct
3515  * @buf:        buffer
3516  *
3517  * Return value:
3518  *      number of bytes consumed from buffer
3519  **/
3520 static ssize_t ipr_store_log_level(struct device *dev,
3521                                    struct device_attribute *attr,
3522                                    const char *buf, size_t count)
3523 {
3524         struct Scsi_Host *shost = class_to_shost(dev);
3525         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3526         unsigned long lock_flags = 0;
3527
3528         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3529         ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3530         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3531         return strlen(buf);
3532 }
3533
3534 static struct device_attribute ipr_log_level_attr = {
3535         .attr = {
3536                 .name =         "log_level",
3537                 .mode =         S_IRUGO | S_IWUSR,
3538         },
3539         .show = ipr_show_log_level,
3540         .store = ipr_store_log_level
3541 };
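
/*
 * Example (assuming the standard scsi_host sysfs location for shost
 * attributes):
 *
 *	# cat /sys/class/scsi_host/host0/log_level
 *	# echo 4 > /sys/class/scsi_host/host0/log_level
 */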
3542
3543 /**
3544  * ipr_store_diagnostics - IOA Diagnostics interface
3545  * @dev:        device struct
3546  * @buf:        buffer
3547  * @count:      buffer size
3548  *
3549  * This function will reset the adapter and wait a reasonable
3550  * amount of time for any errors that the adapter might log.
3551  *
3552  * Return value:
3553  *      count on success / other on failure
3554  **/
3555 static ssize_t ipr_store_diagnostics(struct device *dev,
3556                                      struct device_attribute *attr,
3557                                      const char *buf, size_t count)
3558 {
3559         struct Scsi_Host *shost = class_to_shost(dev);
3560         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3561         unsigned long lock_flags = 0;
3562         int rc = count;
3563
3564         if (!capable(CAP_SYS_ADMIN))
3565                 return -EACCES;
3566
3567         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3568         while (ioa_cfg->in_reset_reload) {
3569                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3570                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3571                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3572         }
3573
3574         ioa_cfg->errors_logged = 0;
3575         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3576
3577         if (ioa_cfg->in_reset_reload) {
3578                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3579                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3580
3581                 /* Wait for a second for any errors to be logged */
3582                 msleep(1000);
3583         } else {
3584                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3585                 return -EIO;
3586         }
3587
3588         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3589         if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3590                 rc = -EIO;
3591         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3592
3593         return rc;
3594 }
3595
3596 static struct device_attribute ipr_diagnostics_attr = {
3597         .attr = {
3598                 .name =         "run_diagnostics",
3599                 .mode =         S_IWUSR,
3600         },
3601         .store = ipr_store_diagnostics
3602 };
3603
3604 /**
3605  * ipr_show_adapter_state - Show the adapter's state
3606  * @dev:        device struct
3607  * @buf:        buffer
3608  *
3609  * Return value:
3610  *      number of bytes printed to buffer
3611  **/
3612 static ssize_t ipr_show_adapter_state(struct device *dev,
3613                                       struct device_attribute *attr, char *buf)
3614 {
3615         struct Scsi_Host *shost = class_to_shost(dev);
3616         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3617         unsigned long lock_flags = 0;
3618         int len;
3619
3620         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3621         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3622                 len = snprintf(buf, PAGE_SIZE, "offline\n");
3623         else
3624                 len = snprintf(buf, PAGE_SIZE, "online\n");
3625         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3626         return len;
3627 }
3628
3629 /**
3630  * ipr_store_adapter_state - Change adapter state
3631  * @dev:        device struct
3632  * @buf:        buffer
3633  * @count:      buffer size
3634  *
3635  * This function will change the adapter's state.
3636  *
3637  * Return value:
3638  *      count on success / other on failure
3639  **/
3640 static ssize_t ipr_store_adapter_state(struct device *dev,
3641                                        struct device_attribute *attr,
3642                                        const char *buf, size_t count)
3643 {
3644         struct Scsi_Host *shost = class_to_shost(dev);
3645         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3646         unsigned long lock_flags;
3647         int result = count, i;
3648
3649         if (!capable(CAP_SYS_ADMIN))
3650                 return -EACCES;
3651
3652         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3653         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3654             !strncmp(buf, "online", 6)) {
3655                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3656                         spin_lock(&ioa_cfg->hrrq[i]._lock);
3657                         ioa_cfg->hrrq[i].ioa_is_dead = 0;
3658                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
3659                 }
3660                 wmb();
3661                 ioa_cfg->reset_retries = 0;
3662                 ioa_cfg->in_ioa_bringdown = 0;
3663                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3664         }
3665         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3666         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3667
3668         return result;
3669 }
3670
3671 static struct device_attribute ipr_ioa_state_attr = {
3672         .attr = {
3673                 .name =         "online_state",
3674                 .mode =         S_IRUGO | S_IWUSR,
3675         },
3676         .show = ipr_show_adapter_state,
3677         .store = ipr_store_adapter_state
3678 };
3679
3680 /**
3681  * ipr_store_reset_adapter - Reset the adapter
3682  * @dev:        device struct
3683  * @buf:        buffer
3684  * @count:      buffer size
3685  *
3686  * This function will reset the adapter.
3687  *
3688  * Return value:
3689  *      count on success / other on failure
3690  **/
3691 static ssize_t ipr_store_reset_adapter(struct device *dev,
3692                                        struct device_attribute *attr,
3693                                        const char *buf, size_t count)
3694 {
3695         struct Scsi_Host *shost = class_to_shost(dev);
3696         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3697         unsigned long lock_flags;
3698         int result = count;
3699
3700         if (!capable(CAP_SYS_ADMIN))
3701                 return -EACCES;
3702
3703         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3704         if (!ioa_cfg->in_reset_reload)
3705                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3706         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3707         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3708
3709         return result;
3710 }
3711
3712 static struct device_attribute ipr_ioa_reset_attr = {
3713         .attr = {
3714                 .name =         "reset_host",
3715                 .mode =         S_IWUSR,
3716         },
3717         .store = ipr_store_reset_adapter
3718 };
3719
3720 static int ipr_iopoll(struct irq_poll *iop, int budget);
3721 /**
3722  * ipr_show_iopoll_weight - Show ipr polling mode
3723  * @dev:        class device struct
3724  * @buf:        buffer
3725  *
3726  * Return value:
3727  *      number of bytes printed to buffer
3728  **/
3729 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3730                                    struct device_attribute *attr, char *buf)
3731 {
3732         struct Scsi_Host *shost = class_to_shost(dev);
3733         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3734         unsigned long lock_flags = 0;
3735         int len;
3736
3737         spin_lock_irqsave(shost->host_lock, lock_flags);
3738         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3739         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3740
3741         return len;
3742 }
3743
3744 /**
3745  * ipr_store_iopoll_weight - Change the adapter's polling mode
3746  * @dev:        class device struct
3747  * @buf:        buffer
3748  *
3749  * Return value:
3750  *      number of bytes consumed from buffer
3751  **/
3752 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3753                                         struct device_attribute *attr,
3754                                         const char *buf, size_t count)
3755 {
3756         struct Scsi_Host *shost = class_to_shost(dev);
3757         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3758         unsigned long user_iopoll_weight;
3759         unsigned long lock_flags = 0;
3760         int i;
3761
3762         if (!ioa_cfg->sis64) {
3763                 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
3764                 return -EINVAL;
3765         }
3766         if (kstrtoul(buf, 10, &user_iopoll_weight))
3767                 return -EINVAL;
3768
3769         if (user_iopoll_weight > 256) {
3770                 dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must not exceed 256\n");
3771                 return -EINVAL;
3772         }
3773
3774         if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3775                 dev_info(&ioa_cfg->pdev->dev, "Specified irq_poll weight is already the current weight\n");
3776                 return strlen(buf);
3777         }
3778
3779         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3780                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3781                         irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
3782         }
3783
3784         spin_lock_irqsave(shost->host_lock, lock_flags);
3785         ioa_cfg->iopoll_weight = user_iopoll_weight;
3786         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3787                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3788                         irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
3789                                         ioa_cfg->iopoll_weight, ipr_iopoll);
3790                 }
3791         }
3792         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3793
3794         return strlen(buf);
3795 }
3796
3797 static struct device_attribute ipr_iopoll_weight_attr = {
3798         .attr = {
3799                 .name =         "iopoll_weight",
3800                 .mode =         S_IRUGO | S_IWUSR,
3801         },
3802         .show = ipr_show_iopoll_weight,
3803         .store = ipr_store_iopoll_weight
3804 };
3805
3806 /**
3807  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3808  * @buf_len:            buffer length
3809  *
3810  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3811  * list to use for microcode download
3812  *
3813  * Return value:
3814  *      pointer to sglist / NULL on failure
3815  **/
3816 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3817 {
3818         int sg_size, order, bsize_elem, num_elem, i, j;
3819         struct ipr_sglist *sglist;
3820         struct scatterlist *scatterlist;
3821         struct page *page;
3822
3823         /* Get the minimum size per scatter/gather element */
3824         sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3825
3826         /* Get the actual size per element */
3827         order = get_order(sg_size);
3828
3829         /* Determine the actual number of bytes per element */
3830         bsize_elem = PAGE_SIZE * (1 << order);
3831
3832         /* Determine the actual number of sg entries needed */
3833         if (buf_len % bsize_elem)
3834                 num_elem = (buf_len / bsize_elem) + 1;
3835         else
3836                 num_elem = buf_len / bsize_elem;
3837
3838         /* Allocate a scatter/gather list for the DMA */
3839         sglist = kzalloc(sizeof(struct ipr_sglist) +
3840                          (sizeof(struct scatterlist) * (num_elem - 1)),
3841                          GFP_KERNEL);
3842
3843         if (sglist == NULL) {
3844                 ipr_trace;
3845                 return NULL;
3846         }
3847
3848         scatterlist = sglist->scatterlist;
3849         sg_init_table(scatterlist, num_elem);
3850
3851         sglist->order = order;
3852         sglist->num_sg = num_elem;
3853
3854         /* Allocate a bunch of sg elements */
3855         for (i = 0; i < num_elem; i++) {
3856                 page = alloc_pages(GFP_KERNEL, order);
3857                 if (!page) {
3858                         ipr_trace;
3859
3860                         /* Free up what we already allocated */
3861                         for (j = i - 1; j >= 0; j--)
3862                                 __free_pages(sg_page(&scatterlist[j]), order);
3863                         kfree(sglist);
3864                         return NULL;
3865                 }
3866
3867                 sg_set_page(&scatterlist[i], page, 0, 0);
3868         }
3869
3870         return sglist;
3871 }
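
/*
 * Sizing sketch (assuming IPR_MAX_SGLIST is 64 and 4 KiB pages): for a
 * 1 MiB image, sg_size = 1 MiB / 63 ~= 16.6 KiB, so order = 3 and
 * bsize_elem = 32 KiB, giving num_elem = ceil(1 MiB / 32 KiB) = 32
 * scatter/gather entries.
 */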
3872
3873 /**
3874  * ipr_free_ucode_buffer - Frees a microcode download buffer
3875  * @sglist:             scatter/gather list pointer
3876  *
3877  * Free a DMA'able ucode download buffer previously allocated with
3878  * ipr_alloc_ucode_buffer
3879  *
3880  * Return value:
3881  *      nothing
3882  **/
3883 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3884 {
3885         int i;
3886
3887         for (i = 0; i < sglist->num_sg; i++)
3888                 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3889
3890         kfree(sglist);
3891 }
3892
3893 /**
3894  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3895  * @sglist:             scatter/gather list pointer
3896  * @buffer:             buffer pointer
3897  * @len:                buffer length
3898  *
3899  * Copy a microcode image from a user buffer into a buffer allocated by
3900  * ipr_alloc_ucode_buffer
3901  *
3902  * Return value:
3903  *      0 on success
3904  **/
3905 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3906                                  u8 *buffer, u32 len)
3907 {
3908         int bsize_elem, i;
3909         struct scatterlist *scatterlist;
3910         void *kaddr;
3911
3912         /* Determine the actual number of bytes per element */
3913         bsize_elem = PAGE_SIZE * (1 << sglist->order);
3914
3915         scatterlist = sglist->scatterlist;
3916
3917         for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3918                 struct page *page = sg_page(&scatterlist[i]);
3919
3920                 kaddr = kmap(page);
3921                 memcpy(kaddr, buffer, bsize_elem);
3922                 kunmap(page);
3923
3924                 scatterlist[i].length = bsize_elem;
3930         }
3931
3932         if (len % bsize_elem) {
3933                 struct page *page = sg_page(&scatterlist[i]);
3934
3935                 kaddr = kmap(page);
3936                 memcpy(kaddr, buffer, len % bsize_elem);
3937                 kunmap(page);
3938
3939                 scatterlist[i].length = len % bsize_elem;
3940         }
3941
3942         sglist->buffer_len = len;
3943         return 0;
3944 }
3945
3946 /**
3947  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3948  * @ipr_cmd:            ipr command struct
3949  * @sglist:             scatter/gather list
3950  *
3951  * Builds a microcode download IOA data list (IOADL).
3952  *
3953  **/
3954 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3955                                     struct ipr_sglist *sglist)
3956 {
3957         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3958         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3959         struct scatterlist *scatterlist = sglist->scatterlist;
3960         int i;
3961
3962         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3963         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3964         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3965
3966         ioarcb->ioadl_len =
3967                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3968         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3969                 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3970                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3971                 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3972         }
3973
3974         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3975 }
3976
3977 /**
3978  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3979  * @ipr_cmd:    ipr command struct
3980  * @sglist:             scatter/gather list
3981  *
3982  * Builds a microcode download IOA data list (IOADL).
3983  *
3984  **/
3985 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3986                                   struct ipr_sglist *sglist)
3987 {
3988         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3989         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3990         struct scatterlist *scatterlist = sglist->scatterlist;
3991         int i;
3992
3993         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3994         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3995         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3996
3997         ioarcb->ioadl_len =
3998                 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3999
4000         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
4001                 ioadl[i].flags_and_data_len =
4002                         cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
4003                 ioadl[i].address =
4004                         cpu_to_be32(sg_dma_address(&scatterlist[i]));
4005         }
4006
4007         ioadl[i-1].flags_and_data_len |=
4008                 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4009 }
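
/*
 * Resulting descriptor list for a two-element download (sketch):
 *
 *	ioadl[0].flags_and_data_len = WRITE | len0
 *	ioadl[1].flags_and_data_len = WRITE | LAST | len1
 *
 * Only the final descriptor carries IPR_IOADL_FLAGS_LAST, which tells
 * the IOA where the list ends.
 */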
4010
4011 /**
4012  * ipr_update_ioa_ucode - Update IOA's microcode
4013  * @ioa_cfg:    ioa config struct
4014  * @sglist:             scatter/gather list
4015  *
4016  * Initiate an adapter reset to update the IOA's microcode
4017  *
4018  * Return value:
4019  *      0 on success / -EIO on failure
4020  **/
4021 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
4022                                 struct ipr_sglist *sglist)
4023 {
4024         unsigned long lock_flags;
4025
4026         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4027         while (ioa_cfg->in_reset_reload) {
4028                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4029                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4030                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4031         }
4032
4033         if (ioa_cfg->ucode_sglist) {
4034                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4035                 dev_err(&ioa_cfg->pdev->dev,
4036                         "Microcode download already in progress\n");
4037                 return -EIO;
4038         }
4039
4040         sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
4041                                         sglist->scatterlist, sglist->num_sg,
4042                                         DMA_TO_DEVICE);
4043
4044         if (!sglist->num_dma_sg) {
4045                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4046                 dev_err(&ioa_cfg->pdev->dev,
4047                         "Failed to map microcode download buffer!\n");
4048                 return -EIO;
4049         }
4050
4051         ioa_cfg->ucode_sglist = sglist;
4052         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
4053         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4054         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4055
4056         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4057         ioa_cfg->ucode_sglist = NULL;
4058         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4059         return 0;
4060 }
4061
4062 /**
4063  * ipr_store_update_fw - Update the firmware on the adapter
4064  * @dev:        device struct
4065  * @buf:        buffer
4066  * @count:      buffer size
4067  *
4068  * This function will update the firmware on the adapter.
4069  *
4070  * Return value:
4071  *      count on success / other on failure
4072  **/
4073 static ssize_t ipr_store_update_fw(struct device *dev,
4074                                    struct device_attribute *attr,
4075                                    const char *buf, size_t count)
4076 {
4077         struct Scsi_Host *shost = class_to_shost(dev);
4078         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4079         struct ipr_ucode_image_header *image_hdr;
4080         const struct firmware *fw_entry;
4081         struct ipr_sglist *sglist;
4082         char fname[100];
4083         char *src;
4084         char *endline;
4085         int result, dnld_size;
4086
4087         if (!capable(CAP_SYS_ADMIN))
4088                 return -EACCES;
4089
4090         snprintf(fname, sizeof(fname), "%s", buf);
4091
4092         endline = strchr(fname, '\n');
4093         if (endline)
4094                 *endline = '\0';
4095
4096         if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4097                 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4098                 return -EIO;
4099         }
4100
4101         image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4102
4103         src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4104         dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4105         sglist = ipr_alloc_ucode_buffer(dnld_size);
4106
4107         if (!sglist) {
4108                 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4109                 release_firmware(fw_entry);
4110                 return -ENOMEM;
4111         }
4112
4113         result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4114
4115         if (result) {
4116                 dev_err(&ioa_cfg->pdev->dev,
4117                         "Microcode buffer copy to DMA buffer failed\n");
4118                 goto out;
4119         }
4120
4121         ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
4122
4123         result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4124
4125         if (!result)
4126                 result = count;
4127 out:
4128         ipr_free_ucode_buffer(sglist);
4129         release_firmware(fw_entry);
4130         return result;
4131 }
4132
4133 static struct device_attribute ipr_update_fw_attr = {
4134         .attr = {
4135                 .name =         "update_fw",
4136                 .mode =         S_IWUSR,
4137         },
4138         .store = ipr_store_update_fw
4139 };
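
/*
 * Illustrative usage from user space ("hostN" stands for whatever SCSI
 * host number this adapter was assigned; the image file must be visible
 * to the firmware loader, e.g. under /lib/firmware):
 *
 *   echo <ucode-image-file> > /sys/class/scsi_host/hostN/update_fw
 */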
4140
4141 /**
4142  * ipr_show_fw_type - Show the adapter's firmware type.
4143  * @dev:        class device struct
4144  * @buf:        buffer
4145  *
4146  * Return value:
4147  *      number of bytes printed to buffer
4148  **/
4149 static ssize_t ipr_show_fw_type(struct device *dev,
4150                                 struct device_attribute *attr, char *buf)
4151 {
4152         struct Scsi_Host *shost = class_to_shost(dev);
4153         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4154         unsigned long lock_flags = 0;
4155         int len;
4156
4157         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4158         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4159         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4160         return len;
4161 }
4162
4163 static struct device_attribute ipr_ioa_fw_type_attr = {
4164         .attr = {
4165                 .name =         "fw_type",
4166                 .mode =         S_IRUGO,
4167         },
4168         .show = ipr_show_fw_type
4169 };
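
/*
 * Illustrative read ("hostN" as above); prints 1 on 64 bit SIS adapters
 * and 0 otherwise, per the snprintf of ioa_cfg->sis64 above:
 *
 *   cat /sys/class/scsi_host/hostN/fw_type
 */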
4170
4171 static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
4172                                 struct bin_attribute *bin_attr, char *buf,
4173                                 loff_t off, size_t count)
4174 {
4175         struct device *cdev = container_of(kobj, struct device, kobj);
4176         struct Scsi_Host *shost = class_to_shost(cdev);
4177         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4178         struct ipr_hostrcb *hostrcb;
4179         unsigned long lock_flags = 0;
4180         int ret;
4181
4182         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4183         hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4184                                         struct ipr_hostrcb, queue);
4185         if (!hostrcb) {
4186                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4187                 return 0;
4188         }
4189         ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
4190                                 sizeof(hostrcb->hcam));
4191         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4192         return ret;
4193 }
4194
4195 static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
4196                                 struct bin_attribute *bin_attr, char *buf,
4197                                 loff_t off, size_t count)
4198 {
4199         struct device *cdev = container_of(kobj, struct device, kobj);
4200         struct Scsi_Host *shost = class_to_shost(cdev);
4201         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4202         struct ipr_hostrcb *hostrcb;
4203         unsigned long lock_flags = 0;
4204
4205         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4206         hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4207                                         struct ipr_hostrcb, queue);
4208         if (!hostrcb) {
4209                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4210                 return count;
4211         }
4212
4213         /* Reclaim hostrcb before exit */
4214         list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4215         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4216         return count;
4217 }
4218
4219 static struct bin_attribute ipr_ioa_async_err_log = {
4220         .attr = {
4221                 .name =         "async_err_log",
4222                 .mode =         S_IRUGO | S_IWUSR,
4223         },
4224         .size = 0,
4225         .read = ipr_read_async_err_log,
4226         .write = ipr_next_async_err_log
4227 };
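
/*
 * Sketch of the intended user space protocol: each read returns the
 * oldest unreported HCAM (or nothing when the report queue is empty),
 * and a subsequent write of any value retires that entry to the free
 * queue so the next one becomes readable:
 *
 *   dd if=/sys/class/scsi_host/hostN/async_err_log of=hcam.bin
 *   echo 1 > /sys/class/scsi_host/hostN/async_err_log
 */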
4228
4229 static struct device_attribute *ipr_ioa_attrs[] = {
4230         &ipr_fw_version_attr,
4231         &ipr_log_level_attr,
4232         &ipr_diagnostics_attr,
4233         &ipr_ioa_state_attr,
4234         &ipr_ioa_reset_attr,
4235         &ipr_update_fw_attr,
4236         &ipr_ioa_fw_type_attr,
4237         &ipr_iopoll_weight_attr,
4238         NULL,
4239 };
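
/* These adapter attributes appear under /sys/class/scsi_host/hostN/. */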
4240
4241 #ifdef CONFIG_SCSI_IPR_DUMP
4242 /**
4243  * ipr_read_dump - Dump the adapter
4244  * @filp:               open sysfs file
4245  * @kobj:               kobject struct
4246  * @bin_attr:           bin_attribute struct
4247  * @buf:                buffer
4248  * @off:                offset
4249  * @count:              buffer size
4250  *
4251  * Return value:
4252  *      number of bytes printed to buffer
4253  **/
4254 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4255                              struct bin_attribute *bin_attr,
4256                              char *buf, loff_t off, size_t count)
4257 {
4258         struct device *cdev = container_of(kobj, struct device, kobj);
4259         struct Scsi_Host *shost = class_to_shost(cdev);
4260         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4261         struct ipr_dump *dump;
4262         unsigned long lock_flags = 0;
4263         char *src;
4264         int len, sdt_end;
4265         size_t rc = count;
4266
4267         if (!capable(CAP_SYS_ADMIN))
4268                 return -EACCES;
4269
4270         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4271         dump = ioa_cfg->dump;
4272
4273         if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4274                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4275                 return 0;
4276         }
4277         kref_get(&dump->kref);
4278         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4279
4280         if (off > dump->driver_dump.hdr.len) {
4281                 kref_put(&dump->kref, ipr_release_dump);
4282                 return 0;
4283         }
4284
4285         if (off + count > dump->driver_dump.hdr.len) {
4286                 count = dump->driver_dump.hdr.len - off;
4287                 rc = count;
4288         }
4289
4290         if (count && off < sizeof(dump->driver_dump)) {
4291                 if (off + count > sizeof(dump->driver_dump))
4292                         len = sizeof(dump->driver_dump) - off;
4293                 else
4294                         len = count;
4295                 src = (u8 *)&dump->driver_dump + off;
4296                 memcpy(buf, src, len);
4297                 buf += len;
4298                 off += len;
4299                 count -= len;
4300         }
4301
4302         off -= sizeof(dump->driver_dump);
4303
4304         if (ioa_cfg->sis64)
4305                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4306                           (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4307                            sizeof(struct ipr_sdt_entry));
4308         else
4309                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4310                           (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4311
4312         if (count && off < sdt_end) {
4313                 if (off + count > sdt_end)
4314                         len = sdt_end - off;
4315                 else
4316                         len = count;
4317                 src = (u8 *)&dump->ioa_dump + off;
4318                 memcpy(buf, src, len);
4319                 buf += len;
4320                 off += len;
4321                 count -= len;
4322         }
4323
4324         off -= sdt_end;
4325
4326         while (count) {
4327                 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4328                         len = PAGE_ALIGN(off) - off;
4329                 else
4330                         len = count;
4331                 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4332                 src += off & ~PAGE_MASK;
4333                 memcpy(buf, src, len);
4334                 buf += len;
4335                 off += len;
4336                 count -= len;
4337         }
4338
4339         kref_put(&dump->kref, ipr_release_dump);
4340         return rc;
4341 }
4342
4343 /**
4344  * ipr_alloc_dump - Prepare for adapter dump
4345  * @ioa_cfg:    ioa config struct
4346  *
4347  * Return value:
4348  *      0 on success / other on failure
4349  **/
4350 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4351 {
4352         struct ipr_dump *dump;
4353         __be32 **ioa_data;
4354         unsigned long lock_flags = 0;
4355
4356         dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4357
4358         if (!dump) {
4359                 ipr_err("Dump memory allocation failed\n");
4360                 return -ENOMEM;
4361         }
4362
4363         if (ioa_cfg->sis64)
4364                 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4365         else
4366                 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4367
4368         if (!ioa_data) {
4369                 ipr_err("Dump memory allocation failed\n");
4370                 kfree(dump);
4371                 return -ENOMEM;
4372         }
4373
4374         dump->ioa_dump.ioa_data = ioa_data;
4375
4376         kref_init(&dump->kref);
4377         dump->ioa_cfg = ioa_cfg;
4378
4379         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4380
4381         if (INACTIVE != ioa_cfg->sdt_state) {
4382                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4383                 vfree(dump->ioa_dump.ioa_data);
4384                 kfree(dump);
4385                 return 0;
4386         }
4387
4388         ioa_cfg->dump = dump;
4389         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4390         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4391                 ioa_cfg->dump_taken = 1;
4392                 schedule_work(&ioa_cfg->work_q);
4393         }
4394         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4395
4396         return 0;
4397 }
4398
4399 /**
4400  * ipr_free_dump - Free adapter dump memory
4401  * @ioa_cfg:    ioa config struct
4402  *
4403  * Return value:
4404  *      0 on success / other on failure
4405  **/
4406 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4407 {
4408         struct ipr_dump *dump;
4409         unsigned long lock_flags = 0;
4410
4411         ENTER;
4412
4413         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4414         dump = ioa_cfg->dump;
4415         if (!dump) {
4416                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4417                 return 0;
4418         }
4419
4420         ioa_cfg->dump = NULL;
4421         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4422
4423         kref_put(&dump->kref, ipr_release_dump);
4424
4425         LEAVE;
4426         return 0;
4427 }
4428
4429 /**
4430  * ipr_write_dump - Setup dump state of adapter
4431  * @filp:               open sysfs file
4432  * @kobj:               kobject struct
4433  * @bin_attr:           bin_attribute struct
4434  * @buf:                buffer
4435  * @off:                offset
4436  * @count:              buffer size
4437  *
4438  * Return value:
4439  *      count on success / negative error on failure
4440  **/
4441 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4442                               struct bin_attribute *bin_attr,
4443                               char *buf, loff_t off, size_t count)
4444 {
4445         struct device *cdev = container_of(kobj, struct device, kobj);
4446         struct Scsi_Host *shost = class_to_shost(cdev);
4447         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4448         int rc;
4449
4450         if (!capable(CAP_SYS_ADMIN))
4451                 return -EACCES;
4452
4453         if (buf[0] == '1')
4454                 rc = ipr_alloc_dump(ioa_cfg);
4455         else if (buf[0] == '0')
4456                 rc = ipr_free_dump(ioa_cfg);
4457         else
4458                 return -EINVAL;
4459
4460         if (rc)
4461                 return rc;
4462         else
4463                 return count;
4464 }
4465
4466 static struct bin_attribute ipr_dump_attr = {
4467         .attr = {
4468                 .name = "dump",
4469                 .mode = S_IRUSR | S_IWUSR,
4470         },
4471         .size = 0,
4472         .read = ipr_read_dump,
4473         .write = ipr_write_dump
4474 };
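
/*
 * Illustrative dump workflow (requires CAP_SYS_ADMIN): writing "1" arms
 * and allocates the dump, the read streams it out once obtained, and
 * writing "0" frees the memory again:
 *
 *   echo 1 > /sys/class/scsi_host/hostN/dump
 *   cat /sys/class/scsi_host/hostN/dump > ipr_dump.bin
 *   echo 0 > /sys/class/scsi_host/hostN/dump
 */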
4475 #else
4476 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4477 #endif
4478
4479 /**
4480  * ipr_change_queue_depth - Change the device's queue depth
4481  * @sdev:       scsi device struct
4482  * @qdepth:     depth to set
4484  *
4485  * Return value:
4486  *      actual depth set
4487  **/
4488 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
4489 {
4490         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4491         struct ipr_resource_entry *res;
4492         unsigned long lock_flags = 0;
4493
4494         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4495         res = (struct ipr_resource_entry *)sdev->hostdata;
4496
4497         if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4498                 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4499         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4500
4501         scsi_change_queue_depth(sdev, qdepth);
4502         return sdev->queue_depth;
4503 }
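
/*
 * This is the driver's change_queue_depth hook (wired into the host
 * template elsewhere in this file), so it runs when, for example, user
 * space writes the generic SCSI attribute (H:C:T:L being the device's
 * host:channel:target:lun address):
 *
 *   echo 16 > /sys/bus/scsi/devices/H:C:T:L/queue_depth
 */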
4504
4505 /**
4506  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4507  * @dev:        device struct
4508  * @attr:       device attribute structure
4509  * @buf:        buffer
4510  *
4511  * Return value:
4512  *      number of bytes printed to buffer
4513  **/
4514 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4515 {
4516         struct scsi_device *sdev = to_scsi_device(dev);
4517         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4518         struct ipr_resource_entry *res;
4519         unsigned long lock_flags = 0;
4520         ssize_t len = -ENXIO;
4521
4522         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4523         res = (struct ipr_resource_entry *)sdev->hostdata;
4524         if (res)
4525                 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4526         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4527         return len;
4528 }
4529
4530 static struct device_attribute ipr_adapter_handle_attr = {
4531         .attr = {
4532                 .name =         "adapter_handle",
4533                 .mode =         S_IRUSR,
4534         },
4535         .show = ipr_show_adapter_handle
4536 };
4537
4538 /**
4539  * ipr_show_resource_path - Show the resource path or the resource address for
4540  *                          this device.
4541  * @dev:        device struct
4542  * @attr:       device attribute structure
4543  * @buf:        buffer
4544  *
4545  * Return value:
4546  *      number of bytes printed to buffer
4547  **/
4548 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4549 {
4550         struct scsi_device *sdev = to_scsi_device(dev);
4551         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4552         struct ipr_resource_entry *res;
4553         unsigned long lock_flags = 0;
4554         ssize_t len = -ENXIO;
4555         char buffer[IPR_MAX_RES_PATH_LENGTH];
4556
4557         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4558         res = (struct ipr_resource_entry *)sdev->hostdata;
4559         if (res && ioa_cfg->sis64)
4560                 len = snprintf(buf, PAGE_SIZE, "%s\n",
4561                                __ipr_format_res_path(res->res_path, buffer,
4562                                                      sizeof(buffer)));
4563         else if (res)
4564                 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4565                                res->bus, res->target, res->lun);
4566
4567         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4568         return len;
4569 }
4570
4571 static struct device_attribute ipr_resource_path_attr = {
4572         .attr = {
4573                 .name =         "resource_path",
4574                 .mode =         S_IRUGO,
4575         },
4576         .show = ipr_show_resource_path
4577 };
4578
4579 /**
4580  * ipr_show_device_id - Show the device_id for this device.
4581  * @dev:        device struct
4582  * @attr:       device attribute structure
4583  * @buf:        buffer
4584  *
4585  * Return value:
4586  *      number of bytes printed to buffer
4587  **/
4588 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4589 {
4590         struct scsi_device *sdev = to_scsi_device(dev);
4591         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4592         struct ipr_resource_entry *res;
4593         unsigned long lock_flags = 0;
4594         ssize_t len = -ENXIO;
4595
4596         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4597         res = (struct ipr_resource_entry *)sdev->hostdata;
4598         if (res && ioa_cfg->sis64)
4599                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
4600         else if (res)
4601                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4602
4603         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4604         return len;
4605 }
4606
4607 static struct device_attribute ipr_device_id_attr = {
4608         .attr = {
4609                 .name =         "device_id",
4610                 .mode =         S_IRUGO,
4611         },
4612         .show = ipr_show_device_id
4613 };
4614
4615 /**
4616  * ipr_show_resource_type - Show the resource type for this device.
4617  * @dev:        device struct
4618  * @attr:       device attribute structure
4619  * @buf:        buffer
4620  *
4621  * Return value:
4622  *      number of bytes printed to buffer
4623  **/
4624 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4625 {
4626         struct scsi_device *sdev = to_scsi_device(dev);
4627         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4628         struct ipr_resource_entry *res;
4629         unsigned long lock_flags = 0;
4630         ssize_t len = -ENXIO;
4631
4632         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4633         res = (struct ipr_resource_entry *)sdev->hostdata;
4634
4635         if (res)
4636                 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4637
4638         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4639         return len;
4640 }
4641
4642 static struct device_attribute ipr_resource_type_attr = {
4643         .attr = {
4644                 .name =         "resource_type",
4645                 .mode =         S_IRUGO,
4646         },
4647         .show = ipr_show_resource_type
4648 };
4649
4650 /**
4651  * ipr_show_raw_mode - Show the adapter's raw mode
4652  * @dev:        class device struct
4653  * @buf:        buffer
4654  *
4655  * Return value:
4656  *      number of bytes printed to buffer
4657  **/
4658 static ssize_t ipr_show_raw_mode(struct device *dev,
4659                                  struct device_attribute *attr, char *buf)
4660 {
4661         struct scsi_device *sdev = to_scsi_device(dev);
4662         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4663         struct ipr_resource_entry *res;
4664         unsigned long lock_flags = 0;
4665         ssize_t len;
4666
4667         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4668         res = (struct ipr_resource_entry *)sdev->hostdata;
4669         if (res)
4670                 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4671         else
4672                 len = -ENXIO;
4673         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4674         return len;
4675 }
4676
4677 /**
4678  * ipr_store_raw_mode - Change the adapter's raw mode
4679  * @dev:        class device struct
4680  * @buf:        buffer
4681  *
4682  * Return value:
4683  *      number of bytes consumed on success / negative error on failure
4684  **/
4685 static ssize_t ipr_store_raw_mode(struct device *dev,
4686                                   struct device_attribute *attr,
4687                                   const char *buf, size_t count)
4688 {
4689         struct scsi_device *sdev = to_scsi_device(dev);
4690         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4691         struct ipr_resource_entry *res;
4692         unsigned long lock_flags = 0;
4693         ssize_t len;
4694
4695         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4696         res = (struct ipr_resource_entry *)sdev->hostdata;
4697         if (res) {
4698                 if (ipr_is_af_dasd_device(res)) {
4699                         res->raw_mode = simple_strtoul(buf, NULL, 10);
4700                         len = strlen(buf);
4701                         if (res->sdev)
4702                                 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4703                                         res->raw_mode ? "enabled" : "disabled");
4704                 } else
4705                         len = -EINVAL;
4706         } else
4707                 len = -ENXIO;
4708         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4709         return len;
4710 }
4711
4712 static struct device_attribute ipr_raw_mode_attr = {
4713         .attr = {
4714                 .name =         "raw_mode",
4715                 .mode =         S_IRUGO | S_IWUSR,
4716         },
4717         .show = ipr_show_raw_mode,
4718         .store = ipr_store_raw_mode
4719 };
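
/*
 * Illustrative toggle (honored only for AF DASD resources, per the store
 * handler above; H:C:T:L is the device address):
 *
 *   echo 1 > /sys/bus/scsi/devices/H:C:T:L/raw_mode
 */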
4720
4721 static struct device_attribute *ipr_dev_attrs[] = {
4722         &ipr_adapter_handle_attr,
4723         &ipr_resource_path_attr,
4724         &ipr_device_id_attr,
4725         &ipr_resource_type_attr,
4726         &ipr_raw_mode_attr,
4727         NULL,
4728 };
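
/* These per-device attributes appear in each SCSI device's sysfs
 * directory, e.g. /sys/bus/scsi/devices/H:C:T:L/. */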
4729
4730 /**
4731  * ipr_biosparam - Return the HSC mapping
4732  * @sdev:                       scsi device struct
4733  * @block_device:       block device pointer
4734  * @capacity:           capacity of the device
4735  * @parm:                       Array containing returned HSC values.
4736  *
4737  * This function generates the HSC parms that fdisk uses.
4738  * We want to make sure we return something that places partitions
4739  * on 4k boundaries for best performance with the IOA.
4740  *
4741  * Return value:
4742  *      0 on success
4743  **/
4744 static int ipr_biosparam(struct scsi_device *sdev,
4745                          struct block_device *block_device,
4746                          sector_t capacity, int *parm)
4747 {
4748         int heads, sectors;
4749         sector_t cylinders;
4750
4751         heads = 128;
4752         sectors = 32;
4753
4754         cylinders = capacity;
4755         sector_div(cylinders, (128 * 32));
4756
4757         /* return result */
4758         parm[0] = heads;
4759         parm[1] = sectors;
4760         parm[2] = cylinders;
4761
4762         return 0;
4763 }
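
/*
 * Worked example of the fixed geometry above: a cylinder is 128 * 32 =
 * 4096 sectors, i.e. 2 MiB with 512-byte sectors, so partitions placed
 * on cylinder boundaries are always 4k aligned. For instance, a disk of
 * 143374744 sectors reports 143374744 / 4096 = 35003 cylinders (integer
 * division, as performed by sector_div() above).
 */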
4764
4765 /**
4766  * ipr_find_starget - Find target based on bus/target.
4767  * @starget:    scsi target struct
4768  *
4769  * Return value:
4770  *      resource entry pointer if found / NULL if not found
4771  **/
4772 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4773 {
4774         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4775         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4776         struct ipr_resource_entry *res;
4777
4778         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4779                 if ((res->bus == starget->channel) &&
4780                     (res->target == starget->id)) {
4781                         return res;
4782                 }
4783         }
4784
4785         return NULL;
4786 }
4787
4788 static struct ata_port_info sata_port_info;
4789
4790 /**
4791  * ipr_target_alloc - Prepare for commands to a SCSI target
4792  * @starget:    scsi target struct
4793  *
4794  * If the device is a SATA device, this function allocates an
4795  * ATA port with libata, else it does nothing.
4796  *
4797  * Return value:
4798  *      0 on success / non-0 on failure
4799  **/
4800 static int ipr_target_alloc(struct scsi_target *starget)
4801 {
4802         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4803         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4804         struct ipr_sata_port *sata_port;
4805         struct ata_port *ap;
4806         struct ipr_resource_entry *res;
4807         unsigned long lock_flags;
4808
4809         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4810         res = ipr_find_starget(starget);
4811         starget->hostdata = NULL;
4812
4813         if (res && ipr_is_gata(res)) {
4814                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4815                 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4816                 if (!sata_port)
4817                         return -ENOMEM;
4818
4819                 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4820                 if (ap) {
4821                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4822                         sata_port->ioa_cfg = ioa_cfg;
4823                         sata_port->ap = ap;
4824                         sata_port->res = res;
4825
4826                         res->sata_port = sata_port;
4827                         ap->private_data = sata_port;
4828                         starget->hostdata = sata_port;
4829                 } else {
4830                         kfree(sata_port);
4831                         return -ENOMEM;
4832                 }
4833         }
4834         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4835
4836         return 0;
4837 }
4838
4839 /**
4840  * ipr_target_destroy - Destroy a SCSI target
4841  * @starget:    scsi target struct
4842  *
4843  * If the device was a SATA device, this function frees the libata
4844  * ATA port, else it does nothing.
4845  *
4846  **/
4847 static void ipr_target_destroy(struct scsi_target *starget)
4848 {
4849         struct ipr_sata_port *sata_port = starget->hostdata;
4850         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4851         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4852
4853         if (ioa_cfg->sis64) {
4854                 if (!ipr_find_starget(starget)) {
4855                         if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4856                                 clear_bit(starget->id, ioa_cfg->array_ids);
4857                         else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4858                                 clear_bit(starget->id, ioa_cfg->vset_ids);
4859                         else if (starget->channel == 0)
4860                                 clear_bit(starget->id, ioa_cfg->target_ids);
4861                 }
4862         }
4863
4864         if (sata_port) {
4865                 starget->hostdata = NULL;
4866                 ata_sas_port_destroy(sata_port->ap);
4867                 kfree(sata_port);
4868         }
4869 }
4870
4871 /**
4872  * ipr_find_sdev - Find device based on bus/target/lun.
4873  * @sdev:       scsi device struct
4874  *
4875  * Return value:
4876  *      resource entry pointer if found / NULL if not found
4877  **/
4878 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4879 {
4880         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4881         struct ipr_resource_entry *res;
4882
4883         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4884                 if ((res->bus == sdev->channel) &&
4885                     (res->target == sdev->id) &&
4886                     (res->lun == sdev->lun))
4887                         return res;
4888         }
4889
4890         return NULL;
4891 }
4892
4893 /**
4894  * ipr_slave_destroy - Unconfigure a SCSI device
4895  * @sdev:       scsi device struct
4896  *
4897  * Return value:
4898  *      nothing
4899  **/
4900 static void ipr_slave_destroy(struct scsi_device *sdev)
4901 {
4902         struct ipr_resource_entry *res;
4903         struct ipr_ioa_cfg *ioa_cfg;
4904         unsigned long lock_flags = 0;
4905
4906         ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4907
4908         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4909         res = (struct ipr_resource_entry *) sdev->hostdata;
4910         if (res) {
4911                 if (res->sata_port)
4912                         res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4913                 sdev->hostdata = NULL;
4914                 res->sdev = NULL;
4915                 res->sata_port = NULL;
4916         }
4917         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4918 }
4919
4920 /**
4921  * ipr_slave_configure - Configure a SCSI device
4922  * @sdev:       scsi device struct
4923  *
4924  * This function configures the specified scsi device.
4925  *
4926  * Return value:
4927  *      0 on success
4928  **/
4929 static int ipr_slave_configure(struct scsi_device *sdev)
4930 {
4931         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4932         struct ipr_resource_entry *res;
4933         struct ata_port *ap = NULL;
4934         unsigned long lock_flags = 0;
4935         char buffer[IPR_MAX_RES_PATH_LENGTH];
4936
4937         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4938         res = sdev->hostdata;
4939         if (res) {
4940                 if (ipr_is_af_dasd_device(res))
4941                         sdev->type = TYPE_RAID;
4942                 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4943                         sdev->scsi_level = 4;
4944                         sdev->no_uld_attach = 1;
4945                 }
4946                 if (ipr_is_vset_device(res)) {
4947                         sdev->scsi_level = SCSI_SPC_3;
4948                         blk_queue_rq_timeout(sdev->request_queue,
4949                                              IPR_VSET_RW_TIMEOUT);
4950                         blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4951                 }
4952                 if (ipr_is_gata(res) && res->sata_port)
4953                         ap = res->sata_port->ap;
4954                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4955
4956                 if (ap) {
4957                         scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
4958                         ata_sas_slave_configure(sdev, ap);
4959                 }
4960
4961                 if (ioa_cfg->sis64)
4962                         sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4963                                     ipr_format_res_path(ioa_cfg,
4964                                 res->res_path, buffer, sizeof(buffer)));
4965                 return 0;
4966         }
4967         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4968         return 0;
4969 }
4970
4971 /**
4972  * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4973  * @sdev:       scsi device struct
4974  *
4975  * This function initializes an ATA port so that future commands
4976  * sent through queuecommand will work.
4977  *
4978  * Return value:
4979  *      0 on success
4980  **/
4981 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4982 {
4983         struct ipr_sata_port *sata_port = NULL;
4984         int rc = -ENXIO;
4985
4986         ENTER;
4987         if (sdev->sdev_target)
4988                 sata_port = sdev->sdev_target->hostdata;
4989         if (sata_port) {
4990                 rc = ata_sas_port_init(sata_port->ap);
4991                 if (rc == 0)
4992                         rc = ata_sas_sync_probe(sata_port->ap);
4993         }
4994
4995         if (rc)
4996                 ipr_slave_destroy(sdev);
4997
4998         LEAVE;
4999         return rc;
5000 }
5001
5002 /**
5003  * ipr_slave_alloc - Prepare for commands to a device.
5004  * @sdev:       scsi device struct
5005  *
5006  * This function saves a pointer to the resource entry
5007  * in the scsi device struct if the device exists. We
5008  * can then use this pointer in ipr_queuecommand when
5009  * handling new commands.
5010  *
5011  * Return value:
5012  *      0 on success / -ENXIO if device does not exist
5013  **/
5014 static int ipr_slave_alloc(struct scsi_device *sdev)
5015 {
5016         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
5017         struct ipr_resource_entry *res;
5018         unsigned long lock_flags;
5019         int rc = -ENXIO;
5020
5021         sdev->hostdata = NULL;
5022
5023         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5024
5025         res = ipr_find_sdev(sdev);
5026         if (res) {
5027                 res->sdev = sdev;
5028                 res->add_to_ml = 0;
5029                 res->in_erp = 0;
5030                 sdev->hostdata = res;
5031                 if (!ipr_is_naca_model(res))
5032                         res->needs_sync_complete = 1;
5033                 rc = 0;
5034                 if (ipr_is_gata(res)) {
5035                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5036                         return ipr_ata_slave_alloc(sdev);
5037                 }
5038         }
5039
5040         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5041
5042         return rc;
5043 }
5044
5045 /**
5046  * ipr_match_lun - Match function for specified LUN
5047  * @ipr_cmd:    ipr command struct
5048  * @device:             device to match (sdev)
5049  *
5050  * Returns:
5051  *      1 if command matches sdev / 0 if command does not match sdev
5052  **/
5053 static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
5054 {
5055         if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
5056                 return 1;
5057         return 0;
5058 }
5059
5060 /**
5061  * ipr_cmnd_is_free - Check if a command is free or not
5062  * @ipr_cmd:    ipr command struct
5063  *
5064  * Returns:
5065  *      true / false
5066  **/
5067 static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
5068 {
5069         struct ipr_cmnd *loop_cmd;
5070
5071         list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
5072                 if (loop_cmd == ipr_cmd)
5073                         return true;
5074         }
5075
5076         return false;
5077 }
5078
5079 /**
5080  * ipr_match_res - Match function for specified resource entry
5081  * @ipr_cmd:    ipr command struct
5082  * @resource:   resource entry to match
5083  *
5084  * Returns:
5085  *      1 if command matches the resource entry / 0 if it does not
5086  **/
5087 static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
5088 {
5089         struct ipr_resource_entry *res = resource;
5090
5091         if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
5092                 return 1;
5093         return 0;
5094 }
5095
5096 /**
5097  * ipr_wait_for_ops - Wait for matching commands to complete
5098  * @ioa_cfg:    ioa config struct
5099  * @device:             device to match (sdev)
5100  * @match:              match function to use
5101  *
5102  * Returns:
5103  *      SUCCESS / FAILED
5104  **/
5105 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
5106                             int (*match)(struct ipr_cmnd *, void *))
5107 {
5108         struct ipr_cmnd *ipr_cmd;
5109         int wait, i;
5110         unsigned long flags;
5111         struct ipr_hrr_queue *hrrq;
5112         signed long timeout = IPR_ABORT_TASK_TIMEOUT;
5113         DECLARE_COMPLETION_ONSTACK(comp);
5114
5115         ENTER;
5116         do {
5117                 wait = 0;
5118
5119                 for_each_hrrq(hrrq, ioa_cfg) {
5120                         spin_lock_irqsave(hrrq->lock, flags);
5121                         for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5122                                 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5123                                 if (!ipr_cmnd_is_free(ipr_cmd)) {
5124                                         if (match(ipr_cmd, device)) {
5125                                                 ipr_cmd->eh_comp = &comp;
5126                                                 wait++;
5127                                         }
5128                                 }
5129                         }
5130                         spin_unlock_irqrestore(hrrq->lock, flags);
5131                 }
5132
5133                 if (wait) {
5134                         timeout = wait_for_completion_timeout(&comp, timeout);
5135
5136                         if (!timeout) {
5137                                 wait = 0;
5138
5139                                 for_each_hrrq(hrrq, ioa_cfg) {
5140                                         spin_lock_irqsave(hrrq->lock, flags);
5141                                         for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5142                                                 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5143                                                 if (!ipr_cmnd_is_free(ipr_cmd)) {
5144                                                         if (match(ipr_cmd, device)) {
5145                                                                 ipr_cmd->eh_comp = NULL;
5146                                                                 wait++;
5147                                                         }
5148                                                 }
5149                                         }
5150                                         spin_unlock_irqrestore(hrrq->lock, flags);
5151                                 }
5152
5153                                 if (wait)
5154                                         dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
5155                                 LEAVE;
5156                                 return wait ? FAILED : SUCCESS;
5157                         }
5158                 }
5159         } while (wait);
5160
5161         LEAVE;
5162         return SUCCESS;
5163 }
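
/*
 * Typical invocation, matching the EH handlers below, e.g.:
 *
 *   rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
 *
 * This blocks until every outstanding command matched on that sdev has
 * completed or the abort timeout expires.
 */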
5164
5165 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
5166 {
5167         struct ipr_ioa_cfg *ioa_cfg;
5168         unsigned long lock_flags = 0;
5169         int rc = SUCCESS;
5170
5171         ENTER;
5172         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5173         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5174
5175         if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5176                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5177                 dev_err(&ioa_cfg->pdev->dev,
5178                         "Adapter being reset as a result of error recovery.\n");
5179
5180                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5181                         ioa_cfg->sdt_state = GET_DUMP;
5182         }
5183
5184         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5185         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5186         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5187
5188         /* If we were already resetting the adapter for some reason and
5189          * that reset failed, fail this host reset as well. */
5190         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5191                 ipr_trace;
5192                 rc = FAILED;
5193         }
5194
5195         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5196         LEAVE;
5197         return rc;
5198 }
5199
5200 /**
5201  * ipr_device_reset - Reset the device
5202  * @ioa_cfg:    ioa config struct
5203  * @res:                resource entry struct
5204  *
5205  * This function issues a device reset to the affected device.
5206  * If the device is a SCSI device, a LUN reset will be sent
5207  * to the device first. If that does not work, a target reset
5208  * will be sent. If the device is a SATA device, a PHY reset will
5209  * be sent.
5210  *
5211  * Return value:
5212  *      0 on success / non-zero on failure
5213  **/
5214 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5215                             struct ipr_resource_entry *res)
5216 {
5217         struct ipr_cmnd *ipr_cmd;
5218         struct ipr_ioarcb *ioarcb;
5219         struct ipr_cmd_pkt *cmd_pkt;
5220         struct ipr_ioarcb_ata_regs *regs;
5221         u32 ioasc;
5222
5223         ENTER;
5224         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5225         ioarcb = &ipr_cmd->ioarcb;
5226         cmd_pkt = &ioarcb->cmd_pkt;
5227
5228         if (ipr_cmd->ioa_cfg->sis64) {
5229                 regs = &ipr_cmd->i.ata_ioadl.regs;
5230                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5231         } else
5232                 regs = &ioarcb->u.add_data.u.regs;
5233
5234         ioarcb->res_handle = res->res_handle;
5235         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5236         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5237         if (ipr_is_gata(res)) {
5238                 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
5239                 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
5240                 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5241         }
5242
5243         ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5244         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5245         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5246         if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5247                 if (ipr_cmd->ioa_cfg->sis64)
5248                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5249                                sizeof(struct ipr_ioasa_gata));
5250                 else
5251                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5252                                sizeof(struct ipr_ioasa_gata));
5253         }
5254
5255         LEAVE;
5256         return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
5257 }
5258
5259 /**
5260  * ipr_sata_reset - Reset the SATA port
5261  * @link:       SATA link to reset
5262  * @classes:    class of the attached device
5263  * @deadline:   unused
5263  *
5264  * This function issues a SATA phy reset to the affected ATA link.
5265  *
5266  * Return value:
5267  *      0 on success / non-zero on failure
5268  **/
5269 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
5270                                 unsigned long deadline)
5271 {
5272         struct ipr_sata_port *sata_port = link->ap->private_data;
5273         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5274         struct ipr_resource_entry *res;
5275         unsigned long lock_flags = 0;
5276         int rc = -ENXIO, ret;
5277
5278         ENTER;
5279         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5280         while (ioa_cfg->in_reset_reload) {
5281                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5282                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5283                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5284         }
5285
5286         res = sata_port->res;
5287         if (res) {
5288                 rc = ipr_device_reset(ioa_cfg, res);
5289                 *classes = res->ata_class;
5290                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5291
5292                 ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5293                 if (ret != SUCCESS) {
5294                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5295                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5296                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5297
5298                         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5299                 }
5300         } else
5301                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5302
5303         LEAVE;
5304         return rc;
5305 }
5306
5307 /**
5308  * __ipr_eh_dev_reset - Reset the device
5309  * @scsi_cmd:   scsi command struct
5310  *
5311  * This function issues a device reset to the affected device.
5312  * A LUN reset will be sent to the device first. If that does
5313  * not work, a target reset will be sent.
5314  *
5315  * Return value:
5316  *      SUCCESS / FAILED
5317  **/
5318 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5319 {
5320         struct ipr_cmnd *ipr_cmd;
5321         struct ipr_ioa_cfg *ioa_cfg;
5322         struct ipr_resource_entry *res;
5323         struct ata_port *ap;
5324         int rc = 0, i;
5325         struct ipr_hrr_queue *hrrq;
5326
5327         ENTER;
5328         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5329         res = scsi_cmd->device->hostdata;
5330
5331         /*
5332          * If we are currently going through reset/reload, return failed. This will force the
5333          * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5334          * reset to complete
5335          */
5336         if (ioa_cfg->in_reset_reload)
5337                 return FAILED;
5338         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5339                 return FAILED;
5340
5341         for_each_hrrq(hrrq, ioa_cfg) {
5342                 spin_lock(&hrrq->_lock);
5343                 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5344                         ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5345
5346                         if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5347                                 if (!ipr_cmd->qc)
5348                                         continue;
5349                                 if (ipr_cmnd_is_free(ipr_cmd))
5350                                         continue;
5351
5352                                 ipr_cmd->done = ipr_sata_eh_done;
5353                                 if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5354                                         ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5355                                         ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5356                                 }
5357                         }
5358                 }
5359                 spin_unlock(&hrrq->_lock);
5360         }
5361         res->resetting_device = 1;
5362         scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5363
5364         if (ipr_is_gata(res) && res->sata_port) {
5365                 ap = res->sata_port->ap;
5366                 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5367                 ata_std_error_handler(ap);
5368                 spin_lock_irq(scsi_cmd->device->host->host_lock);
5369         } else
5370                 rc = ipr_device_reset(ioa_cfg, res);
5371         res->resetting_device = 0;
5372         res->reset_occurred = 1;
5373
5374         LEAVE;
5375         return rc ? FAILED : SUCCESS;
5376 }
5377
5378 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5379 {
5380         int rc;
5381         struct ipr_ioa_cfg *ioa_cfg;
5382         struct ipr_resource_entry *res;
5383
5384         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5385         res = cmd->device->hostdata;
5386
5387         if (!res)
5388                 return FAILED;
5389
5390         spin_lock_irq(cmd->device->host->host_lock);
5391         rc = __ipr_eh_dev_reset(cmd);
5392         spin_unlock_irq(cmd->device->host->host_lock);
5393
5394         if (rc == SUCCESS) {
5395                 if (ipr_is_gata(res) && res->sata_port)
5396                         rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5397                 else
5398                         rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5399         }
5400
5401         return rc;
5402 }
5403
5404 /**
5405  * ipr_bus_reset_done - Op done function for bus reset.
5406  * @ipr_cmd:    ipr command struct
5407  *
5408  * This function is the op done function for a bus reset
5409  *
5410  * Return value:
5411  *      none
5412  **/
5413 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5414 {
5415         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5416         struct ipr_resource_entry *res;
5417
5418         ENTER;
5419         if (!ioa_cfg->sis64)
5420                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5421                         if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5422                                 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5423                                 break;
5424                         }
5425                 }
5426
5427         /*
5428          * If abort has not completed, indicate the reset has, else call the
5429          * abort's done function to wake the sleeping eh thread
5430          */
5431         if (ipr_cmd->sibling->sibling)
5432                 ipr_cmd->sibling->sibling = NULL;
5433         else
5434                 ipr_cmd->sibling->done(ipr_cmd->sibling);
5435
5436         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5437         LEAVE;
5438 }
5439
5440 /**
5441  * ipr_abort_timeout - An abort task has timed out
5442  * @ipr_cmd:    ipr command struct
5443  *
5444  * This function handles when an abort task times out. If this
5445  * happens we issue a bus reset since we have resources tied
5446  * up that must be freed before returning to the midlayer.
5447  *
5448  * Return value:
5449  *      none
5450  **/
5451 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5452 {
5453         struct ipr_cmnd *reset_cmd;
5454         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5455         struct ipr_cmd_pkt *cmd_pkt;
5456         unsigned long lock_flags = 0;
5457
5458         ENTER;
5459         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5460         if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5461                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5462                 return;
5463         }
5464
5465         sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5466         reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5467         ipr_cmd->sibling = reset_cmd;
5468         reset_cmd->sibling = ipr_cmd;
5469         reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5470         cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5471         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5472         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5473         cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5474
5475         ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5476         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5477         LEAVE;
5478 }
5479
5480 /**
5481  * ipr_cancel_op - Cancel specified op
5482  * @scsi_cmd:   scsi command struct
5483  *
5484  * This function cancels specified op.
5485  *
5486  * Return value:
5487  *      SUCCESS / FAILED
5488  **/
5489 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5490 {
5491         struct ipr_cmnd *ipr_cmd;
5492         struct ipr_ioa_cfg *ioa_cfg;
5493         struct ipr_resource_entry *res;
5494         struct ipr_cmd_pkt *cmd_pkt;
5495         u32 ioasc, int_reg;
5496         int i, op_found = 0;
5497         struct ipr_hrr_queue *hrrq;
5498
5499         ENTER;
5500         ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5501         res = scsi_cmd->device->hostdata;
5502
5503         /* If we are currently going through reset/reload, return failed.
5504          * This will force the mid-layer to call ipr_eh_host_reset,
5505          * which will then go to sleep and wait for the reset to complete
5506          */
5507         if (ioa_cfg->in_reset_reload ||
5508             ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5509                 return FAILED;
5510         if (!res)
5511                 return FAILED;
5512
5513         /*
5514          * If we are aborting a timed out op, chances are that the timeout was caused
5515          * by a still not detected EEH error. In such cases, reading a register will
5516          * trigger the EEH recovery infrastructure.
5517          */
5518         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5519
5520         if (!ipr_is_gscsi(res))
5521                 return FAILED;
5522
5523         for_each_hrrq(hrrq, ioa_cfg) {
5524                 spin_lock(&hrrq->_lock);
5525                 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5526                         if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
5527                                 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
5528                                         op_found = 1;
5529                                         break;
5530                                 }
5531                         }
5532                 }
5533                 spin_unlock(&hrrq->_lock);
5534         }
5535
5536         if (!op_found)
5537                 return SUCCESS;
5538
5539         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5540         ipr_cmd->ioarcb.res_handle = res->res_handle;
5541         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5542         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5543         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5544         ipr_cmd->u.sdev = scsi_cmd->device;
5545
5546         scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5547                     scsi_cmd->cmnd[0]);
5548         ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5549         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5550
5551         /*
5552          * If the abort task timed out and we sent a bus reset, we will get
5553          * one of the following responses to the abort
5554          */
5555         if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5556                 ioasc = 0;
5557                 ipr_trace;
5558         }
5559
5560         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5561         if (!ipr_is_naca_model(res))
5562                 res->needs_sync_complete = 1;
5563
5564         LEAVE;
5565         return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5566 }
5567
5568 /**
5569  * ipr_scan_finished - Report whether the device scan has finished
5570  * @shost:              scsi host struct
5571  * @elapsed_time:       elapsed time since the scan started
5572  * Return value:
5573  *      0 if scan in progress / 1 if scan is complete
5574  **/
5575 static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5576 {
5577         unsigned long lock_flags;
5578         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5579         int rc = 0;
5580
5581         spin_lock_irqsave(shost->host_lock, lock_flags);
5582         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5583                 rc = 1;
5584         if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5585                 rc = 1;
5586         spin_unlock_irqrestore(shost->host_lock, lock_flags);
5587         return rc;
5588 }
5589
5590 /**
5591  * ipr_eh_abort - Abort a single op
5592  * @scsi_cmd:   scsi command struct
5593  *
5594  * Return value:
5595  *      SUCCESS / FAILED
5596  **/
5597 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5598 {
5599         unsigned long flags;
5600         int rc;
5601         struct ipr_ioa_cfg *ioa_cfg;
5602
5603         ENTER;
5604
5605         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5606
5607         spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5608         rc = ipr_cancel_op(scsi_cmd);
5609         spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5610
5611         if (rc == SUCCESS)
5612                 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5613         LEAVE;
5614         return rc;
5615 }
5616
5617 /**
5618  * ipr_handle_other_interrupt - Handle "other" interrupts
5619  * @ioa_cfg:    ioa config struct
5620  * @int_reg:    interrupt register
5621  *
5622  * Return value:
5623  *      IRQ_NONE / IRQ_HANDLED
5624  **/
5625 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5626                                               u32 int_reg)
5627 {
5628         irqreturn_t rc = IRQ_HANDLED;
5629         u32 int_mask_reg;
5630
5631         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5632         int_reg &= ~int_mask_reg;
5633
5634         /* If no operational interrupt occurred on the adapter, ignore it,
5635          * except on SIS 64, where a stage change interrupt is checked first.
5636          */
5637         if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5638                 if (ioa_cfg->sis64) {
5639                         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5640                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5641                         if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5642
5643                                 /* clear stage change */
5644                                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5645                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5646                                 list_del(&ioa_cfg->reset_cmd->queue);
5647                                 del_timer(&ioa_cfg->reset_cmd->timer);
5648                                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5649                                 return IRQ_HANDLED;
5650                         }
5651                 }
5652
5653                 return IRQ_NONE;
5654         }
5655
5656         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5657                 /* Mask the interrupt */
5658                 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5659                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5660
5661                 list_del(&ioa_cfg->reset_cmd->queue);
5662                 del_timer(&ioa_cfg->reset_cmd->timer);
5663                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5664         } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5665                 if (ioa_cfg->clear_isr) {
5666                         if (ipr_debug && printk_ratelimit())
5667                                 dev_err(&ioa_cfg->pdev->dev,
5668                                         "Spurious interrupt detected. 0x%08X\n", int_reg);
5669                         writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5670                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5671                         return IRQ_NONE;
5672                 }
5673         } else {
5674                 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5675                         ioa_cfg->ioa_unit_checked = 1;
5676                 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5677                         dev_err(&ioa_cfg->pdev->dev,
5678                                 "No Host RRQ. 0x%08X\n", int_reg);
5679                 else
5680                         dev_err(&ioa_cfg->pdev->dev,
5681                                 "Permanent IOA failure. 0x%08X\n", int_reg);
5682
5683                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5684                         ioa_cfg->sdt_state = GET_DUMP;
5685
5686                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5687                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5688         }
5689
5690         return rc;
5691 }
5692
5693 /**
5694  * ipr_isr_eh - Interrupt service routine error handler
5695  * @ioa_cfg:    ioa config struct
5696  * @msg:        message to log
5697  * @number:     number to log with the message
5698  * Return value:
5699  *      none
5700  **/
5701 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5702 {
5703         ioa_cfg->errors_logged++;
5704         dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5705
5706         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5707                 ioa_cfg->sdt_state = GET_DUMP;
5708
5709         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5710 }
5711
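/**
 * ipr_process_hrrq - Process responses on a host request response queue
 * @hrr_queue:  hrr queue to process
 * @budget:     maximum number of responses to process, or -1 for no limit
 * @doneq:      list on which to collect completed commands
 *
 * Return value:
 *      number of response queue entries processed
 **/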
5712 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5713                                                 struct list_head *doneq)
5714 {
5715         u32 ioasc;
5716         u16 cmd_index;
5717         struct ipr_cmnd *ipr_cmd;
5718         struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5719         int num_hrrq = 0;
5720
5721         /* If interrupts are disabled, ignore the interrupt */
5722         if (!hrr_queue->allow_interrupts)
5723                 return 0;
5724
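        /* An entry is new only while its toggle bit matches
         * hrr_queue->toggle_bit, which is flipped each time
         * hrrq_curr wraps back to hrrq_start below.
         */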
5725         while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5726                hrr_queue->toggle_bit) {
5727
5728                 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5729                              IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5730                              IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5731
5732                 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5733                              cmd_index < hrr_queue->min_cmd_id)) {
5734                         ipr_isr_eh(ioa_cfg,
5735                                 "Invalid response handle from IOA: ",
5736                                 cmd_index);
5737                         break;
5738                 }
5739
5740                 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5741                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5742
5743                 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5744
5745                 list_move_tail(&ipr_cmd->queue, doneq);
5746
5747                 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5748                         hrr_queue->hrrq_curr++;
5749                 } else {
5750                         hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5751                         hrr_queue->toggle_bit ^= 1u;
5752                 }
5753                 num_hrrq++;
5754                 if (budget > 0 && num_hrrq >= budget)
5755                         break;
5756         }
5757
5758         return num_hrrq;
5759 }
5760
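/**
 * ipr_iopoll - irq_poll callback to process completions under a budget
 * @iop:        irq_poll structure embedded in the hrr queue
 * @budget:     maximum number of completions to process per poll
 *
 * Return value:
 *      number of operations completed
 **/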
5761 static int ipr_iopoll(struct irq_poll *iop, int budget)
5762 {
5763         struct ipr_ioa_cfg *ioa_cfg;
5764         struct ipr_hrr_queue *hrrq;
5765         struct ipr_cmnd *ipr_cmd, *temp;
5766         unsigned long hrrq_flags;
5767         int completed_ops;
5768         LIST_HEAD(doneq);
5769
5770         hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5771         ioa_cfg = hrrq->ioa_cfg;
5772
5773         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5774         completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5775
5776         if (completed_ops < budget)
5777                 irq_poll_complete(iop);
5778         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5779
5780         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5781                 list_del(&ipr_cmd->queue);
5782                 del_timer(&ipr_cmd->timer);
5783                 ipr_cmd->fast_done(ipr_cmd);
5784         }
5785
5786         return completed_ops;
5787 }
5788
5789 /**
5790  * ipr_isr - Interrupt service routine
5791  * @irq:        irq number
5792  * @devp:       pointer to the hrr queue registered for this irq
5793  *
5794  * Return value:
5795  *      IRQ_NONE / IRQ_HANDLED
5796  **/
5797 static irqreturn_t ipr_isr(int irq, void *devp)
5798 {
5799         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5800         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5801         unsigned long hrrq_flags = 0;
5802         u32 int_reg = 0;
5803         int num_hrrq = 0;
5804         int irq_none = 0;
5805         struct ipr_cmnd *ipr_cmd, *temp;
5806         irqreturn_t rc = IRQ_NONE;
5807         LIST_HEAD(doneq);
5808
5809         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5810         /* If interrupts are disabled, ignore the interrupt */
5811         if (!hrrq->allow_interrupts) {
5812                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5813                 return IRQ_NONE;
5814         }
5815
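        /* Drain the HRRQ until it is empty, clearing the PCI interrupt
         * (when clear_isr is set) with a bounded number of retries.
         */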
5816         while (1) {
5817                 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5818                         rc =  IRQ_HANDLED;
5819
5820                         if (!ioa_cfg->clear_isr)
5821                                 break;
5822
5823                         /* Clear the PCI interrupt */
5824                         num_hrrq = 0;
5825                         do {
5826                                 writel(IPR_PCII_HRRQ_UPDATED,
5827                                      ioa_cfg->regs.clr_interrupt_reg32);
5828                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5829                         } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5830                                 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5831
5832                 } else if (rc == IRQ_NONE && irq_none == 0) {
5833                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5834                         irq_none++;
5835                 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5836                            int_reg & IPR_PCII_HRRQ_UPDATED) {
5837                         ipr_isr_eh(ioa_cfg,
5838                                 "Error clearing HRRQ: ", num_hrrq);
5839                         rc = IRQ_HANDLED;
5840                         break;
5841                 } else
5842                         break;
5843         }
5844
5845         if (unlikely(rc == IRQ_NONE))
5846                 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5847
5848         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5849         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5850                 list_del(&ipr_cmd->queue);
5851                 del_timer(&ipr_cmd->timer);
5852                 ipr_cmd->fast_done(ipr_cmd);
5853         }
5854         return rc;
5855 }
5856
5857 /**
5858  * ipr_isr_mhrrq - Interrupt service routine for secondary HRRQs
5859  * @irq:        irq number
5860  * @devp:       pointer to the hrr queue registered for this irq
5861  *
5862  * Return value:
5863  *      IRQ_NONE / IRQ_HANDLED
5864  **/
5865 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5866 {
5867         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5868         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5869         unsigned long hrrq_flags = 0;
5870         struct ipr_cmnd *ipr_cmd, *temp;
5871         irqreturn_t rc = IRQ_NONE;
5872         LIST_HEAD(doneq);
5873
5874         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5875
5876         /* If interrupts are disabled, ignore the interrupt */
5877         if (!hrrq->allow_interrupts) {
5878                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5879                 return IRQ_NONE;
5880         }
5881
5882         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5883                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5884                        hrrq->toggle_bit) {
5885                         irq_poll_sched(&hrrq->iopoll);
5886                         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5887                         return IRQ_HANDLED;
5888                 }
5889         } else {
5890                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5891                         hrrq->toggle_bit)
5892
5893                         if (ipr_process_hrrq(hrrq, -1, &doneq))
5894                                 rc =  IRQ_HANDLED;
5895         }
5896
5897         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5898
5899         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5900                 list_del(&ipr_cmd->queue);
5901                 del_timer(&ipr_cmd->timer);
5902                 ipr_cmd->fast_done(ipr_cmd);
5903         }
5904         return rc;
5905 }
5906
5907 /**
5908  * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5909  * @ioa_cfg:    ioa config struct
5910  * @ipr_cmd:    ipr command struct
5911  *
5912  * Return value:
5913  *      0 on success / -1 on failure
5914  **/
5915 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5916                              struct ipr_cmnd *ipr_cmd)
5917 {
5918         int i, nseg;
5919         struct scatterlist *sg;
5920         u32 length;
5921         u32 ioadl_flags = 0;
5922         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5923         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5924         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5925
5926         length = scsi_bufflen(scsi_cmd);
5927         if (!length)
5928                 return 0;
5929
5930         nseg = scsi_dma_map(scsi_cmd);
5931         if (nseg < 0) {
5932                 if (printk_ratelimit())
5933                         dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5934                 return -1;
5935         }
5936
5937         ipr_cmd->dma_use_sg = nseg;
5938
5939         ioarcb->data_transfer_length = cpu_to_be32(length);
5940         ioarcb->ioadl_len =
5941                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5942
5943         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5944                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5945                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5946         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5947                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5948
5949         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5950                 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5951                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5952                 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5953         }
5954
5955         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5956         return 0;
5957 }
5958
5959 /**
5960  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5961  * @ioa_cfg:    ioa config struct
5962  * @ipr_cmd:    ipr command struct
5963  *
5964  * Return value:
5965  *      0 on success / -1 on failure
5966  **/
5967 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5968                            struct ipr_cmnd *ipr_cmd)
5969 {
5970         int i, nseg;
5971         struct scatterlist *sg;
5972         u32 length;
5973         u32 ioadl_flags = 0;
5974         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5975         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5976         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5977
5978         length = scsi_bufflen(scsi_cmd);
5979         if (!length)
5980                 return 0;
5981
5982         nseg = scsi_dma_map(scsi_cmd);
5983         if (nseg < 0) {
5984                 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5985                 return -1;
5986         }
5987
5988         ipr_cmd->dma_use_sg = nseg;
5989
5990         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5991                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5992                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5993                 ioarcb->data_transfer_length = cpu_to_be32(length);
5994                 ioarcb->ioadl_len =
5995                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5996         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5997                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5998                 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5999                 ioarcb->read_ioadl_len =
6000                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6001         }
6002
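        /* When the list is short enough, use the IOADL embedded in the
         * IOARCB itself instead of the external descriptor area.
         */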
6003         if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
6004                 ioadl = ioarcb->u.add_data.u.ioadl;
6005                 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
6006                                     offsetof(struct ipr_ioarcb, u.add_data));
6007                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6008         }
6009
6010         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
6011                 ioadl[i].flags_and_data_len =
6012                         cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6013                 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
6014         }
6015
6016         ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6017         return 0;
6018 }
6019
6020 /**
6021  * __ipr_erp_done - Process completion of ERP for a device
6022  * @ipr_cmd:            ipr command struct
6023  *
6024  * This function copies the sense buffer into the scsi_cmd
6025  * struct and calls the scsi_done function.
6026  *
6027  * Return value:
6028  *      nothing
6029  **/
6030 static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6031 {
6032         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6033         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6034         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6035
6036         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6037                 scsi_cmd->result |= (DID_ERROR << 16);
6038                 scmd_printk(KERN_ERR, scsi_cmd,
6039                             "Request Sense failed with IOASC: 0x%08X\n", ioasc);
6040         } else {
6041                 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
6042                        SCSI_SENSE_BUFFERSIZE);
6043         }
6044
6045         if (res) {
6046                 if (!ipr_is_naca_model(res))
6047                         res->needs_sync_complete = 1;
6048                 res->in_erp = 0;
6049         }
6050         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6051         scsi_cmd->scsi_done(scsi_cmd);
6052         if (ipr_cmd->eh_comp)
6053                 complete(ipr_cmd->eh_comp);
6054         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6055 }
6056
6057 /**
6058  * ipr_erp_done - Process completion of ERP for a device
6059  * @ipr_cmd:            ipr command struct
6060  *
6061  * This function copies the sense buffer into the scsi_cmd
6062  * struct and calls the scsi_done function.
6063  *
6064  * Return value:
6065  *      nothing
6066  **/
6067 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6068 {
6069         struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6070         unsigned long hrrq_flags;
6071
6072         spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6073         __ipr_erp_done(ipr_cmd);
6074         spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6075 }
6076
6077 /**
6078  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
6079  * @ipr_cmd:    ipr command struct
6080  *
6081  * Return value:
6082  *      none
6083  **/
6084 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
6085 {
6086         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6087         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6088         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6089
6090         memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
6091         ioarcb->data_transfer_length = 0;
6092         ioarcb->read_data_transfer_length = 0;
6093         ioarcb->ioadl_len = 0;
6094         ioarcb->read_ioadl_len = 0;
6095         ioasa->hdr.ioasc = 0;
6096         ioasa->hdr.residual_data_len = 0;
6097
6098         if (ipr_cmd->ioa_cfg->sis64)
6099                 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6100                         cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
6101         else {
6102                 ioarcb->write_ioadl_addr =
6103                         cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
6104                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6105         }
6106 }
6107
6108 /**
6109  * __ipr_erp_request_sense - Send request sense to a device
6110  * @ipr_cmd:    ipr command struct
6111  *
6112  * This function sends a request sense to a device as a result
6113  * of a check condition.
6114  *
6115  * Return value:
6116  *      nothing
6117  **/
6118 static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6119 {
6120         struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6121         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6122
6123         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6124                 __ipr_erp_done(ipr_cmd);
6125                 return;
6126         }
6127
6128         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6129
6130         cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
6131         cmd_pkt->cdb[0] = REQUEST_SENSE;
6132         cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
6133         cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
6134         cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6135         cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
6136
6137         ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
6138                        SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
6139
6140         ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
6141                    IPR_REQUEST_SENSE_TIMEOUT * 2);
6142 }
6143
6144 /**
6145  * ipr_erp_request_sense - Send request sense to a device
6146  * @ipr_cmd:    ipr command struct
6147  *
6148  * This function sends a request sense to a device as a result
6149  * of a check condition.
6150  *
6151  * Return value:
6152  *      nothing
6153  **/
6154 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6155 {
6156         struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6157         unsigned long hrrq_flags;
6158
6159         spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6160         __ipr_erp_request_sense(ipr_cmd);
6161         spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6162 }
6163
6164 /**
6165  * ipr_erp_cancel_all - Send cancel all to a device
6166  * @ipr_cmd:    ipr command struct
6167  *
6168  * This function sends a cancel all to a device to clear the
6169  * queue. If we are running TCQ on the device, QERR is set to 1,
6170  * which means all outstanding ops have been dropped on the floor.
6171  * Cancel all will return them to us.
6172  *
6173  * Return value:
6174  *      nothing
6175  **/
6176 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
6177 {
6178         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6179         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6180         struct ipr_cmd_pkt *cmd_pkt;
6181
6182         res->in_erp = 1;
6183
6184         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6185
6186         if (!scsi_cmd->device->simple_tags) {
6187                 __ipr_erp_request_sense(ipr_cmd);
6188                 return;
6189         }
6190
6191         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6192         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
6193         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
6194
6195         ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
6196                    IPR_CANCEL_ALL_TIMEOUT);
6197 }
6198
6199 /**
6200  * ipr_dump_ioasa - Dump contents of IOASA
6201  * @ioa_cfg:    ioa config struct
6202  * @ipr_cmd:    ipr command struct
6203  * @res:                resource entry struct
6204  *
6205  * This function is invoked by the interrupt handler when ops
6206  * fail. It will log the IOASA if appropriate. Only called
6207  * for GPDD ops.
6208  *
6209  * Return value:
6210  *      none
6211  **/
6212 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
6213                            struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
6214 {
6215         int i;
6216         u16 data_len;
6217         u32 ioasc, fd_ioasc;
6218         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6219         __be32 *ioasa_data = (__be32 *)ioasa;
6220         int error_index;
6221
6222         ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
6223         fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
6224
6225         if (0 == ioasc)
6226                 return;
6227
6228         if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
6229                 return;
6230
6231         if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
6232                 error_index = ipr_get_error(fd_ioasc);
6233         else
6234                 error_index = ipr_get_error(ioasc);
6235
6236         if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6237                 /* Don't log an error if the IOA already logged one */
6238                 if (ioasa->hdr.ilid != 0)
6239                         return;
6240
6241                 if (!ipr_is_gscsi(res))
6242                         return;
6243
6244                 if (ipr_error_table[error_index].log_ioasa == 0)
6245                         return;
6246         }
6247
6248         ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
6249
6250         data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6251         if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6252                 data_len = sizeof(struct ipr_ioasa64);
6253         else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
6254                 data_len = sizeof(struct ipr_ioasa);
6255
6256         ipr_err("IOASA Dump:\n");
6257
6258         for (i = 0; i < data_len / 4; i += 4) {
6259                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6260                         be32_to_cpu(ioasa_data[i]),
6261                         be32_to_cpu(ioasa_data[i+1]),
6262                         be32_to_cpu(ioasa_data[i+2]),
6263                         be32_to_cpu(ioasa_data[i+3]));
6264         }
6265 }
6266
6267 /**
6268  * ipr_gen_sense - Generate SCSI sense data from an IOASA
6269  * @ipr_cmd:    ipr command struct
6271  *
6272  * Return value:
6273  *      none
6274  **/
6275 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6276 {
6277         u32 failing_lba;
6278         u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6279         struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
6280         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6281         u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
6282
6283         memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
6284
6285         if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6286                 return;
6287
6288         ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
6289
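        /* A failing LBA wider than 32 bits cannot be represented in
         * fixed format sense data, so build descriptor format (0x72)
         * sense data instead.
         */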
6290         if (ipr_is_vset_device(res) &&
6291             ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6292             ioasa->u.vset.failing_lba_hi != 0) {
6293                 sense_buf[0] = 0x72;
6294                 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6295                 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6296                 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6297
6298                 sense_buf[7] = 12;
6299                 sense_buf[8] = 0;
6300                 sense_buf[9] = 0x0A;
6301                 sense_buf[10] = 0x80;
6302
6303                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6304
6305                 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6306                 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6307                 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6308                 sense_buf[15] = failing_lba & 0x000000ff;
6309
6310                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6311
6312                 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6313                 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6314                 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6315                 sense_buf[19] = failing_lba & 0x000000ff;
6316         } else {
6317                 sense_buf[0] = 0x70;
6318                 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6319                 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6320                 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6321
6322                 /* Illegal request */
6323                 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
6324                     (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
6325                         sense_buf[7] = 10;      /* additional length */
6326
6327                         /* IOARCB was in error */
6328                         if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6329                                 sense_buf[15] = 0xC0;
6330                         else    /* Parameter data was invalid */
6331                                 sense_buf[15] = 0x80;
6332
6333                         sense_buf[16] =
6334                             ((IPR_FIELD_POINTER_MASK &
6335                               be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
6336                         sense_buf[17] =
6337                             (IPR_FIELD_POINTER_MASK &
6338                              be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
6339                 } else {
6340                         if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6341                                 if (ipr_is_vset_device(res))
6342                                         failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6343                                 else
6344                                         failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6345
6346                                 sense_buf[0] |= 0x80;   /* Or in the Valid bit */
6347                                 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6348                                 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6349                                 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6350                                 sense_buf[6] = failing_lba & 0x000000ff;
6351                         }
6352
6353                         sense_buf[7] = 6;       /* additional length */
6354                 }
6355         }
6356 }
6357
6358 /**
6359  * ipr_get_autosense - Copy autosense data to sense buffer
6360  * @ipr_cmd:    ipr command struct
6361  *
6362  * This function copies the autosense buffer to the buffer
6363  * in the scsi_cmd, if there is autosense available.
6364  *
6365  * Return value:
6366  *      1 if autosense was available / 0 if not
6367  **/
6368 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6369 {
6370         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6371         struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6372
6373         if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6374                 return 0;
6375
6376         if (ipr_cmd->ioa_cfg->sis64)
6377                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6378                        min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6379                            SCSI_SENSE_BUFFERSIZE));
6380         else
6381                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6382                        min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6383                            SCSI_SENSE_BUFFERSIZE));
6384         return 1;
6385 }
6386
6387 /**
6388  * ipr_erp_start - Process an error response for a SCSI op
6389  * @ioa_cfg:    ioa config struct
6390  * @ipr_cmd:    ipr command struct
6391  *
6392  * This function determines whether or not to initiate ERP
6393  * on the affected device.
6394  *
6395  * Return value:
6396  *      nothing
6397  **/
6398 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6399                               struct ipr_cmnd *ipr_cmd)
6400 {
6401         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6402         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6403         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6404         u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6405
6406         if (!res) {
6407                 __ipr_scsi_eh_done(ipr_cmd);
6408                 return;
6409         }
6410
6411         if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6412                 ipr_gen_sense(ipr_cmd);
6413
6414         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6415
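        /* Map the masked IOASC to a mid-layer result; a check condition
         * without autosense kicks off cancel all / request sense ERP.
         */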
6416         switch (masked_ioasc) {
6417         case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6418                 if (ipr_is_naca_model(res))
6419                         scsi_cmd->result |= (DID_ABORT << 16);
6420                 else
6421                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6422                 break;
6423         case IPR_IOASC_IR_RESOURCE_HANDLE:
6424         case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6425                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6426                 break;
6427         case IPR_IOASC_HW_SEL_TIMEOUT:
6428                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6429                 if (!ipr_is_naca_model(res))
6430                         res->needs_sync_complete = 1;
6431                 break;
6432         case IPR_IOASC_SYNC_REQUIRED:
6433                 if (!res->in_erp)
6434                         res->needs_sync_complete = 1;
6435                 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6436                 break;
6437         case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6438         case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6439                 /*
6440                  * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
6441                  * so the SCSI mid-layer and upper layers can handle it accordingly.
6442                  */
6443                 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
6444                         scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6445                 break;
6446         case IPR_IOASC_BUS_WAS_RESET:
6447         case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6448                 /*
6449                  * Report the bus reset and ask for a retry. The device
6450                  * will return a CC/UA on the next command.
6451                  */
6452                 if (!res->resetting_device)
6453                         scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6454                 scsi_cmd->result |= (DID_ERROR << 16);
6455                 if (!ipr_is_naca_model(res))
6456                         res->needs_sync_complete = 1;
6457                 break;
6458         case IPR_IOASC_HW_DEV_BUS_STATUS:
6459                 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6460                 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6461                         if (!ipr_get_autosense(ipr_cmd)) {
6462                                 if (!ipr_is_naca_model(res)) {
6463                                         ipr_erp_cancel_all(ipr_cmd);
6464                                         return;
6465                                 }
6466                         }
6467                 }
6468                 if (!ipr_is_naca_model(res))
6469                         res->needs_sync_complete = 1;
6470                 break;
6471         case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6472                 break;
6473         case IPR_IOASC_IR_NON_OPTIMIZED:
6474                 if (res->raw_mode) {
6475                         res->raw_mode = 0;
6476                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6477                 } else
6478                         scsi_cmd->result |= (DID_ERROR << 16);
6479                 break;
6480         default:
6481                 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6482                         scsi_cmd->result |= (DID_ERROR << 16);
6483                 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6484                         res->needs_sync_complete = 1;
6485                 break;
6486         }
6487
6488         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6489         scsi_cmd->scsi_done(scsi_cmd);
6490         if (ipr_cmd->eh_comp)
6491                 complete(ipr_cmd->eh_comp);
6492         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6493 }
6494
6495 /**
6496  * ipr_scsi_done - mid-layer done function
6497  * @ipr_cmd:    ipr command struct
6498  *
6499  * This function is invoked by the interrupt handler for
6500  * ops generated by the SCSI mid-layer
6501  *
6502  * Return value:
6503  *      none
6504  **/
6505 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6506 {
6507         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6508         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6509         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6510         unsigned long lock_flags;
6511
6512         scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6513
6514         if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6515                 scsi_dma_unmap(scsi_cmd);
6516
6517                 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6518                 scsi_cmd->scsi_done(scsi_cmd);
6519                 if (ipr_cmd->eh_comp)
6520                         complete(ipr_cmd->eh_comp);
6521                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6522                 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6523         } else {
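                /* Failed ops are handed to ERP, which runs under the host lock */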
6524                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6525                 spin_lock(&ipr_cmd->hrrq->_lock);
6526                 ipr_erp_start(ioa_cfg, ipr_cmd);
6527                 spin_unlock(&ipr_cmd->hrrq->_lock);
6528                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6529         }
6530 }
6531
6532 /**
6533  * ipr_queuecommand - Queue a mid-layer request
6534  * @shost:              scsi host struct
6535  * @scsi_cmd:   scsi command struct
6536  *
6537  * This function queues a request generated by the mid-layer.
6538  *
6539  * Return value:
6540  *      0 on success
6541  *      SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6542  *      SCSI_MLQUEUE_HOST_BUSY if host is busy
6543  **/
6544 static int ipr_queuecommand(struct Scsi_Host *shost,
6545                             struct scsi_cmnd *scsi_cmd)
6546 {
6547         struct ipr_ioa_cfg *ioa_cfg;
6548         struct ipr_resource_entry *res;
6549         struct ipr_ioarcb *ioarcb;
6550         struct ipr_cmnd *ipr_cmd;
6551         unsigned long hrrq_flags, lock_flags;
6552         int rc;
6553         struct ipr_hrr_queue *hrrq;
6554         int hrrq_id;
6555
6556         ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6557
6558         scsi_cmd->result = (DID_OK << 16);
6559         res = scsi_cmd->device->hostdata;
6560
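        /* Commands for SATA devices are handed to libata rather than
         * being built here.
         */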
6561         if (ipr_is_gata(res) && res->sata_port) {
6562                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6563                 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6564                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6565                 return rc;
6566         }
6567
6568         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6569         hrrq = &ioa_cfg->hrrq[hrrq_id];
6570
6571         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6572         /*
6573          * We are currently blocking all devices due to a host reset.
6574          * We have told the host to stop giving us new requests, but
6575          * ERP ops don't count. FIXME
6576          */
6577         if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6578                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6579                 return SCSI_MLQUEUE_HOST_BUSY;
6580         }
6581
6582         /*
6583          * FIXME - Create scsi_set_host_offline interface
6584          *  and the ioa_is_dead check can be removed
6585          */
6586         if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6587                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6588                 goto err_nodev;
6589         }
6590
6591         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6592         if (ipr_cmd == NULL) {
6593                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6594                 return SCSI_MLQUEUE_HOST_BUSY;
6595         }
6596         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6597
6598         ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6599         ioarcb = &ipr_cmd->ioarcb;
6600
6601         memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6602         ipr_cmd->scsi_cmd = scsi_cmd;
6603         ipr_cmd->done = ipr_scsi_eh_done;
6604
6605         if (ipr_is_gscsi(res)) {
6606                 if (scsi_cmd->underflow == 0)
6607                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6608
6609                 if (res->reset_occurred) {
6610                         res->reset_occurred = 0;
6611                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6612                 }
6613         }
6614
6615         if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6616                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6617
6618                 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6619                 if (scsi_cmd->flags & SCMD_TAGGED)
6620                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6621                 else
6622                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
6623         }
6624
6625         if (scsi_cmd->cmnd[0] >= 0xC0 &&
6626             (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6627                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6628         }
6629         if (res->raw_mode && ipr_is_af_dasd_device(res)) {
6630                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
6631
6632                 if (scsi_cmd->underflow == 0)
6633                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6634         }
6635
6636         if (ioa_cfg->sis64)
6637                 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6638         else
6639                 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6640
6641         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6642         if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6643                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6644                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6645                 if (!rc)
6646                         scsi_dma_unmap(scsi_cmd);
6647                 return SCSI_MLQUEUE_HOST_BUSY;
6648         }
6649
6650         if (unlikely(hrrq->ioa_is_dead)) {
6651                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6652                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6653                 scsi_dma_unmap(scsi_cmd);
6654                 goto err_nodev;
6655         }
6656
6657         ioarcb->res_handle = res->res_handle;
6658         if (res->needs_sync_complete) {
6659                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6660                 res->needs_sync_complete = 0;
6661         }
6662         list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6663         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6664         ipr_send_command(ipr_cmd);
6665         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6666         return 0;
6667
6668 err_nodev:
6669         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6670         memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6671         scsi_cmd->result = (DID_NO_CONNECT << 16);
6672         scsi_cmd->scsi_done(scsi_cmd);
6673         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6674         return 0;
6675 }
6676
6677 /**
6678  * ipr_ioctl - IOCTL handler
6679  * @sdev:       scsi device struct
6680  * @cmd:        IOCTL cmd
6681  * @arg:        IOCTL arg
6682  *
6683  * Return value:
6684  *      0 on success / other on failure
6685  **/
6686 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6687 {
6688         struct ipr_resource_entry *res;
6689
6690         res = (struct ipr_resource_entry *)sdev->hostdata;
6691         if (res && ipr_is_gata(res)) {
6692                 if (cmd == HDIO_GET_IDENTITY)
6693                         return -ENOTTY;
6694                 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6695         }
6696
6697         return -EINVAL;
6698 }
6699
6700 /**
6701  * ipr_ioa_info - Get information about the card/driver
6702  * @host:       scsi host struct
6703  *
6704  * Return value:
6705  *      pointer to buffer with description string
6706  **/
6707 static const char *ipr_ioa_info(struct Scsi_Host *host)
6708 {
6709         static char buffer[512];
6710         struct ipr_ioa_cfg *ioa_cfg;
6711         unsigned long lock_flags = 0;
6712
6713         ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6714
6715         spin_lock_irqsave(host->host_lock, lock_flags);
6716         sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6717         spin_unlock_irqrestore(host->host_lock, lock_flags);
6718
6719         return buffer;
6720 }
6721
6722 static struct scsi_host_template driver_template = {
6723         .module = THIS_MODULE,
6724         .name = "IPR",
6725         .info = ipr_ioa_info,
6726         .ioctl = ipr_ioctl,
6727         .queuecommand = ipr_queuecommand,
6728         .eh_abort_handler = ipr_eh_abort,
6729         .eh_device_reset_handler = ipr_eh_dev_reset,
6730         .eh_host_reset_handler = ipr_eh_host_reset,
6731         .slave_alloc = ipr_slave_alloc,
6732         .slave_configure = ipr_slave_configure,
6733         .slave_destroy = ipr_slave_destroy,
6734         .scan_finished = ipr_scan_finished,
6735         .target_alloc = ipr_target_alloc,
6736         .target_destroy = ipr_target_destroy,
6737         .change_queue_depth = ipr_change_queue_depth,
6738         .bios_param = ipr_biosparam,
6739         .can_queue = IPR_MAX_COMMANDS,
6740         .this_id = -1,
6741         .sg_tablesize = IPR_MAX_SGLIST,
6742         .max_sectors = IPR_IOA_MAX_SECTORS,
6743         .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6744         .use_clustering = ENABLE_CLUSTERING,
6745         .shost_attrs = ipr_ioa_attrs,
6746         .sdev_attrs = ipr_dev_attrs,
6747         .proc_name = IPR_NAME,
6748 };
6749
6750 /**
6751  * ipr_ata_phy_reset - libata phy_reset handler
6752  * @ap:         ata port to reset
6753  *
6754  **/
6755 static void ipr_ata_phy_reset(struct ata_port *ap)
6756 {
6757         unsigned long flags;
6758         struct ipr_sata_port *sata_port = ap->private_data;
6759         struct ipr_resource_entry *res = sata_port->res;
6760         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6761         int rc;
6762
6763         ENTER;
6764         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6765         while (ioa_cfg->in_reset_reload) {
6766                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6767                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6768                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6769         }
6770
6771         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6772                 goto out_unlock;
6773
6774         rc = ipr_device_reset(ioa_cfg, res);
6775
6776         if (rc) {
6777                 ap->link.device[0].class = ATA_DEV_NONE;
6778                 goto out_unlock;
6779         }
6780
6781         ap->link.device[0].class = res->ata_class;
6782         if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6783                 ap->link.device[0].class = ATA_DEV_NONE;
6784
6785 out_unlock:
6786         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6787         LEAVE;
6788 }
6789
6790 /**
6791  * ipr_ata_post_internal - Cleanup after an internal command
6792  * @qc: ATA queued command
6793  *
6794  * Return value:
6795  *      none
6796  **/
6797 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6798 {
6799         struct ipr_sata_port *sata_port = qc->ap->private_data;
6800         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6801         struct ipr_cmnd *ipr_cmd;
6802         struct ipr_hrr_queue *hrrq;
6803         unsigned long flags;
6804
6805         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6806         while (ioa_cfg->in_reset_reload) {
6807                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6808                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6809                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6810         }
6811
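        /* If the internal command is still outstanding, reset the
         * device to abort it.
         */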
6812         for_each_hrrq(hrrq, ioa_cfg) {
6813                 spin_lock(&hrrq->_lock);
6814                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6815                         if (ipr_cmd->qc == qc) {
6816                                 ipr_device_reset(ioa_cfg, sata_port->res);
6817                                 break;
6818                         }
6819                 }
6820                 spin_unlock(&hrrq->_lock);
6821         }
6822         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6823 }
6824
6825 /**
6826  * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6827  * @regs:       destination
6828  * @tf: source ATA taskfile
6829  *
6830  * Return value:
6831  *      none
6832  **/
6833 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6834                              struct ata_taskfile *tf)
6835 {
6836         regs->feature = tf->feature;
6837         regs->nsect = tf->nsect;
6838         regs->lbal = tf->lbal;
6839         regs->lbam = tf->lbam;
6840         regs->lbah = tf->lbah;
6841         regs->device = tf->device;
6842         regs->command = tf->command;
6843         regs->hob_feature = tf->hob_feature;
6844         regs->hob_nsect = tf->hob_nsect;
6845         regs->hob_lbal = tf->hob_lbal;
6846         regs->hob_lbam = tf->hob_lbam;
6847         regs->hob_lbah = tf->hob_lbah;
6848         regs->ctl = tf->ctl;
6849 }
6850
6851 /**
6852  * ipr_sata_done - done function for SATA commands
6853  * @ipr_cmd:    ipr command struct
6854  *
6855  * This function is invoked by the interrupt handler for
6856  * ops generated by the SCSI mid-layer to SATA devices
6857  *
6858  * Return value:
6859  *      none
6860  **/
6861 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6862 {
6863         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6864         struct ata_queued_cmd *qc = ipr_cmd->qc;
6865         struct ipr_sata_port *sata_port = qc->ap->private_data;
6866         struct ipr_resource_entry *res = sata_port->res;
6867         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6868
6869         spin_lock(&ipr_cmd->hrrq->_lock);
6870         if (ipr_cmd->ioa_cfg->sis64)
6871                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6872                        sizeof(struct ipr_ioasa_gata));
6873         else
6874                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6875                        sizeof(struct ipr_ioasa_gata));
6876         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6877
6878         if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6879                 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6880
6881         if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6882                 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6883         else
6884                 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6885         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6886         spin_unlock(&ipr_cmd->hrrq->_lock);
6887         ata_qc_complete(qc);
6888 }
6889
6890 /**
6891  * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6892  * @ipr_cmd:    ipr command struct
6893  * @qc:         ATA queued command
6894  *
6895  **/
6896 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6897                                   struct ata_queued_cmd *qc)
6898 {
6899         u32 ioadl_flags = 0;
6900         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6901         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6902         struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6903         int len = qc->nbytes;
6904         struct scatterlist *sg;
6905         unsigned int si;
6906         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6907
6908         if (len == 0)
6909                 return;
6910
6911         if (qc->dma_dir == DMA_TO_DEVICE) {
6912                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6913                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6914         } else if (qc->dma_dir == DMA_FROM_DEVICE)
6915                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6916
6917         ioarcb->data_transfer_length = cpu_to_be32(len);
6918         ioarcb->ioadl_len =
6919                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6920         ioarcb->u.sis64_addr_data.data_ioadl_addr =
6921                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6922
6923         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6924                 ioadl64->flags = cpu_to_be32(ioadl_flags);
6925                 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6926                 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6927
6928                 last_ioadl64 = ioadl64;
6929                 ioadl64++;
6930         }
6931
6932         if (likely(last_ioadl64))
6933                 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6934 }
6935
6936 /**
6937  * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6938  * @ipr_cmd:    ipr command struct
6939  * @qc:         ATA queued command
6940  *
6941  **/
6942 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6943                                 struct ata_queued_cmd *qc)
6944 {
6945         u32 ioadl_flags = 0;
6946         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6947         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6948         struct ipr_ioadl_desc *last_ioadl = NULL;
6949         int len = qc->nbytes;
6950         struct scatterlist *sg;
6951         unsigned int si;
6952
6953         if (len == 0)
6954                 return;
6955
6956         if (qc->dma_dir == DMA_TO_DEVICE) {
6957                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6958                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6959                 ioarcb->data_transfer_length = cpu_to_be32(len);
6960                 ioarcb->ioadl_len =
6961                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6962         } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6963                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6964                 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6965                 ioarcb->read_ioadl_len =
6966                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6967         }
6968
6969         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6970                 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6971                 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6972
6973                 last_ioadl = ioadl;
6974                 ioadl++;
6975         }
6976
6977         if (likely(last_ioadl))
6978                 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6979 }
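
/*
 * Note the asymmetry with the sis64 builder above: the 32-bit IOARCB
 * keeps separate length/IOADL fields per direction (read_data_transfer_length
 * and read_ioadl_len for reads, data_transfer_length and ioadl_len for
 * writes), whereas the 64-bit path uses a single pair plus an explicit
 * 64-bit data_ioadl_addr pointing at the descriptor list.
 */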
6980
6981 /**
6982  * ipr_qc_defer - Get a free ipr_cmd
6983  * @qc: queued command
6984  *
6985  * Return value:
6986  *      0 if success / ATA_DEFER_LINK if the command must be deferred
6987  **/
6988 static int ipr_qc_defer(struct ata_queued_cmd *qc)
6989 {
6990         struct ata_port *ap = qc->ap;
6991         struct ipr_sata_port *sata_port = ap->private_data;
6992         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6993         struct ipr_cmnd *ipr_cmd;
6994         struct ipr_hrr_queue *hrrq;
6995         int hrrq_id;
6996
6997         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6998         hrrq = &ioa_cfg->hrrq[hrrq_id];
6999
7000         qc->lldd_task = NULL;
7001         spin_lock(&hrrq->_lock);
7002         if (unlikely(hrrq->ioa_is_dead)) {
7003                 spin_unlock(&hrrq->_lock);
7004                 return 0;
7005         }
7006
7007         if (unlikely(!hrrq->allow_cmds)) {
7008                 spin_unlock(&hrrq->_lock);
7009                 return ATA_DEFER_LINK;
7010         }
7011
7012         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
7013         if (ipr_cmd == NULL) {
7014                 spin_unlock(&hrrq->_lock);
7015                 return ATA_DEFER_LINK;
7016         }
7017
7018         qc->lldd_task = ipr_cmd;
7019         spin_unlock(&hrrq->_lock);
7020         return 0;
7021 }
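
/*
 * A rough sketch of the qc_defer/qc_issue contract assumed here (libata
 * pseudocode, simplified): ->qc_defer() runs first and may stash a free
 * command block in qc->lldd_task; ->qc_issue() then consumes it, retrying
 * the allocation once if the defer step never ran:
 *
 *	if (ap->ops->qc_defer(qc) == ATA_DEFER_LINK)
 *		return;				// requeued by libata
 *	ap->ops->qc_issue(qc);			// uses qc->lldd_task
 */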
7022
7023 /**
7024  * ipr_qc_issue - Issue a SATA qc to a device
7025  * @qc: queued command
7026  *
7027  * Return value:
7028  *      0 if success / AC_ERR_* value on failure
7029  **/
7030 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
7031 {
7032         struct ata_port *ap = qc->ap;
7033         struct ipr_sata_port *sata_port = ap->private_data;
7034         struct ipr_resource_entry *res = sata_port->res;
7035         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7036         struct ipr_cmnd *ipr_cmd;
7037         struct ipr_ioarcb *ioarcb;
7038         struct ipr_ioarcb_ata_regs *regs;
7039
7040         if (qc->lldd_task == NULL)
7041                 ipr_qc_defer(qc);
7042
7043         ipr_cmd = qc->lldd_task;
7044         if (ipr_cmd == NULL)
7045                 return AC_ERR_SYSTEM;
7046
7047         qc->lldd_task = NULL;
7048         spin_lock(&ipr_cmd->hrrq->_lock);
7049         if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
7050                         ipr_cmd->hrrq->ioa_is_dead)) {
7051                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7052                 spin_unlock(&ipr_cmd->hrrq->_lock);
7053                 return AC_ERR_SYSTEM;
7054         }
7055
7056         ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
7057         ioarcb = &ipr_cmd->ioarcb;
7058
7059         if (ioa_cfg->sis64) {
7060                 regs = &ipr_cmd->i.ata_ioadl.regs;
7061                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
7062         } else
7063                 regs = &ioarcb->u.add_data.u.regs;
7064
7065         memset(regs, 0, sizeof(*regs));
7066         ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
7067
7068         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7069         ipr_cmd->qc = qc;
7070         ipr_cmd->done = ipr_sata_done;
7071         ipr_cmd->ioarcb.res_handle = res->res_handle;
7072         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
7073         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
7074         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
7075         ipr_cmd->dma_use_sg = qc->n_elem;
7076
7077         if (ioa_cfg->sis64)
7078                 ipr_build_ata_ioadl64(ipr_cmd, qc);
7079         else
7080                 ipr_build_ata_ioadl(ipr_cmd, qc);
7081
7082         regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
7083         ipr_copy_sata_tf(regs, &qc->tf);
7084         memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
7085         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
7086
7087         switch (qc->tf.protocol) {
7088         case ATA_PROT_NODATA:
7089         case ATA_PROT_PIO:
7090                 break;
7091
7092         case ATA_PROT_DMA:
7093                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7094                 break;
7095
7096         case ATAPI_PROT_PIO:
7097         case ATAPI_PROT_NODATA:
7098                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7099                 break;
7100
7101         case ATAPI_PROT_DMA:
7102                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7103                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7104                 break;
7105
7106         default:
7107                 WARN_ON(1);
7108                 spin_unlock(&ipr_cmd->hrrq->_lock);
7109                 return AC_ERR_INVALID;
7110         }
7111
7112         ipr_send_command(ipr_cmd);
7113         spin_unlock(&ipr_cmd->hrrq->_lock);
7114
7115         return 0;
7116 }
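
/*
 * Summary of the protocol-to-flag mapping in the switch above:
 *
 *	ATA_PROT_NODATA, ATA_PROT_PIO      -> no extra flags
 *	ATA_PROT_DMA                       -> XFER_TYPE_DMA
 *	ATAPI_PROT_PIO, ATAPI_PROT_NODATA  -> PACKET_CMD
 *	ATAPI_PROT_DMA                     -> PACKET_CMD | XFER_TYPE_DMA
 */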
7117
7118 /**
7119  * ipr_qc_fill_rtf - Read result TF
7120  * @qc: ATA queued command
7121  *
7122  * Return value:
7123  *      true
7124  **/
7125 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
7126 {
7127         struct ipr_sata_port *sata_port = qc->ap->private_data;
7128         struct ipr_ioasa_gata *g = &sata_port->ioasa;
7129         struct ata_taskfile *tf = &qc->result_tf;
7130
7131         tf->feature = g->error;
7132         tf->nsect = g->nsect;
7133         tf->lbal = g->lbal;
7134         tf->lbam = g->lbam;
7135         tf->lbah = g->lbah;
7136         tf->device = g->device;
7137         tf->command = g->status;
7138         tf->hob_nsect = g->hob_nsect;
7139         tf->hob_lbal = g->hob_lbal;
7140         tf->hob_lbam = g->hob_lbam;
7141         tf->hob_lbah = g->hob_lbah;
7142
7143         return true;
7144 }
7145
7146 static struct ata_port_operations ipr_sata_ops = {
7147         .phy_reset = ipr_ata_phy_reset,
7148         .hardreset = ipr_sata_reset,
7149         .post_internal_cmd = ipr_ata_post_internal,
7150         .qc_prep = ata_noop_qc_prep,
7151         .qc_defer = ipr_qc_defer,
7152         .qc_issue = ipr_qc_issue,
7153         .qc_fill_rtf = ipr_qc_fill_rtf,
7154         .port_start = ata_sas_port_start,
7155         .port_stop = ata_sas_port_stop
7156 };
7157
7158 static struct ata_port_info sata_port_info = {
7159         .flags          = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
7160                           ATA_FLAG_SAS_HOST,
7161         .pio_mask       = ATA_PIO4_ONLY,
7162         .mwdma_mask     = ATA_MWDMA2,
7163         .udma_mask      = ATA_UDMA6,
7164         .port_ops       = &ipr_sata_ops
7165 };
7166
7167 #ifdef CONFIG_PPC_PSERIES
7168 static const u16 ipr_blocked_processors[] = {
7169         PVR_NORTHSTAR,
7170         PVR_PULSAR,
7171         PVR_POWER4,
7172         PVR_ICESTAR,
7173         PVR_SSTAR,
7174         PVR_POWER4p,
7175         PVR_630,
7176         PVR_630p
7177 };
7178
7179 /**
7180  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7181  * @ioa_cfg:    ioa cfg struct
7182  *
7183  * Adapters that use Gemstone revision < 3.1 do not work reliably on
7184  * certain pSeries hardware. This function determines if the given
7185  * adapter is in one of these configurations or not.
7186  *
7187  * Return value:
7188  *      1 if adapter is not supported / 0 if adapter is supported
7189  **/
7190 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
7191 {
7192         int i;
7193
7194         if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
7195                 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
7196                         if (pvr_version_is(ipr_blocked_processors[i]))
7197                                 return 1;
7198                 }
7199         }
7200         return 0;
7201 }
7202 #else
7203 #define ipr_invalid_adapter(ioa_cfg) 0
7204 #endif
7205
7206 /**
7207  * ipr_ioa_bringdown_done - IOA bring down completion.
7208  * @ipr_cmd:    ipr command struct
7209  *
7210  * This function processes the completion of an adapter bring down.
7211  * It wakes any reset sleepers.
7212  *
7213  * Return value:
7214  *      IPR_RC_JOB_RETURN
7215  **/
7216 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
7217 {
7218         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7219         int i;
7220
7221         ENTER;
7222         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
7223                 ipr_trace;
7224                 ioa_cfg->scsi_unblock = 1;
7225                 schedule_work(&ioa_cfg->work_q);
7226         }
7227
7228         ioa_cfg->in_reset_reload = 0;
7229         ioa_cfg->reset_retries = 0;
7230         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7231                 spin_lock(&ioa_cfg->hrrq[i]._lock);
7232                 ioa_cfg->hrrq[i].ioa_is_dead = 1;
7233                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7234         }
7235         wmb();
7236
7237         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7238         wake_up_all(&ioa_cfg->reset_wait_q);
7239         LEAVE;
7240
7241         return IPR_RC_JOB_RETURN;
7242 }
7243
7244 /**
7245  * ipr_ioa_reset_done - IOA reset completion.
7246  * @ipr_cmd:    ipr command struct
7247  *
7248  * This function processes the completion of an adapter reset.
7249  * It schedules any necessary mid-layer add/removes and
7250  * wakes any reset sleepers.
7251  *
7252  * Return value:
7253  *      IPR_RC_JOB_RETURN
7254  **/
7255 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7256 {
7257         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7258         struct ipr_resource_entry *res;
7259         int j;
7260
7261         ENTER;
7262         ioa_cfg->in_reset_reload = 0;
7263         for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7264                 spin_lock(&ioa_cfg->hrrq[j]._lock);
7265                 ioa_cfg->hrrq[j].allow_cmds = 1;
7266                 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7267         }
7268         wmb();
7269         ioa_cfg->reset_cmd = NULL;
7270         ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
7271
7272         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
7273                 if (res->add_to_ml || res->del_from_ml) {
7274                         ipr_trace;
7275                         break;
7276                 }
7277         }
7278         schedule_work(&ioa_cfg->work_q);
7279
7280         for (j = 0; j < IPR_NUM_HCAMS; j++) {
7281                 list_del_init(&ioa_cfg->hostrcb[j]->queue);
7282                 if (j < IPR_NUM_LOG_HCAMS)
7283                         ipr_send_hcam(ioa_cfg,
7284                                 IPR_HCAM_CDB_OP_CODE_LOG_DATA,
7285                                 ioa_cfg->hostrcb[j]);
7286                 else
7287                         ipr_send_hcam(ioa_cfg,
7288                                 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
7289                                 ioa_cfg->hostrcb[j]);
7290         }
7291
7292         scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
7293         dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7294
7295         ioa_cfg->reset_retries = 0;
7296         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7297         wake_up_all(&ioa_cfg->reset_wait_q);
7298
7299         ioa_cfg->scsi_unblock = 1;
7300         schedule_work(&ioa_cfg->work_q);
7301         LEAVE;
7302         return IPR_RC_JOB_RETURN;
7303 }
7304
7305 /**
7306  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7307  * @supported_dev:      supported device struct
7308  * @vpids:                      vendor product id struct
7309  *
7310  * Return value:
7311  *      none
7312  **/
7313 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7314                                  struct ipr_std_inq_vpids *vpids)
7315 {
7316         memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7317         memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7318         supported_dev->num_records = 1;
7319         supported_dev->data_length =
7320                 cpu_to_be16(sizeof(struct ipr_supported_device));
7321         supported_dev->reserved = 0;
7322 }
7323
7324 /**
7325  * ipr_set_supported_devs - Send Set Supported Devices for a device
7326  * @ipr_cmd:    ipr command struct
7327  *
7328  * This function sends a Set Supported Devices to the adapter
7329  *
7330  * Return value:
7331  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7332  **/
7333 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7334 {
7335         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7336         struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
7337         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7338         struct ipr_resource_entry *res = ipr_cmd->u.res;
7339
7340         ipr_cmd->job_step = ipr_ioa_reset_done;
7341
7342         list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
7343                 if (!ipr_is_scsi_disk(res))
7344                         continue;
7345
7346                 ipr_cmd->u.res = res;
7347                 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
7348
7349                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7350                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7351                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7352
7353                 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
7354                 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
7355                 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7356                 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7357
7358                 ipr_init_ioadl(ipr_cmd,
7359                                ioa_cfg->vpd_cbs_dma +
7360                                  offsetof(struct ipr_misc_cbs, supp_dev),
7361                                sizeof(struct ipr_supported_device),
7362                                IPR_IOADL_FLAGS_WRITE_LAST);
7363
7364                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7365                            IPR_SET_SUP_DEVICE_TIMEOUT);
7366
7367                 if (!ioa_cfg->sis64)
7368                         ipr_cmd->job_step = ipr_set_supported_devs;
7369                 LEAVE;
7370                 return IPR_RC_JOB_RETURN;
7371         }
7372
7373         LEAVE;
7374         return IPR_RC_JOB_CONTINUE;
7375 }
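
/*
 * A sketch of the reset job state machine these job_step functions
 * participate in: each step either starts an adapter command and returns
 * IPR_RC_JOB_RETURN (its completion re-enters ipr_reset_ioa_job, which
 * calls the next job_step), or returns IPR_RC_JOB_CONTINUE to run the
 * next step immediately. Conceptually, the driver loop reduces to:
 *
 *	do {
 *		rc = ipr_cmd->job_step(ipr_cmd);
 *	} while (rc == IPR_RC_JOB_CONTINUE);
 */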
7376
7377 /**
7378  * ipr_get_mode_page - Locate specified mode page
7379  * @mode_pages: mode page buffer
7380  * @page_code:  page code to find
7381  * @len:                minimum required length for mode page
7382  *
7383  * Return value:
7384  *      pointer to mode page / NULL on failure
7385  **/
7386 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7387                                u32 page_code, u32 len)
7388 {
7389         struct ipr_mode_page_hdr *mode_hdr;
7390         u32 page_length;
7391         u32 length;
7392
7393         if (!mode_pages || (mode_pages->hdr.length == 0))
7394                 return NULL;
7395
7396         length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7397         mode_hdr = (struct ipr_mode_page_hdr *)
7398                 (mode_pages->data + mode_pages->hdr.block_desc_len);
7399
7400         while (length) {
7401                 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7402                         if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7403                                 return mode_hdr;
7404                         break;
7405                 } else {
7406                         page_length = (sizeof(struct ipr_mode_page_hdr) +
7407                                        mode_hdr->page_length);
7408                         length -= page_length;
7409                         mode_hdr = (struct ipr_mode_page_hdr *)
7410                                 ((unsigned long)mode_hdr + page_length);
7411                 }
7412         }
7413         return NULL;
7414 }
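
/*
 * Buffer layout assumed by the walk above (standard SCSI mode sense
 * data; hdr.length does not count itself, hence the +1):
 *
 *	+--------------------------+ <- mode_pages->hdr (4 bytes)
 *	| mode parameter header    |
 *	+--------------------------+
 *	| block descriptors        | hdr.block_desc_len bytes, skipped
 *	+--------------------------+
 *	| page hdr | page data     | repeated; page_length excludes the
 *	+--------------------------+ page header itself
 */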
7415
7416 /**
7417  * ipr_check_term_power - Check for term power errors
7418  * @ioa_cfg:    ioa config struct
7419  * @mode_pages: IOAFP mode pages buffer
7420  *
7421  * Check the IOAFP's mode page 28 for term power errors
7422  *
7423  * Return value:
7424  *      nothing
7425  **/
7426 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7427                                  struct ipr_mode_pages *mode_pages)
7428 {
7429         int i;
7430         int entry_length;
7431         struct ipr_dev_bus_entry *bus;
7432         struct ipr_mode_page28 *mode_page;
7433
7434         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7435                                       sizeof(struct ipr_mode_page28));
7436
7437         entry_length = mode_page->entry_length;
7438
7439         bus = mode_page->bus;
7440
7441         for (i = 0; i < mode_page->num_entries; i++) {
7442                 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7443                         dev_err(&ioa_cfg->pdev->dev,
7444                                 "Term power is absent on scsi bus %d\n",
7445                                 bus->res_addr.bus);
7446                 }
7447
7448                 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7449         }
7450 }
7451
7452 /**
7453  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7454  * @ioa_cfg:    ioa config struct
7455  *
7456  * Looks through the config table checking for SES devices. If
7457  * the SES device is in the SES table indicating a maximum SCSI
7458  * bus speed, the speed is limited for the bus.
7459  *
7460  * Return value:
7461  *      none
7462  **/
7463 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7464 {
7465         u32 max_xfer_rate;
7466         int i;
7467
7468         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7469                 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7470                                                        ioa_cfg->bus_attr[i].bus_width);
7471
7472                 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7473                         ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7474         }
7475 }
7476
7477 /**
7478  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7479  * @ioa_cfg:    ioa config struct
7480  * @mode_pages: mode page 28 buffer
7481  *
7482  * Updates mode page 28 based on driver configuration
7483  *
7484  * Return value:
7485  *      none
7486  **/
7487 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7488                                           struct ipr_mode_pages *mode_pages)
7489 {
7490         int i, entry_length;
7491         struct ipr_dev_bus_entry *bus;
7492         struct ipr_bus_attributes *bus_attr;
7493         struct ipr_mode_page28 *mode_page;
7494
7495         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7496                                       sizeof(struct ipr_mode_page28));
7497
7498         entry_length = mode_page->entry_length;
7499
7500         /* Loop for each device bus entry */
7501         for (i = 0, bus = mode_page->bus;
7502              i < mode_page->num_entries;
7503              i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7504                 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7505                         dev_err(&ioa_cfg->pdev->dev,
7506                                 "Invalid resource address reported: 0x%08X\n",
7507                                 IPR_GET_PHYS_LOC(bus->res_addr));
7508                         continue;
7509                 }
7510
7511                 bus_attr = &ioa_cfg->bus_attr[i];
7512                 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7513                 bus->bus_width = bus_attr->bus_width;
7514                 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7515                 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7516                 if (bus_attr->qas_enabled)
7517                         bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7518                 else
7519                         bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7520         }
7521 }
7522
7523 /**
7524  * ipr_build_mode_select - Build a mode select command
7525  * @ipr_cmd:    ipr command struct
7526  * @res_handle: resource handle to send command to
7527  * @parm:               Byte 1 of the Mode Select command
7528  * @dma_addr:   DMA buffer address
7529  * @xfer_len:   data transfer length
7530  *
7531  * Return value:
7532  *      none
7533  **/
7534 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7535                                   __be32 res_handle, u8 parm,
7536                                   dma_addr_t dma_addr, u8 xfer_len)
7537 {
7538         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7539
7540         ioarcb->res_handle = res_handle;
7541         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7542         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7543         ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7544         ioarcb->cmd_pkt.cdb[1] = parm;
7545         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7546
7547         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7548 }
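
/*
 * Resulting 6-byte MODE SELECT CDB, for reference (in the callers below
 * parm is 0x11, i.e. the PF and SP bits):
 *
 *	cdb[0] = MODE_SELECT (0x15)
 *	cdb[1] = parm
 *	cdb[2] = cdb[3] = 0
 *	cdb[4] = xfer_len
 *	cdb[5] = 0
 */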
7549
7550 /**
7551  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7552  * @ipr_cmd:    ipr command struct
7553  *
7554  * This function sets up the SCSI bus attributes and sends
7555  * a Mode Select for Page 28 to activate them.
7556  *
7557  * Return value:
7558  *      IPR_RC_JOB_RETURN
7559  **/
7560 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7561 {
7562         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7563         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7564         int length;
7565
7566         ENTER;
7567         ipr_scsi_bus_speed_limit(ioa_cfg);
7568         ipr_check_term_power(ioa_cfg, mode_pages);
7569         ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7570         length = mode_pages->hdr.length + 1;
7571         mode_pages->hdr.length = 0;
7572
7573         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7574                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7575                               length);
7576
7577         ipr_cmd->job_step = ipr_set_supported_devs;
7578         ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7579                                     struct ipr_resource_entry, queue);
7580         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7581
7582         LEAVE;
7583         return IPR_RC_JOB_RETURN;
7584 }
7585
7586 /**
7587  * ipr_build_mode_sense - Builds a mode sense command
7588  * @ipr_cmd:    ipr command struct
7589  * @res_handle:         resource handle to send command to
7590  * @parm:               Byte 2 of mode sense command
7591  * @dma_addr:   DMA address of mode sense buffer
7592  * @xfer_len:   Size of DMA buffer
7593  *
7594  * Return value:
7595  *      none
7596  **/
7597 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7598                                  __be32 res_handle,
7599                                  u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7600 {
7601         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7602
7603         ioarcb->res_handle = res_handle;
7604         ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7605         ioarcb->cmd_pkt.cdb[2] = parm;
7606         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7607         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7608
7609         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7610 }
7611
7612 /**
7613  * ipr_reset_cmd_failed - Handle failure of IOA reset command
7614  * @ipr_cmd:    ipr command struct
7615  *
7616  * This function handles the failure of an IOA bringup command.
7617  *
7618  * Return value:
7619  *      IPR_RC_JOB_RETURN
7620  **/
7621 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7622 {
7623         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7624         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7625
7626         dev_err(&ioa_cfg->pdev->dev,
7627                 "0x%02X failed with IOASC: 0x%08X\n",
7628                 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7629
7630         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7631         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7632         return IPR_RC_JOB_RETURN;
7633 }
7634
7635 /**
7636  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7637  * @ipr_cmd:    ipr command struct
7638  *
7639  * This function handles the failure of a Mode Sense to the IOAFP.
7640  * Some adapters do not handle all mode pages.
7641  *
7642  * Return value:
7643  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7644  **/
7645 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7646 {
7647         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7648         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7649
7650         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7651                 ipr_cmd->job_step = ipr_set_supported_devs;
7652                 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7653                                             struct ipr_resource_entry, queue);
7654                 return IPR_RC_JOB_CONTINUE;
7655         }
7656
7657         return ipr_reset_cmd_failed(ipr_cmd);
7658 }
7659
7660 /**
7661  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7662  * @ipr_cmd:    ipr command struct
7663  *
7664  * This function sends a Page 28 mode sense to the IOA to
7665  * retrieve SCSI bus attributes.
7666  *
7667  * Return value:
7668  *      IPR_RC_JOB_RETURN
7669  **/
7670 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7671 {
7672         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7673
7674         ENTER;
7675         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7676                              0x28, ioa_cfg->vpd_cbs_dma +
7677                              offsetof(struct ipr_misc_cbs, mode_pages),
7678                              sizeof(struct ipr_mode_pages));
7679
7680         ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7681         ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7682
7683         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7684
7685         LEAVE;
7686         return IPR_RC_JOB_RETURN;
7687 }
7688
7689 /**
7690  * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7691  * @ipr_cmd:    ipr command struct
7692  *
7693  * This function enables dual IOA RAID support if possible.
7694  *
7695  * Return value:
7696  *      IPR_RC_JOB_RETURN
7697  **/
7698 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7699 {
7700         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7701         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7702         struct ipr_mode_page24 *mode_page;
7703         int length;
7704
7705         ENTER;
7706         mode_page = ipr_get_mode_page(mode_pages, 0x24,
7707                                       sizeof(struct ipr_mode_page24));
7708
7709         if (mode_page)
7710                 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7711
7712         length = mode_pages->hdr.length + 1;
7713         mode_pages->hdr.length = 0;
7714
7715         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7716                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7717                               length);
7718
7719         ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7720         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7721
7722         LEAVE;
7723         return IPR_RC_JOB_RETURN;
7724 }
7725
7726 /**
7727  * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7728  * @ipr_cmd:    ipr command struct
7729  *
7730  * This function handles the failure of a Mode Sense to the IOAFP.
7731  * Some adapters do not handle all mode pages.
7732  *
7733  * Return value:
7734  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7735  **/
7736 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7737 {
7738         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7739
7740         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7741                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7742                 return IPR_RC_JOB_CONTINUE;
7743         }
7744
7745         return ipr_reset_cmd_failed(ipr_cmd);
7746 }
7747
7748 /**
7749  * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7750  * @ipr_cmd:    ipr command struct
7751  *
7752  * This function sends a mode sense to the IOA to retrieve
7753  * the IOA Advanced Function Control mode page.
7754  *
7755  * Return value:
7756  *      IPR_RC_JOB_RETURN
7757  **/
7758 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7759 {
7760         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7761
7762         ENTER;
7763         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7764                              0x24, ioa_cfg->vpd_cbs_dma +
7765                              offsetof(struct ipr_misc_cbs, mode_pages),
7766                              sizeof(struct ipr_mode_pages));
7767
7768         ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7769         ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7770
7771         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7772
7773         LEAVE;
7774         return IPR_RC_JOB_RETURN;
7775 }
7776
7777 /**
7778  * ipr_init_res_table - Initialize the resource table
7779  * @ipr_cmd:    ipr command struct
7780  *
7781  * This function looks through the existing resource table, comparing
7782  * it with the config table. This function will take care of old/new
7783  * devices and schedule adding/removing them from the mid-layer
7784  * as appropriate.
7785  *
7786  * Return value:
7787  *      IPR_RC_JOB_CONTINUE
7788  **/
7789 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7790 {
7791         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7792         struct ipr_resource_entry *res, *temp;
7793         struct ipr_config_table_entry_wrapper cfgtew;
7794         int entries, found, flag, i;
7795         LIST_HEAD(old_res);
7796
7797         ENTER;
7798         if (ioa_cfg->sis64)
7799                 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7800         else
7801                 flag = ioa_cfg->u.cfg_table->hdr.flags;
7802
7803         if (flag & IPR_UCODE_DOWNLOAD_REQ)
7804                 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7805
7806         list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7807                 list_move_tail(&res->queue, &old_res);
7808
7809         if (ioa_cfg->sis64)
7810                 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7811         else
7812                 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7813
7814         for (i = 0; i < entries; i++) {
7815                 if (ioa_cfg->sis64)
7816                         cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7817                 else
7818                         cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7819                 found = 0;
7820
7821                 list_for_each_entry_safe(res, temp, &old_res, queue) {
7822                         if (ipr_is_same_device(res, &cfgtew)) {
7823                                 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7824                                 found = 1;
7825                                 break;
7826                         }
7827                 }
7828
7829                 if (!found) {
7830                         if (list_empty(&ioa_cfg->free_res_q)) {
7831                                 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7832                                 break;
7833                         }
7834
7835                         found = 1;
7836                         res = list_entry(ioa_cfg->free_res_q.next,
7837                                          struct ipr_resource_entry, queue);
7838                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7839                         ipr_init_res_entry(res, &cfgtew);
7840                         res->add_to_ml = 1;
7841                 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7842                         res->sdev->allow_restart = 1;
7843
7844                 if (found)
7845                         ipr_update_res_entry(res, &cfgtew);
7846         }
7847
7848         list_for_each_entry_safe(res, temp, &old_res, queue) {
7849                 if (res->sdev) {
7850                         res->del_from_ml = 1;
7851                         res->res_handle = IPR_INVALID_RES_HANDLE;
7852                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7853                 }
7854         }
7855
7856         list_for_each_entry_safe(res, temp, &old_res, queue) {
7857                 ipr_clear_res_target(res);
7858                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7859         }
7860
7861         if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7862                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7863         else
7864                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7865
7866         LEAVE;
7867         return IPR_RC_JOB_CONTINUE;
7868 }
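
/*
 * The reconciliation above, in outline:
 *   1. move every in-use resource onto a temporary old_res list;
 *   2. for each config table entry, either move the matching resource
 *      back to used_res_q and update it, or claim a free entry and mark
 *      it add_to_ml for the mid-layer worker;
 *   3. whatever remains on old_res with an sdev is marked del_from_ml;
 *      the rest goes back to free_res_q.
 */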
7869
7870 /**
7871  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7872  * @ipr_cmd:    ipr command struct
7873  *
7874  * This function sends a Query IOA Configuration command
7875  * to the adapter to retrieve the IOA configuration table.
7876  *
7877  * Return value:
7878  *      IPR_RC_JOB_RETURN
7879  **/
7880 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7881 {
7882         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7883         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7884         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7885         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7886
7887         ENTER;
7888         if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7889                 ioa_cfg->dual_raid = 1;
7890         dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7891                  ucode_vpd->major_release, ucode_vpd->card_type,
7892                  ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7893         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7894         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7895
7896         ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7897         ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7898         ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7899         ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7900
7901         ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7902                        IPR_IOADL_FLAGS_READ_LAST);
7903
7904         ipr_cmd->job_step = ipr_init_res_table;
7905
7906         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7907
7908         LEAVE;
7909         return IPR_RC_JOB_RETURN;
7910 }
7911
7912 static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7913 {
7914         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7915
7916         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7917                 return IPR_RC_JOB_CONTINUE;
7918
7919         return ipr_reset_cmd_failed(ipr_cmd);
7920 }
7921
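/**
 * ipr_build_ioa_service_action - Build an IOA Service Action command
 * @ipr_cmd:    ipr command struct
 * @res_handle: resource handle to send command to
 * @sa_code:    service action code
 *
 * Return value:
 *      none
 **/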
7922 static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7923                                          __be32 res_handle, u8 sa_code)
7924 {
7925         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7926
7927         ioarcb->res_handle = res_handle;
7928         ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7929         ioarcb->cmd_pkt.cdb[1] = sa_code;
7930         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7931 }
7932
7933 /**
7934  * ipr_ioafp_set_caching_parameters - Issue Set Cache Parameters service
7935  * action
7936  * @ipr_cmd:    ipr command struct
7937  * Return value:
7938  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7939  **/
7940 static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7941 {
7942         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7943         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7944         struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7945
7946         ENTER;
7947
7948         ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7949
7950         if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7951                 ipr_build_ioa_service_action(ipr_cmd,
7952                                              cpu_to_be32(IPR_IOA_RES_HANDLE),
7953                                              IPR_IOA_SA_CHANGE_CACHE_PARAMS);
7954
7955                 ioarcb->cmd_pkt.cdb[2] = 0x40;
7956
7957                 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7958                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7959                            IPR_SET_SUP_DEVICE_TIMEOUT);
7960
7961                 LEAVE;
7962                 return IPR_RC_JOB_RETURN;
7963         }
7964
7965         LEAVE;
7966         return IPR_RC_JOB_CONTINUE;
7967 }
7968
7969 /**
7970  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7971  * @ipr_cmd:    ipr command struct
      * @flags:      inquiry flags byte (CDB byte 1, e.g. the EVPD bit)
      * @page:       inquiry page code
      * @dma_addr:   DMA address of the inquiry buffer
      * @xfer_len:   size of the inquiry buffer
7972  *
7973  * This utility function sends an inquiry to the adapter.
7974  *
7975  * Return value:
7976  *      none
7977  **/
7978 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7979                               dma_addr_t dma_addr, u8 xfer_len)
7980 {
7981         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7982
7983         ENTER;
7984         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7985         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7986
7987         ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7988         ioarcb->cmd_pkt.cdb[1] = flags;
7989         ioarcb->cmd_pkt.cdb[2] = page;
7990         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7991
7992         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7993
7994         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7995         LEAVE;
7996 }
7997
7998 /**
7999  * ipr_inquiry_page_supported - Is the given inquiry page supported
8000  * @page0:              inquiry page 0 buffer
8001  * @page:               page code.
8002  *
8003  * This function determines if the specified inquiry page is supported.
8004  *
8005  * Return value:
8006  *      1 if page is supported / 0 if not
8007  **/
8008 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
8009 {
8010         int i;
8011
8012         for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
8013                 if (page0->page[i] == page)
8014                         return 1;
8015
8016         return 0;
8017 }
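
/*
 * Typical use, as in the capability inquiries below (page0 must already
 * hold the supported-pages list fetched by ipr_ioafp_page0_inquiry;
 * dma_addr and len stand in for the real buffer arguments):
 *
 *	if (ipr_inquiry_page_supported(page0, 0xD0))
 *		ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0, dma_addr, len);
 */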
8018
8019 /**
8020  * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
8021  * @ipr_cmd:    ipr command struct
8022  *
8023  * This function sends a Page 0xC4 inquiry to the adapter
8024  * to retrieve software VPD information.
8025  *
8026  * Return value:
8027  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8028  **/
8029 static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
8030 {
8031         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8032         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8033         struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
8034
8035         ENTER;
8036         ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
8037         memset(pageC4, 0, sizeof(*pageC4));
8038
8039         if (ipr_inquiry_page_supported(page0, 0xC4)) {
8040                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
8041                                   (ioa_cfg->vpd_cbs_dma
8042                                    + offsetof(struct ipr_misc_cbs,
8043                                               pageC4_data)),
8044                                   sizeof(struct ipr_inquiry_pageC4));
8045                 return IPR_RC_JOB_RETURN;
8046         }
8047
8048         LEAVE;
8049         return IPR_RC_JOB_CONTINUE;
8050 }
8051
8052 /**
8053  * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
8054  * @ipr_cmd:    ipr command struct
8055  *
8056  * This function sends a Page 0xD0 inquiry to the adapter
8057  * to retrieve adapter capabilities.
8058  *
8059  * Return value:
8060  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8061  **/
8062 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
8063 {
8064         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8065         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8066         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
8067
8068         ENTER;
8069         ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
8070         memset(cap, 0, sizeof(*cap));
8071
8072         if (ipr_inquiry_page_supported(page0, 0xD0)) {
8073                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
8074                                   ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
8075                                   sizeof(struct ipr_inquiry_cap));
8076                 return IPR_RC_JOB_RETURN;
8077         }
8078
8079         LEAVE;
8080         return IPR_RC_JOB_CONTINUE;
8081 }
8082
8083 /**
8084  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
8085  * @ipr_cmd:    ipr command struct
8086  *
8087  * This function sends a Page 3 inquiry to the adapter
8088  * to retrieve software VPD information.
8089  *
8090  * Return value:
8091  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8092  **/
8093 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
8094 {
8095         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8096
8097         ENTER;
8098
8099         ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
8100
8101         ipr_ioafp_inquiry(ipr_cmd, 1, 3,
8102                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
8103                           sizeof(struct ipr_inquiry_page3));
8104
8105         LEAVE;
8106         return IPR_RC_JOB_RETURN;
8107 }
8108
8109 /**
8110  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
8111  * @ipr_cmd:    ipr command struct
8112  *
8113  * This function sends a Page 0 inquiry to the adapter
8114  * to retrieve supported inquiry pages.
8115  *
8116  * Return value:
8117  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8118  **/
8119 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
8120 {
8121         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8122         char type[5];
8123
8124         ENTER;
8125
8126         /* Grab the type out of the VPD and store it away */
8127         memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
8128         type[4] = '\0';
8129         ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
8130
8131         if (ipr_invalid_adapter(ioa_cfg)) {
8132                 dev_err(&ioa_cfg->pdev->dev,
8133                         "Adapter not supported in this hardware configuration.\n");
8134
8135                 if (!ipr_testmode) {
8136                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
8137                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8138                         list_add_tail(&ipr_cmd->queue,
8139                                         &ioa_cfg->hrrq->hrrq_free_q);
8140                         return IPR_RC_JOB_RETURN;
8141                 }
8142         }
8143
8144         ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
8145
8146         ipr_ioafp_inquiry(ipr_cmd, 1, 0,
8147                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
8148                           sizeof(struct ipr_inquiry_page0));
8149
8150         LEAVE;
8151         return IPR_RC_JOB_RETURN;
8152 }
8153
8154 /**
8155  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
8156  * @ipr_cmd:    ipr command struct
8157  *
8158  * This function sends a standard inquiry to the adapter.
8159  *
8160  * Return value:
8161  *      IPR_RC_JOB_RETURN
8162  **/
8163 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
8164 {
8165         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8166
8167         ENTER;
8168         ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
8169
8170         ipr_ioafp_inquiry(ipr_cmd, 0, 0,
8171                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
8172                           sizeof(struct ipr_ioa_vpd));
8173
8174         LEAVE;
8175         return IPR_RC_JOB_RETURN;
8176 }
8177
8178 /**
8179  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
8180  * @ipr_cmd:    ipr command struct
8181  *
8182  * This function sends an Identify Host Request Response Queue
8183  * command to establish the HRRQ with the adapter.
8184  *
8185  * Return value:
8186  *      IPR_RC_JOB_RETURN
8187  **/
8188 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
8189 {
8190         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8191         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
8192         struct ipr_hrr_queue *hrrq;
8193
8194         ENTER;
8195         ipr_cmd->job_step = ipr_ioafp_std_inquiry;
8196         if (ioa_cfg->identify_hrrq_index == 0)
8197                 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
8198
8199         if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
8200                 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
8201
8202                 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
8203                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8204
8205                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8206                 if (ioa_cfg->sis64)
8207                         ioarcb->cmd_pkt.cdb[1] = 0x1;
8208
8209                 if (ioa_cfg->nvectors == 1)
8210                         ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
8211                 else
8212                         ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
8213
8214                 ioarcb->cmd_pkt.cdb[2] =
8215                         ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
8216                 ioarcb->cmd_pkt.cdb[3] =
8217                         ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
8218                 ioarcb->cmd_pkt.cdb[4] =
8219                         ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
8220                 ioarcb->cmd_pkt.cdb[5] =
8221                         ((u64) hrrq->host_rrq_dma) & 0xff;
8222                 ioarcb->cmd_pkt.cdb[7] =
8223                         ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
8224                 ioarcb->cmd_pkt.cdb[8] =
8225                         (sizeof(u32) * hrrq->size) & 0xff;
8226
8227                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8228                         ioarcb->cmd_pkt.cdb[9] =
8229                                         ioa_cfg->identify_hrrq_index;
8230
8231                 if (ioa_cfg->sis64) {
8232                         ioarcb->cmd_pkt.cdb[10] =
8233                                 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
8234                         ioarcb->cmd_pkt.cdb[11] =
8235                                 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
8236                         ioarcb->cmd_pkt.cdb[12] =
8237                                 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
8238                         ioarcb->cmd_pkt.cdb[13] =
8239                                 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
8240                 }
8241
8242                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8243                         ioarcb->cmd_pkt.cdb[14] =
8244                                         ioa_cfg->identify_hrrq_index;
8245
8246                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8247                            IPR_INTERNAL_TIMEOUT);
8248
8249                 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
8250                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8251
8252                 LEAVE;
8253                 return IPR_RC_JOB_RETURN;
8254         }
8255
8256         LEAVE;
8257         return IPR_RC_JOB_CONTINUE;
8258 }
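
/*
 * Identify Host RRQ CDB layout assembled above, for reference:
 *
 *	cdb[0]      = IPR_ID_HOST_RR_Q
 *	cdb[1]      = 0x1 on sis64; HRRQ-select enable bit per nvectors
 *	cdb[2..5]   = host_rrq_dma bits 31..0, most significant byte first
 *	cdb[7..8]   = queue size in bytes (sizeof(u32) * hrrq->size)
 *	cdb[9]      = hrrq index, when HRRQ selection is enabled
 *	cdb[10..13] = host_rrq_dma bits 63..32 (sis64 only)
 *	cdb[14]     = hrrq index again, when HRRQ selection is enabled
 */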
8259
8260 /**
8261  * ipr_reset_timer_done - Adapter reset timer function
8262  * @ipr_cmd:    ipr command struct
8263  *
8264  * Description: This function is used in adapter reset processing
8265  * for timing events. If the reset_cmd pointer in the IOA
8266  * config struct no longer points at this command, we are doing nested
8267  * resets and fail_all_ops will take care of freeing the
8268  * command block.
8269  *
8270  * Return value:
8271  *      none
8272  **/
8273 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
8274 {
8275         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8276         unsigned long lock_flags = 0;
8277
8278         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8279
8280         if (ioa_cfg->reset_cmd == ipr_cmd) {
8281                 list_del(&ipr_cmd->queue);
8282                 ipr_cmd->done(ipr_cmd);
8283         }
8284
8285         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8286 }
8287
8288 /**
8289  * ipr_reset_start_timer - Start a timer for adapter reset job
8290  * @ipr_cmd:    ipr command struct
8291  * @timeout:    timeout value
8292  *
8293  * Description: This function is used in adapter reset processing
8294  * for timing events. If the reset_cmd pointer in the IOA
8295  * config struct no longer points at this command, we are doing nested
8296  * resets and fail_all_ops will take care of freeing the
8297  * command block.
8298  *
8299  * Return value:
8300  *      none
8301  **/
8302 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
8303                                   unsigned long timeout)
8304 {
8305
8306         ENTER;
8307         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8308         ipr_cmd->done = ipr_reset_ioa_job;
8309
8310         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8311         ipr_cmd->timer.expires = jiffies + timeout;
8312         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
8313         add_timer(&ipr_cmd->timer);
8314 }
8315
8316 /**
8317  * ipr_init_ioa_mem - Initialize ioa_cfg control block
8318  * @ioa_cfg:    ioa cfg struct
8319  *
8320  * Return value:
8321  *      nothing
8322  **/
8323 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
8324 {
8325         struct ipr_hrr_queue *hrrq;
8326
8327         for_each_hrrq(hrrq, ioa_cfg) {
8328                 spin_lock(&hrrq->_lock);
8329                 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
8330
8331                 /* Initialize Host RRQ pointers */
8332                 hrrq->hrrq_start = hrrq->host_rrq;
8333                 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8334                 hrrq->hrrq_curr = hrrq->hrrq_start;
8335                 hrrq->toggle_bit = 1;
8336                 spin_unlock(&hrrq->_lock);
8337         }
8338         wmb();
8339
8340         ioa_cfg->identify_hrrq_index = 0;
8341         if (ioa_cfg->hrrq_num == 1)
8342                 atomic_set(&ioa_cfg->hrrq_index, 0);
8343         else
8344                 atomic_set(&ioa_cfg->hrrq_index, 1);
8345
8346         /* Zero out config table */
8347         memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
8348 }
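
/*
 * A brief sketch of the toggle-bit protocol this reinitialization assumes:
 * the adapter posts response entries into host_rrq with an ownership bit
 * that flips on every queue wrap. Resetting hrrq_curr to the start with
 * toggle_bit = 1 ensures the host only consumes entries written after
 * this point. The consumer side tests, roughly:
 *
 *	if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
 *	    hrrq->toggle_bit)
 *		the entry is valid and owned by the host
 */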
8349
8350 /**
8351  * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8352  * @ipr_cmd:    ipr command struct
8353  *
8354  * Return value:
8355  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8356  **/
8357 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8358 {
8359         unsigned long stage, stage_time;
8360         u32 feedback;
8361         volatile u32 int_reg;
8362         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8363         u64 maskval = 0;
8364
8365         feedback = readl(ioa_cfg->regs.init_feedback_reg);
8366         stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8367         stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8368
8369         ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8370
8371         /* sanity check the stage_time value */
8372         if (stage_time == 0)
8373                 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8374         else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
8375                 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8376         else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8377                 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8378
8379         if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8380                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8381                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8382                 stage_time = ioa_cfg->transop_timeout;
8383                 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8384         } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
8385                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8386                 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8387                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8388                         maskval = IPR_PCII_IPL_STAGE_CHANGE;
8389                         maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8390                         writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8391                         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8392                         return IPR_RC_JOB_CONTINUE;
8393                 }
8394         }
8395
8396         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8397         ipr_cmd->timer.expires = jiffies + stage_time * HZ;
8398         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8399         ipr_cmd->done = ipr_reset_ioa_job;
8400         add_timer(&ipr_cmd->timer);
8401
8402         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8403
8404         return IPR_RC_JOB_RETURN;
8405 }
8406
8407 /**
8408  * ipr_reset_enable_ioa - Enable the IOA following a reset.
8409  * @ipr_cmd:    ipr command struct
8410  *
8411  * This function reinitializes some control blocks and
8412  * enables destructive diagnostics on the adapter.
8413  *
8414  * Return value:
8415  *      IPR_RC_JOB_RETURN
8416  **/
8417 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8418 {
8419         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8420         volatile u32 int_reg;
8421         volatile u64 maskval;
8422         int i;
8423
8424         ENTER;
8425         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8426         ipr_init_ioa_mem(ioa_cfg);
8427
8428         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8429                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8430                 ioa_cfg->hrrq[i].allow_interrupts = 1;
8431                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8432         }
8433         wmb();
8434         if (ioa_cfg->sis64) {
8435                 /* Set the adapter to the correct endian mode. */
8436                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8437                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8438         }
8439
8440         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8441
8442         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8443                 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
8444                        ioa_cfg->regs.clr_interrupt_mask_reg32);
8445                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8446                 return IPR_RC_JOB_CONTINUE;
8447         }
8448
8449         /* Enable destructive diagnostics on IOA */
8450         writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8451
8452         if (ioa_cfg->sis64) {
8453                 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8454                 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8455                 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8456         } else
8457                 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
8458
8459         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8460
8461         dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8462
8463         if (ioa_cfg->sis64) {
8464                 ipr_cmd->job_step = ipr_reset_next_stage;
8465                 return IPR_RC_JOB_CONTINUE;
8466         }
8467
8468         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8469         ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
8470         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8471         ipr_cmd->done = ipr_reset_ioa_job;
8472         add_timer(&ipr_cmd->timer);
8473         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8474
8475         LEAVE;
8476         return IPR_RC_JOB_RETURN;
8477 }
8478
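/*
 * Editor's note: the timer arming above (timer.data plus a cast of
 * ipr_oper_timeout) is the legacy kernel timer API. A minimal sketch
 * of the same arming under the timer_setup() API introduced in later
 * kernels, assuming the driver were converted (this is not the file's
 * actual code):
 *
 *   timer_setup(&ipr_cmd->timer, ipr_oper_timeout, 0);
 *   ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
 *   add_timer(&ipr_cmd->timer);
 *
 * where ipr_oper_timeout() would take a struct timer_list * and
 * recover ipr_cmd with from_timer(ipr_cmd, t, timer).
 */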
8479 /**
8480  * ipr_reset_wait_for_dump - Wait for a dump to time out.
8481  * @ipr_cmd:    ipr command struct
8482  *
8483  * This function is invoked when an adapter dump has run out
8484  * of processing time.
8485  *
8486  * Return value:
8487  *      IPR_RC_JOB_CONTINUE
8488  **/
8489 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8490 {
8491         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8492
8493         if (ioa_cfg->sdt_state == GET_DUMP)
8494                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8495         else if (ioa_cfg->sdt_state == READ_DUMP)
8496                 ioa_cfg->sdt_state = ABORT_DUMP;
8497
8498         ioa_cfg->dump_timeout = 1;
8499         ipr_cmd->job_step = ipr_reset_alert;
8500
8501         return IPR_RC_JOB_CONTINUE;
8502 }
8503
8504 /**
8505  * ipr_unit_check_no_data - Log a unit check/no data error log
8506  * @ioa_cfg:            ioa config struct
8507  *
8508  * Logs an error indicating the adapter unit checked, but for some
8509  * reason, we were unable to fetch the unit check buffer.
8510  *
8511  * Return value:
8512  *      nothing
8513  **/
8514 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8515 {
8516         ioa_cfg->errors_logged++;
8517         dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8518 }
8519
8520 /**
8521  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8522  * @ioa_cfg:            ioa config struct
8523  *
8524  * Fetches the unit check buffer from the adapter by clocking the data
8525  * through the mailbox register.
8526  *
8527  * Return value:
8528  *      nothing
8529  **/
8530 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8531 {
8532         unsigned long mailbox;
8533         struct ipr_hostrcb *hostrcb;
8534         struct ipr_uc_sdt sdt;
8535         int rc, length;
8536         u32 ioasc;
8537
8538         mailbox = readl(ioa_cfg->ioa_mailbox);
8539
8540         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8541                 ipr_unit_check_no_data(ioa_cfg);
8542                 return;
8543         }
8544
8545         memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8546         rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8547                                         (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8548
8549         if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8550             ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8551             (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8552                 ipr_unit_check_no_data(ioa_cfg);
8553                 return;
8554         }
8555
8556         /* Find length of the first sdt entry (UC buffer) */
8557         if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8558                 length = be32_to_cpu(sdt.entry[0].end_token);
8559         else
8560                 length = (be32_to_cpu(sdt.entry[0].end_token) -
8561                           be32_to_cpu(sdt.entry[0].start_token)) &
8562                           IPR_FMT2_MBX_ADDR_MASK;
8563
8564         hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8565                              struct ipr_hostrcb, queue);
8566         list_del_init(&hostrcb->queue);
8567         memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8568
8569         rc = ipr_get_ldump_data_section(ioa_cfg,
8570                                         be32_to_cpu(sdt.entry[0].start_token),
8571                                         (__be32 *)&hostrcb->hcam,
8572                                         min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8573
8574         if (!rc) {
8575                 ipr_handle_log_data(ioa_cfg, hostrcb);
8576                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8577                 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8578                     ioa_cfg->sdt_state == GET_DUMP)
8579                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8580         } else
8581                 ipr_unit_check_no_data(ioa_cfg);
8582
8583         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8584 }
8585
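/*
 * Editor's note on the length computation above: a format-3 SDT stores
 * the unit check buffer length directly in the first entry's
 * end_token, while format 2 stores start/end tokens, so the length is
 *
 *   length = (end_token - start_token) & IPR_FMT2_MBX_ADDR_MASK;
 *
 * Either way the subsequent read is capped at sizeof(hostrcb->hcam),
 * so a corrupt length cannot overrun the host RCB buffer.
 */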
8586 /**
8587  * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8588  * @ipr_cmd:    ipr command struct
8589  *
8590  * Description: This function will call to get the unit check buffer.
8591  *
8592  * Return value:
8593  *      IPR_RC_JOB_RETURN
8594  **/
8595 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8596 {
8597         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8598
8599         ENTER;
8600         ioa_cfg->ioa_unit_checked = 0;
8601         ipr_get_unit_check_buffer(ioa_cfg);
8602         ipr_cmd->job_step = ipr_reset_alert;
8603         ipr_reset_start_timer(ipr_cmd, 0);
8604
8605         LEAVE;
8606         return IPR_RC_JOB_RETURN;
8607 }
8608
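/**
 * ipr_dump_mailbox_wait - Wait for the mailbox register to stabilize
 * @ipr_cmd:    ipr command struct
 *
 * Description: Polls until the mailbox is stable (SIS64) or the wait
 * budget in ipr_cmd->u.time_left is exhausted, then moves the dump
 * state machine to READ_DUMP. (Editor's note: kernel-doc added for
 * consistency with the surrounding functions; inferred from the body.)
 *
 * Return value:
 *      IPR_RC_JOB_RETURN
 **/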
8609 static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
8610 {
8611         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8612
8613         ENTER;
8614
8615         if (ioa_cfg->sdt_state != GET_DUMP)
8616                 return IPR_RC_JOB_RETURN;
8617
8618         if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
8619             (readl(ioa_cfg->regs.sense_interrupt_reg) &
8620              IPR_PCII_MAILBOX_STABLE)) {
8621
8622                 if (!ipr_cmd->u.time_left)
8623                         dev_err(&ioa_cfg->pdev->dev,
8624                                 "Timed out waiting for Mailbox register.\n");
8625
8626                 ioa_cfg->sdt_state = READ_DUMP;
8627                 ioa_cfg->dump_timeout = 0;
8628                 if (ioa_cfg->sis64)
8629                         ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8630                 else
8631                         ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8632                 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8633                 schedule_work(&ioa_cfg->work_q);
8634
8635         } else {
8636                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8637                 ipr_reset_start_timer(ipr_cmd,
8638                                       IPR_CHECK_FOR_RESET_TIMEOUT);
8639         }
8640
8641         LEAVE;
8642         return IPR_RC_JOB_RETURN;
8643 }
8644
8645 /**
8646  * ipr_reset_restore_cfg_space - Restore PCI config space.
8647  * @ipr_cmd:    ipr command struct
8648  *
8649  * Description: This function restores the saved PCI config space of
8650  * the adapter, fails all outstanding ops back to the callers, and
8651  * fetches the dump/unit check if applicable to this reset.
8652  *
8653  * Return value:
8654  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8655  **/
8656 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8657 {
8658         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8659         u32 int_reg;
8660
8661         ENTER;
8662         ioa_cfg->pdev->state_saved = true;
8663         pci_restore_state(ioa_cfg->pdev);
8664
8665         if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8666                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8667                 return IPR_RC_JOB_CONTINUE;
8668         }
8669
8670         ipr_fail_all_ops(ioa_cfg);
8671
8672         if (ioa_cfg->sis64) {
8673                 /* Set the adapter to the correct endian mode. */
8674                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8675                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8676         }
8677
8678         if (ioa_cfg->ioa_unit_checked) {
8679                 if (ioa_cfg->sis64) {
8680                         ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8681                         ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8682                         return IPR_RC_JOB_RETURN;
8683                 } else {
8684                         ioa_cfg->ioa_unit_checked = 0;
8685                         ipr_get_unit_check_buffer(ioa_cfg);
8686                         ipr_cmd->job_step = ipr_reset_alert;
8687                         ipr_reset_start_timer(ipr_cmd, 0);
8688                         return IPR_RC_JOB_RETURN;
8689                 }
8690         }
8691
8692         if (ioa_cfg->in_ioa_bringdown) {
8693                 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8694         } else if (ioa_cfg->sdt_state == GET_DUMP) {
8695                 ipr_cmd->job_step = ipr_dump_mailbox_wait;
8696                 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
8697         } else {
8698                 ipr_cmd->job_step = ipr_reset_enable_ioa;
8699         }
8700
8701         LEAVE;
8702         return IPR_RC_JOB_CONTINUE;
8703 }
8704
8705 /**
8706  * ipr_reset_bist_done - BIST has completed on the adapter.
8707  * @ipr_cmd:    ipr command struct
8708  *
8709  * Description: Unblock config space and resume the reset process.
8710  *
8711  * Return value:
8712  *      IPR_RC_JOB_CONTINUE
8713  **/
8714 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8715 {
8716         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8717
8718         ENTER;
8719         if (ioa_cfg->cfg_locked)
8720                 pci_cfg_access_unlock(ioa_cfg->pdev);
8721         ioa_cfg->cfg_locked = 0;
8722         ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8723         LEAVE;
8724         return IPR_RC_JOB_CONTINUE;
8725 }
8726
8727 /**
8728  * ipr_reset_start_bist - Run BIST on the adapter.
8729  * @ipr_cmd:    ipr command struct
8730  *
8731  * Description: This function runs BIST on the adapter, then delays 2 seconds.
8732  *
8733  * Return value:
8734  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8735  **/
8736 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8737 {
8738         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8739         int rc = PCIBIOS_SUCCESSFUL;
8740
8741         ENTER;
8742         if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8743                 writel(IPR_UPROCI_SIS64_START_BIST,
8744                        ioa_cfg->regs.set_uproc_interrupt_reg32);
8745         else
8746                 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8747
8748         if (rc == PCIBIOS_SUCCESSFUL) {
8749                 ipr_cmd->job_step = ipr_reset_bist_done;
8750                 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8751                 rc = IPR_RC_JOB_RETURN;
8752         } else {
8753                 if (ioa_cfg->cfg_locked)
8754                         pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8755                 ioa_cfg->cfg_locked = 0;
8756                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8757                 rc = IPR_RC_JOB_CONTINUE;
8758         }
8759
8760         LEAVE;
8761         return rc;
8762 }
8763
8764 /**
8765  * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8766  * @ipr_cmd:    ipr command struct
8767  *
8768  * Description: This clears PCI reset to the adapter and delays two seconds.
8769  *
8770  * Return value:
8771  *      IPR_RC_JOB_RETURN
8772  **/
8773 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8774 {
8775         ENTER;
8776         ipr_cmd->job_step = ipr_reset_bist_done;
8777         ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8778         LEAVE;
8779         return IPR_RC_JOB_RETURN;
8780 }
8781
8782 /**
8783  * ipr_reset_reset_work - Pulse a PCIe warm reset
8784  * @work:       work struct
8785  *
8786  * Description: This pulses a warm reset to the adapter's PCI slot.
8787  *
8788  **/
8789 static void ipr_reset_reset_work(struct work_struct *work)
8790 {
8791         struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8792         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8793         struct pci_dev *pdev = ioa_cfg->pdev;
8794         unsigned long lock_flags = 0;
8795
8796         ENTER;
8797         pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8798         msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8799         pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8800
8801         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8802         if (ioa_cfg->reset_cmd == ipr_cmd)
8803                 ipr_reset_ioa_job(ipr_cmd);
8804         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8805         LEAVE;
8806 }
8807
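/*
 * Editor's note: the reset pulse runs from a workqueue because it must
 * msleep() between asserting and deasserting PCI reset, which is not
 * possible in the job-step path that runs under the host lock. The
 * reset_cmd check after retaking the lock guards against this work
 * item having been superseded by a newer reset while it slept.
 */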
8808 /**
8809  * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8810  * @ipr_cmd:    ipr command struct
8811  *
8812  * Description: This asserts PCI reset to the adapter.
8813  *
8814  * Return value:
8815  *      IPR_RC_JOB_RETURN
8816  **/
8817 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8818 {
8819         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8820
8821         ENTER;
8822         INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8823         queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
8824         ipr_cmd->job_step = ipr_reset_slot_reset_done;
8825         LEAVE;
8826         return IPR_RC_JOB_RETURN;
8827 }
8828
8829 /**
8830  * ipr_reset_block_config_access_wait - Wait for permission to block config access
8831  * @ipr_cmd:    ipr command struct
8832  *
8833  * Description: This attempts to block config access to the IOA.
8834  *
8835  * Return value:
8836  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8837  **/
8838 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8839 {
8840         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8841         int rc = IPR_RC_JOB_CONTINUE;
8842
8843         if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8844                 ioa_cfg->cfg_locked = 1;
8845                 ipr_cmd->job_step = ioa_cfg->reset;
8846         } else {
8847                 if (ipr_cmd->u.time_left) {
8848                         rc = IPR_RC_JOB_RETURN;
8849                         ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8850                         ipr_reset_start_timer(ipr_cmd,
8851                                               IPR_CHECK_FOR_RESET_TIMEOUT);
8852                 } else {
8853                         ipr_cmd->job_step = ioa_cfg->reset;
8854                         dev_err(&ioa_cfg->pdev->dev,
8855                                 "Timed out waiting to lock config access. Resetting anyway.\n");
8856                 }
8857         }
8858
8859         return rc;
8860 }
8861
8862 /**
8863  * ipr_reset_block_config_access - Block config access to the IOA
8864  * @ipr_cmd:    ipr command struct
8865  *
8866  * Description: This attempts to block config access to the IOA
8867  *
8868  * Return value:
8869  *      IPR_RC_JOB_CONTINUE
8870  **/
8871 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8872 {
8873         ipr_cmd->ioa_cfg->cfg_locked = 0;
8874         ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8875         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8876         return IPR_RC_JOB_CONTINUE;
8877 }
8878
8879 /**
8880  * ipr_reset_allowed - Query whether or not IOA can be reset
8881  * @ioa_cfg:    ioa config struct
8882  *
8883  * Return value:
8884  *      0 if reset not allowed / non-zero if reset is allowed
8885  **/
8886 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8887 {
8888         volatile u32 temp_reg;
8889
8890         temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8891         return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8892 }
8893
8894 /**
8895  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8896  * @ipr_cmd:    ipr command struct
8897  *
8898  * Description: This function waits for adapter permission to run BIST,
8899  * then runs BIST. If the adapter does not give permission after a
8900  * reasonable time, we will reset it anyway. The impact of resetting
8901  * the adapter without warning is the risk of losing its persistent
8902  * error log. If the adapter is reset while it is writing to its
8903  * flash, the flash segment will have bad ECC and be zeroed.
8905  *
8906  * Return value:
8907  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8908  **/
8909 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8910 {
8911         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8912         int rc = IPR_RC_JOB_RETURN;
8913
8914         if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8915                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8916                 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8917         } else {
8918                 ipr_cmd->job_step = ipr_reset_block_config_access;
8919                 rc = IPR_RC_JOB_CONTINUE;
8920         }
8921
8922         return rc;
8923 }
8924
8925 /**
8926  * ipr_reset_alert - Alert the adapter of a pending reset
8927  * @ipr_cmd:    ipr command struct
8928  *
8929  * Description: This function alerts the adapter that it will be reset.
8930  * If memory space is not currently enabled, proceed directly
8931  * to running BIST on the adapter. The timer must always be started
8932  * so we guarantee we do not run BIST from ipr_isr.
8933  *
8934  * Return value:
8935  *      IPR_RC_JOB_RETURN
8936  **/
8937 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8938 {
8939         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8940         u16 cmd_reg;
8941         int rc;
8942
8943         ENTER;
8944         rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8945
8946         if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8947                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8948                 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8949                 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8950         } else {
8951                 ipr_cmd->job_step = ipr_reset_block_config_access;
8952         }
8953
8954         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8955         ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8956
8957         LEAVE;
8958         return IPR_RC_JOB_RETURN;
8959 }
8960
8961 /**
8962  * ipr_reset_quiesce_done - Complete IOA disconnect
8963  * @ipr_cmd:    ipr command struct
8964  *
8965  * Description: Freeze the adapter to complete quiesce processing
8966  *
8967  * Return value:
8968  *      IPR_RC_JOB_CONTINUE
8969  **/
8970 static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8971 {
8972         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8973
8974         ENTER;
8975         ipr_cmd->job_step = ipr_ioa_bringdown_done;
8976         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8977         LEAVE;
8978         return IPR_RC_JOB_CONTINUE;
8979 }
8980
8981 /**
8982  * ipr_reset_cancel_hcam_done - Check for outstanding commands
8983  * @ipr_cmd:    ipr command struct
8984  *
8985  * Description: Ensure nothing is outstanding to the IOA and
8986  * proceed with IOA disconnect. Otherwise reset the IOA.
8987  *
8988  * Return value:
8989  *      IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
8990  **/
8991 static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
8992 {
8993         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8994         struct ipr_cmnd *loop_cmd;
8995         struct ipr_hrr_queue *hrrq;
8996         int rc = IPR_RC_JOB_CONTINUE;
8997         int count = 0;
8998
8999         ENTER;
9000         ipr_cmd->job_step = ipr_reset_quiesce_done;
9001
9002         for_each_hrrq(hrrq, ioa_cfg) {
9003                 spin_lock(&hrrq->_lock);
9004                 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
9005                         count++;
9006                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9007                         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9008                         rc = IPR_RC_JOB_RETURN;
9009                         break;
9010                 }
9011                 spin_unlock(&hrrq->_lock);
9012
9013                 if (count)
9014                         break;
9015         }
9016
9017         LEAVE;
9018         return rc;
9019 }
9020
9021 /**
9022  * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
9023  * @ipr_cmd:    ipr command struct
9024  *
9025  * Description: Cancel any outstanding HCAMs to the IOA.
9026  *
9027  * Return value:
9028  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9029  **/
9030 static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
9031 {
9032         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9033         int rc = IPR_RC_JOB_CONTINUE;
9034         struct ipr_cmd_pkt *cmd_pkt;
9035         struct ipr_cmnd *hcam_cmd;
9036         struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
9037
9038         ENTER;
9039         ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
9040
9041         if (!hrrq->ioa_is_dead) {
9042                 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
9043                         list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
9044                                 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
9045                                         continue;
9046
9047                                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9049                                 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
9050                                 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
9051                                 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
9052                                 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
9053                                 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
9054                                 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
9055                                 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
9056                                 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
9057                                 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
9058                                 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
9059                                 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
9060                                 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
9061
9062                                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9063                                            IPR_CANCEL_TIMEOUT);
9064
9065                                 rc = IPR_RC_JOB_RETURN;
9066                                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9067                                 break;
9068                         }
9069                 }
9070         } else
9071                 ipr_cmd->job_step = ipr_reset_alert;
9072
9073         LEAVE;
9074         return rc;
9075 }
9076
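/*
 * Editor's note: the CDB built above spreads the 64-bit IOARCB address
 * of the HCAM being cancelled across the CDB: bytes 2-5 carry the low
 * 32 bits (the legacy address field) and bytes 10-13 the high 32 bits,
 * matching the IPR_CANCEL_64BIT_IOARCB modifier in cdb[1]. Equivalent
 * packing, for illustration only:
 *
 *   put_unaligned_be32(upper_32_bits(hcam_cmd->dma_addr), &cdb[10]);
 *   put_unaligned_be32(lower_32_bits(hcam_cmd->dma_addr), &cdb[2]);
 */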
9077 /**
9078  * ipr_reset_ucode_download_done - Microcode download completion
9079  * @ipr_cmd:    ipr command struct
9080  *
9081  * Description: This function unmaps the microcode download buffer.
9082  *
9083  * Return value:
9084  *      IPR_RC_JOB_CONTINUE
9085  **/
9086 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
9087 {
9088         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9089         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9090
9091         dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
9092                      sglist->num_sg, DMA_TO_DEVICE);
9093
9094         ipr_cmd->job_step = ipr_reset_alert;
9095         return IPR_RC_JOB_CONTINUE;
9096 }
9097
9098 /**
9099  * ipr_reset_ucode_download - Download microcode to the adapter
9100  * @ipr_cmd:    ipr command struct
9101  *
9102  * Description: This function checks to see if there is microcode
9103  * to download to the adapter. If there is, a download is performed.
9104  *
9105  * Return value:
9106  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9107  **/
9108 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
9109 {
9110         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9111         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9112
9113         ENTER;
9114         ipr_cmd->job_step = ipr_reset_alert;
9115
9116         if (!sglist)
9117                 return IPR_RC_JOB_CONTINUE;
9118
9119         ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9120         ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
9121         ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
9122         ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
9123         ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
9124         ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
9125         ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
9126
9127         if (ioa_cfg->sis64)
9128                 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
9129         else
9130                 ipr_build_ucode_ioadl(ipr_cmd, sglist);
9131         ipr_cmd->job_step = ipr_reset_ucode_download_done;
9132
9133         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9134                    IPR_WRITE_BUFFER_TIMEOUT);
9135
9136         LEAVE;
9137         return IPR_RC_JOB_RETURN;
9138 }
9139
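/*
 * Editor's note: WRITE BUFFER carries a 24-bit parameter list length in
 * CDB bytes 6-8, big-endian, which is what the three shift/mask lines
 * above encode. For example, a 0x012345-byte microcode image yields
 * cdb[6] = 0x01, cdb[7] = 0x23, cdb[8] = 0x45.
 */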
9140 /**
9141  * ipr_reset_shutdown_ioa - Shutdown the adapter
9142  * @ipr_cmd:    ipr command struct
9143  *
9144  * Description: This function issues an adapter shutdown of the
9145  * specified type to the specified adapter as part of the
9146  * adapter reset job.
9147  *
9148  * Return value:
9149  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9150  **/
9151 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
9152 {
9153         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9154         enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
9155         unsigned long timeout;
9156         int rc = IPR_RC_JOB_CONTINUE;
9157
9158         ENTER;
9159         if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
9160                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9161         else if (shutdown_type != IPR_SHUTDOWN_NONE &&
9162                         !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
9163                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9164                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9165                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9166                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
9167
9168                 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
9169                         timeout = IPR_SHUTDOWN_TIMEOUT;
9170                 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
9171                         timeout = IPR_INTERNAL_TIMEOUT;
9172                 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
9173                         timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
9174                 else
9175                         timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
9176
9177                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
9178
9179                 rc = IPR_RC_JOB_RETURN;
9180                 ipr_cmd->job_step = ipr_reset_ucode_download;
9181         } else
9182                 ipr_cmd->job_step = ipr_reset_alert;
9183
9184         LEAVE;
9185         return rc;
9186 }
9187
9188 /**
9189  * ipr_reset_ioa_job - Adapter reset job
9190  * @ipr_cmd:    ipr command struct
9191  *
9192  * Description: This function is the job router for the adapter reset job.
9193  *
9194  * Return value:
9195  *      none
9196  **/
9197 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
9198 {
9199         u32 rc, ioasc;
9200         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9201
9202         do {
9203                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
9204
9205                 if (ioa_cfg->reset_cmd != ipr_cmd) {
9206                         /*
9207                          * We are doing nested adapter resets and this is
9208                          * not the current reset job.
9209                          */
9210                         list_add_tail(&ipr_cmd->queue,
9211                                         &ipr_cmd->hrrq->hrrq_free_q);
9212                         return;
9213                 }
9214
9215                 if (IPR_IOASC_SENSE_KEY(ioasc)) {
9216                         rc = ipr_cmd->job_step_failed(ipr_cmd);
9217                         if (rc == IPR_RC_JOB_RETURN)
9218                                 return;
9219                 }
9220
9221                 ipr_reinit_ipr_cmnd(ipr_cmd);
9222                 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
9223                 rc = ipr_cmd->job_step(ipr_cmd);
9224         } while (rc == IPR_RC_JOB_CONTINUE);
9225 }
9226
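/*
 * Editor's note on the job-step protocol (inferred from the loop
 * above): a step returning IPR_RC_JOB_CONTINUE is immediately followed
 * by the next ipr_cmd->job_step in the same loop iteration, while
 * IPR_RC_JOB_RETURN means the step armed a timer or issued an adapter
 * command and ipr_reset_ioa_job() will be re-entered through
 * ipr_cmd->done when that completes.
 */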
9227 /**
9228  * _ipr_initiate_ioa_reset - Initiate an adapter reset
9229  * @ioa_cfg:            ioa config struct
9230  * @job_step:           first job step of reset job
9231  * @shutdown_type:      shutdown type
9232  *
9233  * Description: This function will initiate the reset of the given adapter
9234  * starting at the selected job step.
9235  * If the caller needs to wait on the completion of the reset,
9236  * the caller must sleep on the reset_wait_q.
9237  *
9238  * Return value:
9239  *      none
9240  **/
9241 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9242                                     int (*job_step) (struct ipr_cmnd *),
9243                                     enum ipr_shutdown_type shutdown_type)
9244 {
9245         struct ipr_cmnd *ipr_cmd;
9246         int i;
9247
9248         ioa_cfg->in_reset_reload = 1;
9249         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9250                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9251                 ioa_cfg->hrrq[i].allow_cmds = 0;
9252                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9253         }
9254         wmb();
9255         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9256                 ioa_cfg->scsi_unblock = 0;
9257                 ioa_cfg->scsi_blocked = 1;
9258                 scsi_block_requests(ioa_cfg->host);
9259         }
9260
9261         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9262         ioa_cfg->reset_cmd = ipr_cmd;
9263         ipr_cmd->job_step = job_step;
9264         ipr_cmd->u.shutdown_type = shutdown_type;
9265
9266         ipr_reset_ioa_job(ipr_cmd);
9267 }
9268
9269 /**
9270  * ipr_initiate_ioa_reset - Initiate an adapter reset
9271  * @ioa_cfg:            ioa config struct
9272  * @shutdown_type:      shutdown type
9273  *
9274  * Description: This function will initiate the reset of the given adapter.
9275  * If the caller needs to wait on the completion of the reset,
9276  * the caller must sleep on the reset_wait_q.
9277  *
9278  * Return value:
9279  *      none
9280  **/
9281 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9282                                    enum ipr_shutdown_type shutdown_type)
9283 {
9284         int i;
9285
9286         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
9287                 return;
9288
9289         if (ioa_cfg->in_reset_reload) {
9290                 if (ioa_cfg->sdt_state == GET_DUMP)
9291                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9292                 else if (ioa_cfg->sdt_state == READ_DUMP)
9293                         ioa_cfg->sdt_state = ABORT_DUMP;
9294         }
9295
9296         if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
9297                 dev_err(&ioa_cfg->pdev->dev,
9298                         "IOA taken offline - error recovery failed\n");
9299
9300                 ioa_cfg->reset_retries = 0;
9301                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9302                         spin_lock(&ioa_cfg->hrrq[i]._lock);
9303                         ioa_cfg->hrrq[i].ioa_is_dead = 1;
9304                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
9305                 }
9306                 wmb();
9307
9308                 if (ioa_cfg->in_ioa_bringdown) {
9309                         ioa_cfg->reset_cmd = NULL;
9310                         ioa_cfg->in_reset_reload = 0;
9311                         ipr_fail_all_ops(ioa_cfg);
9312                         wake_up_all(&ioa_cfg->reset_wait_q);
9313
9314                         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9315                                 ioa_cfg->scsi_unblock = 1;
9316                                 schedule_work(&ioa_cfg->work_q);
9317                         }
9318                         return;
9319                 } else {
9320                         ioa_cfg->in_ioa_bringdown = 1;
9321                         shutdown_type = IPR_SHUTDOWN_NONE;
9322                 }
9323         }
9324
9325         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
9326                                 shutdown_type);
9327 }
9328
9329 /**
9330  * ipr_reset_freeze - Hold off all I/O activity
9331  * @ipr_cmd:    ipr command struct
9332  *
9333  * Description: If the PCI slot is frozen, hold off all I/O
9334  * activity; then, as soon as the slot is available again,
9335  * initiate an adapter reset.
9336  */
9337 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
9338 {
9339         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9340         int i;
9341
9342         /* Disallow new interrupts, avoid loop */
9343         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9344                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9345                 ioa_cfg->hrrq[i].allow_interrupts = 0;
9346                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9347         }
9348         wmb();
9349         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
9350         ipr_cmd->done = ipr_reset_ioa_job;
9351         return IPR_RC_JOB_RETURN;
9352 }
9353
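/*
 * Editor's note: the callbacks below implement the PCI error recovery
 * (EEH/AER) sequence. In the usual flow, ipr_pci_error_detected()
 * reports CAN_RECOVER for a frozen channel, ipr_pci_mmio_enabled()
 * then requests NEED_RESET, and ipr_pci_slot_reset() restarts the
 * adapter once the slot has been reset; a permanent failure takes the
 * IOA offline instead.
 */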
9354 /**
9355  * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9356  * @pdev:       PCI device struct
9357  *
9358  * Description: This routine is called to tell us that the MMIO
9359  * access to the IOA has been restored.
9360  */
9361 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
9362 {
9363         unsigned long flags = 0;
9364         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9365
9366         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9367         if (!ioa_cfg->probe_done)
9368                 pci_save_state(pdev);
9369         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9370         return PCI_ERS_RESULT_NEED_RESET;
9371 }
9372
9373 /**
9374  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9375  * @pdev:       PCI device struct
9376  *
9377  * Description: This routine is called to tell us that the PCI bus
9378  * is down. Can't do anything here, except put the device driver
9379  * into a holding pattern, waiting for the PCI bus to come back.
9380  */
9381 static void ipr_pci_frozen(struct pci_dev *pdev)
9382 {
9383         unsigned long flags = 0;
9384         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9385
9386         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9387         if (ioa_cfg->probe_done)
9388                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
9389         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9390 }
9391
9392 /**
9393  * ipr_pci_slot_reset - Called when PCI slot has been reset.
9394  * @pdev:       PCI device struct
9395  *
9396  * Description: This routine is called by the pci error recovery
9397  * code after the PCI slot has been reset, just before we
9398  * should resume normal operations.
9399  */
9400 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
9401 {
9402         unsigned long flags = 0;
9403         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9404
9405         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9406         if (ioa_cfg->probe_done) {
9407                 if (ioa_cfg->needs_warm_reset)
9408                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9409                 else
9410                         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9411                                                 IPR_SHUTDOWN_NONE);
9412         } else
9413                 wake_up_all(&ioa_cfg->eeh_wait_q);
9414         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9415         return PCI_ERS_RESULT_RECOVERED;
9416 }
9417
9418 /**
9419  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9420  * @pdev:       PCI device struct
9421  *
9422  * Description: This routine is called when the PCI bus has
9423  * permanently failed.
9424  */
9425 static void ipr_pci_perm_failure(struct pci_dev *pdev)
9426 {
9427         unsigned long flags = 0;
9428         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9429         int i;
9430
9431         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9432         if (ioa_cfg->probe_done) {
9433                 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9434                         ioa_cfg->sdt_state = ABORT_DUMP;
9435                 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9436                 ioa_cfg->in_ioa_bringdown = 1;
9437                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9438                         spin_lock(&ioa_cfg->hrrq[i]._lock);
9439                         ioa_cfg->hrrq[i].allow_cmds = 0;
9440                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
9441                 }
9442                 wmb();
9443                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9444         } else
9445                 wake_up_all(&ioa_cfg->eeh_wait_q);
9446         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9447 }
9448
9449 /**
9450  * ipr_pci_error_detected - Called when a PCI error is detected.
9451  * @pdev:       PCI device struct
9452  * @state:      PCI channel state
9453  *
9454  * Description: Called when a PCI error is detected.
9455  *
9456  * Return value:
9457  *      PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9458  */
9459 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9460                                                pci_channel_state_t state)
9461 {
9462         switch (state) {
9463         case pci_channel_io_frozen:
9464                 ipr_pci_frozen(pdev);
9465                 return PCI_ERS_RESULT_CAN_RECOVER;
9466         case pci_channel_io_perm_failure:
9467                 ipr_pci_perm_failure(pdev);
9468                 return PCI_ERS_RESULT_DISCONNECT;
9470         default:
9471                 break;
9472         }
9473         return PCI_ERS_RESULT_NEED_RESET;
9474 }
9475
9476 /**
9477  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9478  * @ioa_cfg:    ioa cfg struct
9479  *
9480  * Description: This is the second phase of adapter initialization.
9481  * This function takes care of initializing the adapter to the point
9482  * where it can accept new commands.
9483  *
9484  * Return value:
9485  *      0 on success / -EIO on failure
9486  **/
9487 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
9488 {
9489         int rc = 0;
9490         unsigned long host_lock_flags = 0;
9491
9492         ENTER;
9493         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9494         dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg addr: 0x%p\n", ioa_cfg);
9495         ioa_cfg->probe_done = 1;
9496         if (ioa_cfg->needs_hard_reset) {
9497                 ioa_cfg->needs_hard_reset = 0;
9498                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9499         } else
9500                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9501                                         IPR_SHUTDOWN_NONE);
9502         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9503
9504         LEAVE;
9505         return rc;
9506 }
9507
9508 /**
9509  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9510  * @ioa_cfg:    ioa config struct
9511  *
9512  * Return value:
9513  *      none
9514  **/
9515 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9516 {
9517         int i;
9518
9519         if (ioa_cfg->ipr_cmnd_list) {
9520                 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9521                         if (ioa_cfg->ipr_cmnd_list[i])
9522                                 dma_pool_free(ioa_cfg->ipr_cmd_pool,
9523                                               ioa_cfg->ipr_cmnd_list[i],
9524                                               ioa_cfg->ipr_cmnd_list_dma[i]);
9525
9526                         ioa_cfg->ipr_cmnd_list[i] = NULL;
9527                 }
9528         }
9529
9530         dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
9532
9533         kfree(ioa_cfg->ipr_cmnd_list);
9534         kfree(ioa_cfg->ipr_cmnd_list_dma);
9535         ioa_cfg->ipr_cmnd_list = NULL;
9536         ioa_cfg->ipr_cmnd_list_dma = NULL;
9537         ioa_cfg->ipr_cmd_pool = NULL;
9538 }
9539
9540 /**
9541  * ipr_free_mem - Frees memory allocated for an adapter
9542  * @ioa_cfg:    ioa cfg struct
9543  *
9544  * Return value:
9545  *      nothing
9546  **/
9547 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9548 {
9549         int i;
9550
9551         kfree(ioa_cfg->res_entries);
9552         dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9553                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9554         ipr_free_cmd_blks(ioa_cfg);
9555
9556         for (i = 0; i < ioa_cfg->hrrq_num; i++)
9557                 dma_free_coherent(&ioa_cfg->pdev->dev,
9558                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9559                                   ioa_cfg->hrrq[i].host_rrq,
9560                                   ioa_cfg->hrrq[i].host_rrq_dma);
9561
9562         dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9563                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9564
9565         for (i = 0; i < IPR_MAX_HCAMS; i++) {
9566                 dma_free_coherent(&ioa_cfg->pdev->dev,
9567                                   sizeof(struct ipr_hostrcb),
9568                                   ioa_cfg->hostrcb[i],
9569                                   ioa_cfg->hostrcb_dma[i]);
9570         }
9571
9572         ipr_free_dump(ioa_cfg);
9573         kfree(ioa_cfg->trace);
9574 }
9575
9576 /**
9577  * ipr_free_irqs - Free all allocated IRQs for the adapter.
9578  * @ioa_cfg:    ipr cfg struct
9579  *
9580  * This function frees all allocated IRQs for the
9581  * specified adapter.
9582  *
9583  * Return value:
9584  *      none
9585  **/
9586 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9587 {
9588         struct pci_dev *pdev = ioa_cfg->pdev;
9589         int i;
9590
9591         for (i = 0; i < ioa_cfg->nvectors; i++)
9592                 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
9593         pci_free_irq_vectors(pdev);
9594 }
9595
9596 /**
9597  * ipr_free_all_resources - Free all allocated resources for an adapter.
9598  * @ioa_cfg:    ioa config struct
9599  *
9600  * This function frees all allocated resources for the
9601  * specified adapter.
9602  *
9603  * Return value:
9604  *      none
9605  **/
9606 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9607 {
9608         struct pci_dev *pdev = ioa_cfg->pdev;
9609
9610         ENTER;
9611         ipr_free_irqs(ioa_cfg);
9612         if (ioa_cfg->reset_work_q)
9613                 destroy_workqueue(ioa_cfg->reset_work_q);
9614         iounmap(ioa_cfg->hdw_dma_regs);
9615         pci_release_regions(pdev);
9616         ipr_free_mem(ioa_cfg);
9617         scsi_host_put(ioa_cfg->host);
9618         pci_disable_device(pdev);
9619         LEAVE;
9620 }
9621
9622 /**
9623  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9624  * @ioa_cfg:    ioa config struct
9625  *
9626  * Return value:
9627  *      0 on success / -ENOMEM on allocation failure
9628  **/
9629 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9630 {
9631         struct ipr_cmnd *ipr_cmd;
9632         struct ipr_ioarcb *ioarcb;
9633         dma_addr_t dma_addr;
9634         int i, entries_each_hrrq, hrrq_id = 0;
9635
9636         ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
9637                                                 sizeof(struct ipr_cmnd), 512, 0);
9638
9639         if (!ioa_cfg->ipr_cmd_pool)
9640                 return -ENOMEM;
9641
9642         ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9643         ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9644
9645         if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9646                 ipr_free_cmd_blks(ioa_cfg);
9647                 return -ENOMEM;
9648         }
9649
9650         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9651                 if (ioa_cfg->hrrq_num > 1) {
9652                         if (i == 0) {
9653                                 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9654                                 ioa_cfg->hrrq[i].min_cmd_id = 0;
9655                                 ioa_cfg->hrrq[i].max_cmd_id =
9656                                         (entries_each_hrrq - 1);
9657                         } else {
9658                                 entries_each_hrrq =
9659                                         IPR_NUM_BASE_CMD_BLKS/
9660                                         (ioa_cfg->hrrq_num - 1);
9661                                 ioa_cfg->hrrq[i].min_cmd_id =
9662                                         IPR_NUM_INTERNAL_CMD_BLKS +
9663                                         (i - 1) * entries_each_hrrq;
9664                                 ioa_cfg->hrrq[i].max_cmd_id =
9665                                         (IPR_NUM_INTERNAL_CMD_BLKS +
9666                                         i * entries_each_hrrq - 1);
9667                         }
9668                 } else {
9669                         entries_each_hrrq = IPR_NUM_CMD_BLKS;
9670                         ioa_cfg->hrrq[i].min_cmd_id = 0;
9671                         ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9672                 }
9673                 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9674         }
9675
9676         BUG_ON(ioa_cfg->hrrq_num == 0);
9677
9678         i = IPR_NUM_CMD_BLKS -
9679                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9680         if (i > 0) {
9681                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9682                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9683         }
9684
9685         for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9686                 ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
9687
9688                 if (!ipr_cmd) {
9689                         ipr_free_cmd_blks(ioa_cfg);
9690                         return -ENOMEM;
9691                 }
9692
9693                 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
9694                 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9695                 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9696
9697                 ioarcb = &ipr_cmd->ioarcb;
9698                 ipr_cmd->dma_addr = dma_addr;
9699                 if (ioa_cfg->sis64)
9700                         ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9701                 else
9702                         ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9703
9704                 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9705                 if (ioa_cfg->sis64) {
9706                         ioarcb->u.sis64_addr_data.data_ioadl_addr =
9707                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9708                         ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9709                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9710                 } else {
9711                         ioarcb->write_ioadl_addr =
9712                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9713                         ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9714                         ioarcb->ioasa_host_pci_addr =
9715                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9716                 }
9717                 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9718                 ipr_cmd->cmd_index = i;
9719                 ipr_cmd->ioa_cfg = ioa_cfg;
9720                 ipr_cmd->sense_buffer_dma = dma_addr +
9721                         offsetof(struct ipr_cmnd, sense_buffer);
9722
9723                 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9724                 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9725                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9726                 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9727                         hrrq_id++;
9728         }
9729
9730         return 0;
9731 }
9732
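/*
 * Editor's note on the partitioning above: with multiple HRRQs, queue
 * 0 receives IPR_NUM_INTERNAL_CMD_BLKS command blocks and the
 * IPR_NUM_BASE_CMD_BLKS remaining are divided evenly among queues
 * 1..N-1; any remainder from that integer division is folded into the
 * last queue, so exactly IPR_NUM_CMD_BLKS blocks are always allocated.
 */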
9733 /**
9734  * ipr_alloc_mem - Allocate memory for an adapter
9735  * @ioa_cfg:    ioa config struct
9736  *
9737  * Return value:
9738  *      0 on success / non-zero for error
9739  **/
9740 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9741 {
9742         struct pci_dev *pdev = ioa_cfg->pdev;
9743         int i, rc = -ENOMEM;
9744
9745         ENTER;
9746         ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported,
9747                                        sizeof(struct ipr_resource_entry), GFP_KERNEL);
9748
9749         if (!ioa_cfg->res_entries)
9750                 goto out;
9751
9752         for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9753                 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9754                 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9755         }
9756
9757         ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9758                                               sizeof(struct ipr_misc_cbs),
9759                                               &ioa_cfg->vpd_cbs_dma,
9760                                               GFP_KERNEL);
9761
9762         if (!ioa_cfg->vpd_cbs)
9763                 goto out_free_res_entries;
9764
9765         if (ipr_alloc_cmd_blks(ioa_cfg))
9766                 goto out_free_vpd_cbs;
9767
9768         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9769                 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9770                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9771                                         &ioa_cfg->hrrq[i].host_rrq_dma,
9772                                         GFP_KERNEL);
9773
9774                 if (!ioa_cfg->hrrq[i].host_rrq)  {
9775                         while (--i >= 0)
9776                                 dma_free_coherent(&pdev->dev,
9777                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9778                                         ioa_cfg->hrrq[i].host_rrq,
9779                                         ioa_cfg->hrrq[i].host_rrq_dma);
9780                         goto out_ipr_free_cmd_blocks;
9781                 }
9782                 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9783         }
9784
9785         ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9786                                                   ioa_cfg->cfg_table_size,
9787                                                   &ioa_cfg->cfg_table_dma,
9788                                                   GFP_KERNEL);
9789
9790         if (!ioa_cfg->u.cfg_table)
9791                 goto out_free_host_rrq;
9792
9793         for (i = 0; i < IPR_MAX_HCAMS; i++) {
9794                 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9795                                                          sizeof(struct ipr_hostrcb),
9796                                                          &ioa_cfg->hostrcb_dma[i],
9797                                                          GFP_KERNEL);
9798
9799                 if (!ioa_cfg->hostrcb[i])
9800                         goto out_free_hostrcb_dma;
9801
9802                 ioa_cfg->hostrcb[i]->hostrcb_dma =
9803                         ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9804                 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9805                 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9806         }
9807
9808         ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES,
9809                                  sizeof(struct ipr_trace_entry), GFP_KERNEL);
9810
9811         if (!ioa_cfg->trace)
9812                 goto out_free_hostrcb_dma;
9813
9814         rc = 0;
9815 out:
9816         LEAVE;
9817         return rc;
9818
9819 out_free_hostrcb_dma:
9820         while (i-- > 0) {
9821                 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9822                                   ioa_cfg->hostrcb[i],
9823                                   ioa_cfg->hostrcb_dma[i]);
9824         }
9825         dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9826                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9827 out_free_host_rrq:
9828         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9829                 dma_free_coherent(&pdev->dev,
9830                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9831                                   ioa_cfg->hrrq[i].host_rrq,
9832                                   ioa_cfg->hrrq[i].host_rrq_dma);
9833         }
9834 out_ipr_free_cmd_blocks:
9835         ipr_free_cmd_blks(ioa_cfg);
9836 out_free_vpd_cbs:
9837         dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9838                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9839 out_free_res_entries:
9840         kfree(ioa_cfg->res_entries);
9841         goto out;
9842 }
9843
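/*
 * Editor's note: the error labels above unwind in reverse allocation
 * order and deliberately fall through into one another (hostrcbs, then
 * the config table, the host RRQs, the command blocks, the VPD control
 * blocks, and finally res_entries), ending in the common "goto out".
 */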
9844 /**
9845  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9846  * @ioa_cfg:    ioa config struct
9847  *
9848  * Return value:
9849  *      none
9850  **/
9851 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9852 {
9853         int i;
9854
9855         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9856                 ioa_cfg->bus_attr[i].bus = i;
9857                 ioa_cfg->bus_attr[i].qas_enabled = 0;
9858                 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9859                 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9860                         ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9861                 else
9862                         ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9863         }
9864 }
9865
9866 /**
9867  * ipr_init_regs - Initialize IOA registers
9868  * @ioa_cfg:    ioa config struct
9869  *
9870  * Return value:
9871  *      none
9872  **/
9873 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9874 {
9875         const struct ipr_interrupt_offsets *p;
9876         struct ipr_interrupts *t;
9877         void __iomem *base;
9878
9879         p = &ioa_cfg->chip_cfg->regs;
9880         t = &ioa_cfg->regs;
9881         base = ioa_cfg->hdw_dma_regs;
9882
9883         t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9884         t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9885         t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9886         t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9887         t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9888         t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9889         t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9890         t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9891         t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9892         t->ioarrin_reg = base + p->ioarrin_reg;
9893         t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9894         t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9895         t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9896         t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9897         t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9898         t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9899
9900         if (ioa_cfg->sis64) {
9901                 t->init_feedback_reg = base + p->init_feedback_reg;
9902                 t->dump_addr_reg = base + p->dump_addr_reg;
9903                 t->dump_data_reg = base + p->dump_data_reg;
9904                 t->endian_swap_reg = base + p->endian_swap_reg;
9905         }
9906 }
9907
9908 /**
9909  * ipr_init_ioa_cfg - Initialize IOA config struct
9910  * @ioa_cfg:    ioa config struct
9911  * @host:               scsi host struct
9912  * @pdev:               PCI dev struct
9913  *
9914  * Return value:
9915  *      none
9916  **/
9917 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9918                              struct Scsi_Host *host, struct pci_dev *pdev)
9919 {
9920         int i;
9921
9922         ioa_cfg->host = host;
9923         ioa_cfg->pdev = pdev;
9924         ioa_cfg->log_level = ipr_log_level;
9925         ioa_cfg->doorbell = IPR_DOORBELL;
9926         sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9927         sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9928         sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9929         sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9930         sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9931         sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9932
9933         INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9934         INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9935         INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
9936         INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9937         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9938         INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9939         init_waitqueue_head(&ioa_cfg->reset_wait_q);
9940         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9941         init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9942         ioa_cfg->sdt_state = INACTIVE;
9943
9944         ipr_initialize_bus_attr(ioa_cfg);
9945         ioa_cfg->max_devs_supported = ipr_max_devs;
9946
9947         if (ioa_cfg->sis64) {
9948                 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9949                 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9950                 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9951                         ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9952                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9953                                            + ((sizeof(struct ipr_config_table_entry64)
9954                                                * ioa_cfg->max_devs_supported)));
9955         } else {
9956                 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9957                 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9958                 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9959                         ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9960                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9961                                            + ((sizeof(struct ipr_config_table_entry)
9962                                                * ioa_cfg->max_devs_supported)));
9963         }
9964
9965         host->max_channel = IPR_VSET_BUS;
9966         host->unique_id = host->host_no;
9967         host->max_cmd_len = IPR_MAX_CDB_LEN;
9968         host->can_queue = ioa_cfg->max_cmds;
9969         pci_set_drvdata(pdev, ioa_cfg);
9970
9971         for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9972                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9973                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9974                 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9975                 if (i == 0)
9976                         ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9977                 else
9978                         ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9979         }
9980 }
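/*
 * Design note: HRRQ 0 above shares the SCSI midlayer's host_lock while
 * every additional HRRQ gets its own private _lock, presumably so that
 * multiple MSI-X vectors can post completions without all serializing
 * on a single global lock.
 */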
9981
9982 /**
9983  * ipr_get_chip_info - Find adapter chip information
9984  * @dev_id:             PCI device id struct
9985  *
9986  * Return value:
9987  *      ptr to chip information on success / NULL on failure
9988  **/
9989 static const struct ipr_chip_t *
9990 ipr_get_chip_info(const struct pci_device_id *dev_id)
9991 {
9992         int i;
9993
9994         for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9995                 if (ipr_chip[i].vendor == dev_id->vendor &&
9996                     ipr_chip[i].device == dev_id->device)
9997                         return &ipr_chip[i];
9998         return NULL;
9999 }
10000
10001 /**
10002  * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
10003  *                                              during probe time
10004  * @ioa_cfg:    ioa config struct
10005  *
10006  * Return value:
10007  *      None
10008  **/
10009 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
10010 {
10011         struct pci_dev *pdev = ioa_cfg->pdev;
10012
10013         if (pci_channel_offline(pdev)) {
10014                 wait_event_timeout(ioa_cfg->eeh_wait_q,
10015                                    !pci_channel_offline(pdev),
10016                                    IPR_PCI_ERROR_RECOVERY_TIMEOUT);
10017                 pci_restore_state(pdev);
10018         }
10019 }
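/*
 * If the PCI channel is offline (e.g. EEH recovery is in progress on
 * the slot), the helper above blocks the probe path for up to
 * IPR_PCI_ERROR_RECOVERY_TIMEOUT and then restores the saved config
 * space before any further MMIO is attempted.
 */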
10020
10021 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
10022 {
10023         int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
10024
10025         for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
10026                 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
10027                          "host%d-%d", ioa_cfg->host->host_no, vec_idx);
		/* snprintf() NUL-terminates; make the termination explicit */
		ioa_cfg->vectors_info[vec_idx].desc[n] = '\0';
10030         }
10031 }
10032
10033 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
10034                 struct pci_dev *pdev)
10035 {
10036         int i, rc;
10037
10038         for (i = 1; i < ioa_cfg->nvectors; i++) {
10039                 rc = request_irq(pci_irq_vector(pdev, i),
10040                         ipr_isr_mhrrq,
10041                         0,
10042                         ioa_cfg->vectors_info[i].desc,
10043                         &ioa_cfg->hrrq[i]);
10044                 if (rc) {
10045                         while (--i >= 0)
10046                                 free_irq(pci_irq_vector(pdev, i),
10047                                         &ioa_cfg->hrrq[i]);
10048                         return rc;
10049                 }
10050         }
10051         return 0;
10052 }
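/*
 * Vector 0 is requested separately in ipr_probe_ioa(); the helper above
 * wires vectors 1..nvectors-1 to ipr_isr_mhrrq() and, on failure, frees
 * every vector it managed to request before returning the error.
 */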
10053
/**
 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
 * @irq:                interrupt number
 * @devp:               pointer to the ioa config struct
 *
 * Description: Simply set the msi_received flag to 1 indicating that
 * Message Signaled Interrupts are supported.
 *
 * Return value:
 *      IRQ_HANDLED
 **/
10064 static irqreturn_t ipr_test_intr(int irq, void *devp)
10065 {
10066         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
10067         unsigned long lock_flags = 0;
10068         irqreturn_t rc = IRQ_HANDLED;
10069
	dev_info(&ioa_cfg->pdev->dev, "Received IRQ: %d\n", irq);
10071         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10072
10073         ioa_cfg->msi_received = 1;
10074         wake_up(&ioa_cfg->msi_wait_q);
10075
10076         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10077         return rc;
10078 }
10079
/**
 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 * @ioa_cfg:            ioa config struct
 * @pdev:               PCI device struct
 *
 * Description: This routine sets up and initiates a test interrupt to determine
 * if the interrupt is received via the ipr_test_intr() service routine.
 * If the test fails, the driver will fall back to LSI.
 *
 * Return value:
 *      0 on success / non-zero on failure
 **/
10091 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
10092 {
10093         int rc;
10094         volatile u32 int_reg;
10095         unsigned long lock_flags = 0;
10096         int irq = pci_irq_vector(pdev, 0);
10097
10098         ENTER;
10099
10100         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10101         init_waitqueue_head(&ioa_cfg->msi_wait_q);
10102         ioa_cfg->msi_received = 0;
10103         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10104         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);	/* flush the posted write */
10106         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10107
10108         rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
10109         if (rc) {
		dev_err(&pdev->dev, "Cannot assign irq %d\n", irq);
10111                 return rc;
10112         } else if (ipr_debug)
10113                 dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
10114
10115         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
10116         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
10117         wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
10118         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10119         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10120
10121         if (!ioa_cfg->msi_received) {
10122                 /* MSI test failed */
10123                 dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
10124                 rc = -EOPNOTSUPP;
10125         } else if (ipr_debug)
10126                 dev_info(&pdev->dev, "MSI test succeeded.\n");
10127
10128         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10129
10130         free_irq(irq, ioa_cfg);
10131
10132         LEAVE;
10133
10134         return rc;
10135 }
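/*
 * Illustration only, not driver code: ipr_test_msi() above is an
 * instance of a generic "fire a test interrupt and wait" handshake.
 * A minimal sketch of that pattern, with hypothetical example_* names
 * standing in for the device-specific pieces, might look like this:
 */
#if 0	/* example only */
struct example_ctx {
	wait_queue_head_t wait_q;
	int received;
};

static irqreturn_t example_test_handler(int irq, void *devp)
{
	struct example_ctx *ctx = devp;

	ctx->received = 1;		/* the only job: prove delivery */
	wake_up(&ctx->wait_q);
	return IRQ_HANDLED;
}

static bool example_irq_works(struct example_ctx *ctx, int irq)
{
	init_waitqueue_head(&ctx->wait_q);
	ctx->received = 0;

	if (request_irq(irq, example_test_handler, 0, "example", ctx))
		return false;

	example_trigger_test_interrupt();	/* hypothetical device poke */
	wait_event_timeout(ctx->wait_q, ctx->received, HZ);
	free_irq(irq, ctx);

	return ctx->received != 0;
}
#endif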
10136
/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev:               PCI device struct
 * @dev_id:             PCI device id struct
 *
 * Return value:
 *      0 on success / non-zero on failure
 **/
10144 static int ipr_probe_ioa(struct pci_dev *pdev,
10145                          const struct pci_device_id *dev_id)
10146 {
10147         struct ipr_ioa_cfg *ioa_cfg;
10148         struct Scsi_Host *host;
10149         unsigned long ipr_regs_pci;
10150         void __iomem *ipr_regs;
10151         int rc = PCIBIOS_SUCCESSFUL;
10152         volatile u32 mask, uproc, interrupts;
10153         unsigned long lock_flags, driver_lock_flags;
10154         unsigned int irq_flag;
10155
10156         ENTER;
10157
10158         dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
10159         host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
10160
10161         if (!host) {
10162                 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
10163                 rc = -ENOMEM;
10164                 goto out;
10165         }
10166
10167         ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
10168         memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
10169         ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
10170
10171         ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
10172
	if (!ioa_cfg->ipr_chip) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		rc = -ENODEV;	/* don't fall through and report success */
		goto out_scsi_host_put;
	}
10178
10179         /* set SIS 32 or SIS 64 */
10180         ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
10181         ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
10182         ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
10183         ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
10184
10185         if (ipr_transop_timeout)
10186                 ioa_cfg->transop_timeout = ipr_transop_timeout;
10187         else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
10188                 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
10189         else
10190                 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
10191
10192         ioa_cfg->revid = pdev->revision;
10193
10194         ipr_init_ioa_cfg(ioa_cfg, host, pdev);
10195
10196         ipr_regs_pci = pci_resource_start(pdev, 0);
10197
10198         rc = pci_request_regions(pdev, IPR_NAME);
10199         if (rc < 0) {
10200                 dev_err(&pdev->dev,
10201                         "Couldn't register memory range of registers\n");
10202                 goto out_scsi_host_put;
10203         }
10204
10205         rc = pci_enable_device(pdev);
10206
10207         if (rc || pci_channel_offline(pdev)) {
10208                 if (pci_channel_offline(pdev)) {
10209                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10210                         rc = pci_enable_device(pdev);
10211                 }
10212
10213                 if (rc) {
10214                         dev_err(&pdev->dev, "Cannot enable adapter\n");
10215                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10216                         goto out_release_regions;
10217                 }
10218         }
10219
10220         ipr_regs = pci_ioremap_bar(pdev, 0);
10221
10222         if (!ipr_regs) {
10223                 dev_err(&pdev->dev,
10224                         "Couldn't map memory range of registers\n");
10225                 rc = -ENOMEM;
10226                 goto out_disable;
10227         }
10228
10229         ioa_cfg->hdw_dma_regs = ipr_regs;
10230         ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
10231         ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
10232
10233         ipr_init_regs(ioa_cfg);
10234
10235         if (ioa_cfg->sis64) {
10236                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10237                 if (rc < 0) {
10238                         dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
10239                         rc = dma_set_mask_and_coherent(&pdev->dev,
10240                                                        DMA_BIT_MASK(32));
10241                 }
	} else {
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	}
10244
10245         if (rc < 0) {
10246                 dev_err(&pdev->dev, "Failed to set DMA mask\n");
10247                 goto cleanup_nomem;
10248         }
10249
10250         rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
10251                                    ioa_cfg->chip_cfg->cache_line_size);
10252
10253         if (rc != PCIBIOS_SUCCESSFUL) {
10254                 dev_err(&pdev->dev, "Write of cache line size failed\n");
10255                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10256                 rc = -EIO;
10257                 goto cleanup_nomem;
10258         }
10259
10260         /* Issue MMIO read to ensure card is not in EEH */
10261         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
10262         ipr_wait_for_pci_err_recovery(ioa_cfg);
10263
10264         if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
10265                 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
10266                         IPR_MAX_MSIX_VECTORS);
10267                 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
10268         }
10269
10270         irq_flag = PCI_IRQ_LEGACY;
10271         if (ioa_cfg->ipr_chip->has_msi)
10272                 irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
10273         rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
10274         if (rc < 0) {
10275                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10276                 goto cleanup_nomem;
10277         }
10278         ioa_cfg->nvectors = rc;
10279
10280         if (!pdev->msi_enabled && !pdev->msix_enabled)
10281                 ioa_cfg->clear_isr = 1;
10282
10283         pci_set_master(pdev);
10284
10285         if (pci_channel_offline(pdev)) {
10286                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10287                 pci_set_master(pdev);
10288                 if (pci_channel_offline(pdev)) {
10289                         rc = -EIO;
10290                         goto out_msi_disable;
10291                 }
10292         }
10293
10294         if (pdev->msi_enabled || pdev->msix_enabled) {
10295                 rc = ipr_test_msi(ioa_cfg, pdev);
10296                 switch (rc) {
10297                 case 0:
			dev_info(&pdev->dev,
				"Request for %d MSI%ss succeeded.\n", ioa_cfg->nvectors,
				pdev->msix_enabled ? "-X" : "");
10301                         break;
10302                 case -EOPNOTSUPP:
10303                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10304                         pci_free_irq_vectors(pdev);
10305
10306                         ioa_cfg->nvectors = 1;
10307                         ioa_cfg->clear_isr = 1;
10308                         break;
10309                 default:
10310                         goto out_msi_disable;
10311                 }
10312         }
10313
10314         ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
10315                                 (unsigned int)num_online_cpus(),
10316                                 (unsigned int)IPR_MAX_HRRQ_NUM);
10317
	rc = ipr_save_pcix_cmd_reg(ioa_cfg);
	if (rc)
		goto out_msi_disable;

	rc = ipr_set_pcix_cmd_reg(ioa_cfg);
	if (rc)
		goto out_msi_disable;
10323
10324         rc = ipr_alloc_mem(ioa_cfg);
10325         if (rc < 0) {
10326                 dev_err(&pdev->dev,
10327                         "Couldn't allocate enough memory for device driver!\n");
10328                 goto out_msi_disable;
10329         }
10330
10331         /* Save away PCI config space for use following IOA reset */
10332         rc = pci_save_state(pdev);
10333
10334         if (rc != PCIBIOS_SUCCESSFUL) {
10335                 dev_err(&pdev->dev, "Failed to save PCI config space\n");
10336                 rc = -EIO;
10337                 goto cleanup_nolog;
10338         }
10339
10340         /*
10341          * If HRRQ updated interrupt is not masked, or reset alert is set,
10342          * the card is in an unknown state and needs a hard reset
10343          */
10344         mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
10345         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
10346         uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
10347         if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
10348                 ioa_cfg->needs_hard_reset = 1;
10349         if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
10350                 ioa_cfg->needs_hard_reset = 1;
10351         if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
10352                 ioa_cfg->ioa_unit_checked = 1;
10353
10354         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10355         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10356         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10357
10358         if (pdev->msi_enabled || pdev->msix_enabled) {
10359                 name_msi_vectors(ioa_cfg);
10360                 rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
10361                         ioa_cfg->vectors_info[0].desc,
10362                         &ioa_cfg->hrrq[0]);
10363                 if (!rc)
10364                         rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
10365         } else {
10366                 rc = request_irq(pdev->irq, ipr_isr,
10367                          IRQF_SHARED,
10368                          IPR_NAME, &ioa_cfg->hrrq[0]);
10369         }
10370         if (rc) {
10371                 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
10372                         pdev->irq, rc);
10373                 goto cleanup_nolog;
10374         }
10375
10376         if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
10377             (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
10378                 ioa_cfg->needs_warm_reset = 1;
10379                 ioa_cfg->reset = ipr_reset_slot_reset;
10380
10381                 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
10382                                                                 WQ_MEM_RECLAIM, host->host_no);
10383
10384                 if (!ioa_cfg->reset_work_q) {
10385                         dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
10386                         rc = -ENOMEM;
10387                         goto out_free_irq;
10388                 }
	} else {
		ioa_cfg->reset = ipr_reset_start_bist;
	}
10391
10392         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10393         list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
10394         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10395
10396         LEAVE;
10397 out:
10398         return rc;
10399
10400 out_free_irq:
10401         ipr_free_irqs(ioa_cfg);
10402 cleanup_nolog:
10403         ipr_free_mem(ioa_cfg);
10404 out_msi_disable:
10405         ipr_wait_for_pci_err_recovery(ioa_cfg);
10406         pci_free_irq_vectors(pdev);
10407 cleanup_nomem:
10408         iounmap(ipr_regs);
10409 out_disable:
10410         pci_disable_device(pdev);
10411 out_release_regions:
10412         pci_release_regions(pdev);
10413 out_scsi_host_put:
10414         scsi_host_put(host);
10415         goto out;
10416 }
10417
10418 /**
10419  * ipr_initiate_ioa_bringdown - Bring down an adapter
10420  * @ioa_cfg:            ioa config struct
10421  * @shutdown_type:      shutdown type
10422  *
10423  * Description: This function will initiate bringing down the adapter.
10424  * This consists of issuing an IOA shutdown to the adapter
10425  * to flush the cache, and running BIST.
10426  * If the caller needs to wait on the completion of the reset,
10427  * the caller must sleep on the reset_wait_q.
10428  *
10429  * Return value:
10430  *      none
10431  **/
10432 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
10433                                        enum ipr_shutdown_type shutdown_type)
10434 {
10435         ENTER;
10436         if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
10437                 ioa_cfg->sdt_state = ABORT_DUMP;
10438         ioa_cfg->reset_retries = 0;
10439         ioa_cfg->in_ioa_bringdown = 1;
10440         ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
10441         LEAVE;
10442 }
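/*
 * As the comment above notes, callers that need to synchronize with the
 * bringdown sleep on reset_wait_q.  The typical pattern (used by
 * __ipr_remove() below) is:
 *
 *	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
 *	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
 *	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 *	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 */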
10443
10444 /**
10445  * __ipr_remove - Remove a single adapter
10446  * @pdev:       pci device struct
10447  *
10448  * Adapter hot plug remove entry point.
10449  *
10450  * Return value:
10451  *      none
10452  **/
10453 static void __ipr_remove(struct pci_dev *pdev)
10454 {
10455         unsigned long host_lock_flags = 0;
10456         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10457         int i;
10458         unsigned long driver_lock_flags;
10459         ENTER;
10460
10461         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10462         while (ioa_cfg->in_reset_reload) {
10463                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10464                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10465                 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10466         }
10467
10468         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
10469                 spin_lock(&ioa_cfg->hrrq[i]._lock);
10470                 ioa_cfg->hrrq[i].removing_ioa = 1;
10471                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
10472         }
10473         wmb();
10474         ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10475
10476         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10477         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10478         flush_work(&ioa_cfg->work_q);
10479         if (ioa_cfg->reset_work_q)
10480                 flush_workqueue(ioa_cfg->reset_work_q);
10481         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
10482         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10483
10484         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10485         list_del(&ioa_cfg->queue);
10486         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10487
10488         if (ioa_cfg->sdt_state == ABORT_DUMP)
10489                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
10490         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10491
10492         ipr_free_all_resources(ioa_cfg);
10493
10494         LEAVE;
10495 }
10496
10497 /**
10498  * ipr_remove - IOA hot plug remove entry point
10499  * @pdev:       pci device struct
10500  *
10501  * Adapter hot plug remove entry point.
10502  *
10503  * Return value:
10504  *      none
10505  **/
10506 static void ipr_remove(struct pci_dev *pdev)
10507 {
10508         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10509
10510         ENTER;
10511
10512         ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10513                               &ipr_trace_attr);
10514         ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10515                              &ipr_dump_attr);
10516         sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10517                         &ipr_ioa_async_err_log);
10518         scsi_remove_host(ioa_cfg->host);
10519
10520         __ipr_remove(pdev);
10521
10522         LEAVE;
10523 }
10524
/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:               PCI device struct
 * @dev_id:             PCI device id struct
 *
 * Return value:
 *      0 on success / non-zero on failure
 **/
10531 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
10532 {
10533         struct ipr_ioa_cfg *ioa_cfg;
10534         unsigned long flags;
10535         int rc, i;
10536
10537         rc = ipr_probe_ioa(pdev, dev_id);
10538
10539         if (rc)
10540                 return rc;
10541
10542         ioa_cfg = pci_get_drvdata(pdev);
10543         rc = ipr_probe_ioa_part2(ioa_cfg);
10544
10545         if (rc) {
10546                 __ipr_remove(pdev);
10547                 return rc;
10548         }
10549
10550         rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
10551
10552         if (rc) {
10553                 __ipr_remove(pdev);
10554                 return rc;
10555         }
10556
10557         rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
10558                                    &ipr_trace_attr);
10559
10560         if (rc) {
10561                 scsi_remove_host(ioa_cfg->host);
10562                 __ipr_remove(pdev);
10563                 return rc;
10564         }
10565
10566         rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
10567                         &ipr_ioa_async_err_log);
10568
	if (rc) {
		/* the dump file has not been created yet, so only unwind
		 * what was actually set up above
		 */
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				&ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}
10578
10579         rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
10580                                    &ipr_dump_attr);
10581
10582         if (rc) {
10583                 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10584                                       &ipr_ioa_async_err_log);
10585                 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10586                                       &ipr_trace_attr);
10587                 scsi_remove_host(ioa_cfg->host);
10588                 __ipr_remove(pdev);
10589                 return rc;
10590         }
10591         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10592         ioa_cfg->scan_enabled = 1;
10593         schedule_work(&ioa_cfg->work_q);
10594         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10595
10596         ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
10597
10598         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10599                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
10600                         irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
10601                                         ioa_cfg->iopoll_weight, ipr_iopoll);
10602                 }
10603         }
10604
10605         scsi_scan_host(ioa_cfg->host);
10606
10607         return 0;
10608 }
10609
10610 /**
10611  * ipr_shutdown - Shutdown handler.
10612  * @pdev:       pci device struct
10613  *
10614  * This function is invoked upon system shutdown/reboot. It will issue
10615  * an adapter shutdown to the adapter to flush the write cache.
10616  *
10617  * Return value:
10618  *      none
10619  **/
10620 static void ipr_shutdown(struct pci_dev *pdev)
10621 {
10622         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10623         unsigned long lock_flags = 0;
10624         enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
10625         int i;
10626
10627         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10628         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10629                 ioa_cfg->iopoll_weight = 0;
10630                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
10631                         irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
10632         }
10633
10634         while (ioa_cfg->in_reset_reload) {
10635                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10636                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10637                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10638         }
10639
10640         if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
10641                 shutdown_type = IPR_SHUTDOWN_QUIESCE;
10642
10643         ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
10644         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10645         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10646         if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
10647                 ipr_free_irqs(ioa_cfg);
10648                 pci_disable_device(ioa_cfg->pdev);
10649         }
10650 }
10651
static const struct pci_device_id ipr_pci_table[] = {
10653         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10654                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
10655         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10656                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
10657         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10658                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
10659         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10660                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
10661         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10662                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
10663         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10664                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
10665         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10666                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
10667         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10668                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10669                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10670         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10671               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10672         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10673               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10674               IPR_USE_LONG_TRANSOP_TIMEOUT },
10675         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10676               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10677               IPR_USE_LONG_TRANSOP_TIMEOUT },
10678         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10679               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10680         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10681               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10682               IPR_USE_LONG_TRANSOP_TIMEOUT},
10683         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10684               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10685               IPR_USE_LONG_TRANSOP_TIMEOUT },
10686         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10687               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10688               IPR_USE_LONG_TRANSOP_TIMEOUT },
10689         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10690               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10691         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10692               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10693         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10694               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
10695               IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
10696         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
10697                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
10698         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10699                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
10700         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10701                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10702                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10703         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10704                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10705                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10706         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10707                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10708         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10709                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10710         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10711                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
10712         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10713                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10714         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10715                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10716         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10717                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
10718         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10719                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
10720         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10721                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
10722         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10723                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
10724         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10725                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10726         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10727                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
10728         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10729                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10730         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10731                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10732         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10733                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10734         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10735                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
10736         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10737                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10738         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10739                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10740         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10741                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10742         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10743                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10744         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10745                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10746         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10747                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10748         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10749                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10750         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10751                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
10752         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10753                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10754         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10755                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10756         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10757                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
10758         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10759                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
10760         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10761                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
10762         { }
10763 };
10764 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
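/*
 * MODULE_DEVICE_TABLE() exports the ID table above to userspace so that
 * udev/modprobe can autoload this driver when a matching PCI device is
 * discovered.
 */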
10765
10766 static const struct pci_error_handlers ipr_err_handler = {
10767         .error_detected = ipr_pci_error_detected,
10768         .mmio_enabled = ipr_pci_mmio_enabled,
10769         .slot_reset = ipr_pci_slot_reset,
10770 };
10771
10772 static struct pci_driver ipr_driver = {
10773         .name = IPR_NAME,
10774         .id_table = ipr_pci_table,
10775         .probe = ipr_probe,
10776         .remove = ipr_remove,
10777         .shutdown = ipr_shutdown,
10778         .err_handler = &ipr_err_handler,
10779 };
10780
/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
10787 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10788 {
10789         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
10790 }
10791
/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:         notifier block
 * @event:      system state change (SYS_RESTART, SYS_HALT or SYS_POWER_OFF)
 * @buf:        unused
 *
 * Return value:
 *      NOTIFY_OK on handled events / NOTIFY_DONE otherwise
 **/
10798 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10799 {
10800         struct ipr_cmnd *ipr_cmd;
10801         struct ipr_ioa_cfg *ioa_cfg;
10802         unsigned long flags = 0, driver_lock_flags;
10803
10804         if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10805                 return NOTIFY_DONE;
10806
10807         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10808
10809         list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10810                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10811                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
10812                     (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
10813                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10814                         continue;
10815                 }
10816
10817                 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10818                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10819                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10820                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10821                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10822
10823                 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10824                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10825         }
10826         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10827
10828         return NOTIFY_OK;
10829 }
10830
static struct notifier_block ipr_notifier = {
	.notifier_call = ipr_halt,
};
10834
10835 /**
10836  * ipr_init - Module entry point
10837  *
10838  * Return value:
10839  *      0 on success / negative value on failure
10840  **/
10841 static int __init ipr_init(void)
10842 {
10843         ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10844                  IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10845
10846         register_reboot_notifier(&ipr_notifier);
10847         return pci_register_driver(&ipr_driver);
10848 }
10849
10850 /**
10851  * ipr_exit - Module unload
10852  *
10853  * Module unload entry point.
10854  *
10855  * Return value:
10856  *      none
10857  **/
10858 static void __exit ipr_exit(void)
10859 {
10860         unregister_reboot_notifier(&ipr_notifier);
10861         pci_unregister_driver(&ipr_driver);
10862 }
10863
10864 module_init(ipr_init);
10865 module_exit(ipr_exit);