/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *      - Ultra 320 SCSI controller
 *      - PCI-X host interface
 *      - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *      - Non-Volatile Write Cache
 *      - Supports attachment of non-RAID disks, tape, and optical devices
 *      - RAID Levels 0, 5, 10
 *      - Hot spare
 *      - Background Parity Checking
 *      - Background Data Scrubbing
 *      - Ability to increase the capacity of an existing RAID 5 disk array
 *              by adding disks
 *
 * Driver Features:
 *      - Tagged command queuing
 *      - Adapter microcode download
 *      - PCI hot plug
 *      - SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 2;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
        { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
                .mailbox = 0x0042C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x0022C,
                        .clr_interrupt_mask_reg = 0x00230,
                        .clr_interrupt_mask_reg32 = 0x00230,
                        .sense_interrupt_mask_reg = 0x0022C,
                        .sense_interrupt_mask_reg32 = 0x0022C,
                        .clr_interrupt_reg = 0x00228,
                        .clr_interrupt_reg32 = 0x00228,
                        .sense_interrupt_reg = 0x00224,
                        .sense_interrupt_reg32 = 0x00224,
                        .ioarrin_reg = 0x00404,
                        .sense_uproc_interrupt_reg = 0x00214,
                        .sense_uproc_interrupt_reg32 = 0x00214,
                        .set_uproc_interrupt_reg = 0x00214,
                        .set_uproc_interrupt_reg32 = 0x00214,
                        .clr_uproc_interrupt_reg = 0x00218,
                        .clr_uproc_interrupt_reg32 = 0x00218
                }
        },
        { /* Snipe and Scamp */
                .mailbox = 0x0052C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x00288,
                        .clr_interrupt_mask_reg = 0x0028C,
                        .clr_interrupt_mask_reg32 = 0x0028C,
                        .sense_interrupt_mask_reg = 0x00288,
                        .sense_interrupt_mask_reg32 = 0x00288,
                        .clr_interrupt_reg = 0x00284,
                        .clr_interrupt_reg32 = 0x00284,
                        .sense_interrupt_reg = 0x00280,
                        .sense_interrupt_reg32 = 0x00280,
                        .ioarrin_reg = 0x00504,
                        .sense_uproc_interrupt_reg = 0x00290,
                        .sense_uproc_interrupt_reg32 = 0x00290,
                        .set_uproc_interrupt_reg = 0x00290,
                        .set_uproc_interrupt_reg32 = 0x00290,
                        .clr_uproc_interrupt_reg = 0x00294,
                        .clr_uproc_interrupt_reg32 = 0x00294
                }
        },
        { /* CRoC */
                .mailbox = 0x00044,
                .max_cmds = 1000,
                .cache_line_size = 0x20,
                .clear_isr = 0,
                .iopoll_weight = 64,
                {
                        .set_interrupt_mask_reg = 0x00010,
                        .clr_interrupt_mask_reg = 0x00018,
                        .clr_interrupt_mask_reg32 = 0x0001C,
                        .sense_interrupt_mask_reg = 0x00010,
                        .sense_interrupt_mask_reg32 = 0x00014,
                        .clr_interrupt_reg = 0x00008,
                        .clr_interrupt_reg32 = 0x0000C,
                        .sense_interrupt_reg = 0x00000,
                        .sense_interrupt_reg32 = 0x00004,
                        .ioarrin_reg = 0x00070,
                        .sense_uproc_interrupt_reg = 0x00020,
                        .sense_uproc_interrupt_reg32 = 0x00024,
                        .set_uproc_interrupt_reg = 0x00020,
                        .set_uproc_interrupt_reg32 = 0x00024,
                        .clr_uproc_interrupt_reg = 0x00028,
                        .clr_uproc_interrupt_reg32 = 0x0002C,
                        .init_feedback_reg = 0x0005C,
                        .dump_addr_reg = 0x00064,
                        .dump_data_reg = 0x00068,
                        .endian_swap_reg = 0x00084
                }
        },
};

static const struct ipr_chip_t ipr_chip[] = {
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
        IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

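/*
 * Illustrative note (inferred from the max_speed parameter description
 * below): the max_speed module parameter selects one of the entries above,
 * so 0 corresponds to the 80 MB/s rate, 1 to U160, and 2 to U320.
 */
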
MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
                 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16).  (default:2)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

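/*
 * Illustrative example (not part of the original source): loading the
 * driver with a few of the parameters declared above. The names are the
 * module_param_named() aliases and the values shown are arbitrary:
 *
 *   modprobe ipr max_speed=2 log_level=2 number_of_msix=4 dual_ioa_raid=1
 */
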
/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
        {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8155: An unknown error was received"},
        {0x00330000, 0, 0,
        "Soft underlength error"},
        {0x005A0000, 0, 0,
        "Command to be cancelled not found"},
        {0x00808000, 0, 0,
        "Qualified success"},
        {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Soft device bus error recovered by the IOA"},
        {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4101: Soft device bus fabric error"},
        {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block guard error recovered by the device"},
        {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block reference tag error recovered by the device"},
        {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered scatter list tag / sequence number error"},
        {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
        {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered logical block sequence number error on IOA to Host transfer"},
        {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Recovered logical block reference tag error detected by the IOA"},
        {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Logical block guard error recovered by the IOA"},
        {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Device sector reassign successful"},
        {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by device rewrite procedures"},
        {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
        "7001: IOA sector reassignment successful"},
        {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Soft media error. Sector reassignment recommended"},
        {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by IOA rewrite procedures"},
        {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft PCI bus error recovered by the IOA"},
        {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the IOA"},
        {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the device"},
        {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft IOA error recovered by the IOA"},
        {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFA: Undefined device response recovered by the IOA"},
        {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device bus error, message or command phase"},
        {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Task Management Function failed"},
        {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Failure prediction threshold exceeded"},
        {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8009: Impending cache battery pack failure"},
        {0x02040100, 0, 0,
        "Logical Unit in process of becoming ready"},
        {0x02040200, 0, 0,
        "Initializing command required"},
        {0x02040400, 0, 0,
        "34FF: Disk device format in progress"},
        {0x02040C00, 0, 0,
        "Logical unit not accessible, target port in unavailable state"},
        {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9070: IOA requested reset"},
        {0x023F0000, 0, 0,
        "Synchronization required"},
        {0x02408500, 0, 0,
        "IOA microcode download required"},
        {0x02408600, 0, 0,
        "Device bus connection is prohibited by host"},
        {0x024E0000, 0, 0,
        "Not ready, IOA shutdown"},
        {0x025A0000, 0, 0,
        "Not ready, IOA has been shutdown"},
        {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: Storage subsystem configuration error"},
        {0x03110B00, 0, 0,
        "FFF5: Medium error, data unreadable, recommend reassign"},
        {0x03110C00, 0, 0,
        "7000: Medium error, data unreadable, do not reassign"},
        {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF3: Disk media format bad"},
        {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3002: Addressed device failed to respond to selection"},
        {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3100: Device bus error"},
        {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3109: IOA timed out a device command"},
        {0x04088000, 0, 0,
        "3120: SCSI bus is not operational"},
        {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4100: Hard device bus fabric error"},
        {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block guard error detected by the device"},
        {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block reference tag error detected by the device"},
        {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Scatter list tag / sequence number error"},
        {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Logical block CRC error on IOA to Host transfer"},
        {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Logical block sequence number error on IOA to Host transfer"},
        {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block reference tag error detected by the IOA"},
        {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block guard error detected by the IOA"},
        {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9000: IOA reserved area data check"},
        {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9001: IOA reserved area invalid data pattern"},
        {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9002: IOA reserved area LRC error"},
        {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
        "Hardware Error, IOA metadata access error"},
        {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
        "102E: Out of alternate sectors for disk storage"},
        {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer underlength error"},
        {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer overlength error"},
        {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3400: Logical unit failure"},
        {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Device microcode is corrupt"},
        {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: PCI bus error"},
        {0x04430000, 1, 0,
        "Unsupported device bus message received"},
        {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Disk device problem"},
        {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Permanent IOA failure"},
        {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3010: Disk device returned wrong response to IOA"},
        {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
        "8151: IOA microcode error"},
        {0x04448500, 0, 0,
        "Device bus status error"},
        {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
        "8157: IOA error requiring IOA reset to recover"},
        {0x04448700, 0, 0,
        "ATA device status error"},
        {0x04490000, 0, 0,
        "Message reject received from the device"},
        {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8008: A permanent cache battery pack failure occurred"},
        {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9090: Disk unit has been modified after the last known status"},
        {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9081: IOA detected device error"},
        {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
        "9082: IOA detected device error"},
        {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: Device bus error, message or command phase"},
        {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: SAS Command / Task Management Function failed"},
        {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9091: Incorrect hardware configuration change has been detected"},
        {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9073: Invalid multi-adapter configuration"},
        {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4010: Incorrect connection between cascaded expanders"},
        {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4020: Connections exceed IOA design limits"},
        {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4030: Incorrect multipath connection"},
        {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4110: Unsupported enclosure function"},
        {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
        "4120: SAS cable VPD cannot be read"},
        {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Command to logical unit failed"},
        {0x05240000, 1, 0,
        "Illegal request, invalid request type or request packet"},
        {0x05250000, 0, 0,
        "Illegal request, invalid resource handle"},
        {0x05258000, 0, 0,
        "Illegal request, commands not allowed to this device"},
        {0x05258100, 0, 0,
        "Illegal request, command not allowed to a secondary adapter"},
        {0x05258200, 0, 0,
        "Illegal request, command not allowed to a non-optimized resource"},
        {0x05260000, 0, 0,
        "Illegal request, invalid field in parameter list"},
        {0x05260100, 0, 0,
        "Illegal request, parameter not supported"},
        {0x05260200, 0, 0,
        "Illegal request, parameter value invalid"},
        {0x052C0000, 0, 0,
        "Illegal request, command sequence error"},
        {0x052C8000, 1, 0,
        "Illegal request, dual adapter support not enabled"},
        {0x052C8100, 1, 0,
        "Illegal request, another cable connector was physically disabled"},
        {0x054E8000, 1, 0,
        "Illegal request, inconsistent group id/group count"},
        {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9031: Array protection temporarily suspended, protection resuming"},
        {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9040: Array protection temporarily suspended, protection resuming"},
        {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4080: IOA exceeded maximum operating temperature"},
        {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4085: Service required"},
        {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3140: Device bus not ready to ready transition"},
        {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset"},
        {0x06290500, 0, 0,
        "FFFE: SCSI bus transition to single ended"},
        {0x06290600, 0, 0,
        "FFFE: SCSI bus transition to LVD"},
        {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset by another initiator"},
        {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3029: A device replacement has occurred"},
        {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4102: Device bus fabric performance degradation"},
        {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9051: IOA cache data exists for a missing or failed device"},
        {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
        {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9025: Disk unit is not supported at its physical location"},
        {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: IOA detected a SCSI bus configuration error"},
        {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3150: SCSI bus configuration error"},
        {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9074: Asymmetric advanced function disk configuration"},
        {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4040: Incomplete multipath connection between IOA and enclosure"},
        {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
        "4041: Incomplete multipath connection between enclosure and device"},
        {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9075: Incomplete multipath connection between IOA and remote IOA"},
        {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9076: Configuration error, missing remote IOA"},
        {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4050: Enclosure does not support a required multipath function"},
        {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
        "4121: Configuration error, required cable is missing"},
        {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
        "4122: Cable is not plugged into the correct location on remote IOA"},
        {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "4123: Configuration error, invalid cable vital product data"},
        {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "4124: Configuration error, both cable ends are plugged into the same IOA"},
        {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4070: Logically bad block written on device"},
        {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9041: Array protection temporarily suspended"},
        {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9042: Corrupt array parity detected on specified device"},
        {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9030: Array no longer protected due to missing or failed disk unit"},
        {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9071: Link operational transition"},
        {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9072: Link not operational transition"},
        {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9032: Array exposed but still protected"},
        {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
        "70DD: Device forced failed by disrupt device command"},
        {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4061: Multipath redundancy level got better"},
        {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4060: Multipath redundancy level got worse"},
        {0x07270000, 0, 0,
        "Failure due to other device"},
        {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9008: IOA does not support functions expected by devices"},
        {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9010: Cache data associated with attached devices cannot be found"},
        {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9011: Cache data belongs to devices other than those attached"},
        {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9020: Array missing 2 or more devices with only 1 device present"},
        {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9021: Array missing 2 or more devices with 2 or more devices present"},
        {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9022: Exposed array is missing a required device"},
        {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9023: Array member(s) not at required physical locations"},
        {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9024: Array not functional due to present hardware configuration"},
        {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9026: Array not functional due to present hardware configuration"},
        {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9027: Array is missing a device and parity is out of sync"},
        {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9028: Maximum number of arrays already exist"},
        {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9050: Required cache data cannot be located for a disk unit"},
        {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9052: Cache data exists for a device that has been modified"},
        {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9054: IOA resources not available due to previous problems"},
        {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9092: Disk unit requires initialization before use"},
        {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9029: Incorrect hardware configuration change has been detected"},
        {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9060: One or more disk pairs are missing from an array"},
        {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9061: One or more disks are missing from an array"},
        {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9062: One or more disks are missing from an array"},
        {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9063: Maximum number of functional arrays has been exceeded"},
        {0x07279A00, 0, 0,
        "Data protect, other volume set problem"},
        {0x0B260000, 0, 0,
        "Aborted command, invalid descriptor"},
        {0x0B3F9000, 0, 0,
        "Target operating conditions have changed, dual adapter takeover"},
        {0x0B530200, 0, 0,
        "Aborted command, medium removal prevented"},
        {0x0B5A0000, 0, 0,
        "Command terminated by host"},
        {0x0B5B8000, 0, 0,
        "Aborted command, command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
        { "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
        { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
        { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
        { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
        { "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

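/*
 * Note on the table above (a working assumption; the matching code lives
 * elsewhere in this driver): each entry pairs an enclosure product id with
 * a per-byte compare vector and a maximum bus speed in MB/s. Bytes marked
 * 'X' in the vector appear to participate in the product id comparison,
 * while '*' bytes are ignored so one entry can match several variants.
 */
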
/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
                                   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:    ipr command struct
 * @type:       trace type
 * @add_data:   additional data
 *
 * Return value:
 *      none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
                         u8 type, u32 add_data)
{
        struct ipr_trace_entry *trace_entry;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        trace_entry = &ioa_cfg->trace[atomic_add_return
                        (1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
        trace_entry->time = jiffies;
        trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
        trace_entry->type = type;
        if (ipr_cmd->ioa_cfg->sis64)
                trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
        else
                trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
        trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
        trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
        trace_entry->u.add_data = add_data;
        wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
        unsigned long lock_flags;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        ipr_cmd->done(ipr_cmd);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
        struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
        struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
        dma_addr_t dma_addr = ipr_cmd->dma_addr;
        int hrrq_id;

        hrrq_id = ioarcb->cmd_pkt.hrrq_id;
        memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
        ioarcb->cmd_pkt.hrrq_id = hrrq_id;
        ioarcb->data_transfer_length = 0;
        ioarcb->read_data_transfer_length = 0;
        ioarcb->ioadl_len = 0;
        ioarcb->read_ioadl_len = 0;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioarcb->u.sis64_addr_data.data_ioadl_addr =
                        cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
                ioasa64->u.gata.status = 0;
        } else {
                ioarcb->write_ioadl_addr =
                        cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
                ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
                ioasa->u.gata.status = 0;
        }

        ioasa->hdr.ioasc = 0;
        ioasa->hdr.residual_data_len = 0;
        ipr_cmd->scsi_cmd = NULL;
        ipr_cmd->qc = NULL;
        ipr_cmd->sense_buffer[0] = 0;
        ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:    ipr command struct
 * @fast_done:  fast done function call-back
 *
 * Return value:
 *      none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
                              void (*fast_done) (struct ipr_cmnd *))
{
        ipr_reinit_ipr_cmnd(ipr_cmd);
        ipr_cmd->u.scratch = 0;
        ipr_cmd->sibling = NULL;
        ipr_cmd->fast_done = fast_done;
        init_timer(&ipr_cmd->timer);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:       hrr queue
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
        struct ipr_cmnd *ipr_cmd = NULL;

        if (likely(!list_empty(&hrrq->hrrq_free_q))) {
                ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
                        struct ipr_cmnd, queue);
                list_del(&ipr_cmd->queue);
        }

        return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd =
                __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
        ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
        return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:    ioa config struct
 * @clr_ints:     interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *      none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
                                          u32 clr_ints)
{
        volatile u32 int_reg;
        int i;

        /* Stop new interrupts */
        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
                spin_lock(&ioa_cfg->hrrq[i]._lock);
                ioa_cfg->hrrq[i].allow_interrupts = 0;
                spin_unlock(&ioa_cfg->hrrq[i]._lock);
        }
        wmb();

        /* Set interrupt mask to stop all new interrupts */
        if (ioa_cfg->sis64)
                writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
        else
                writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

        /* Clear any pending interrupts */
        if (ioa_cfg->sis64)
                writel(~0, ioa_cfg->regs.clr_interrupt_reg);
        writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

        if (pcix_cmd_reg == 0)
                return 0;

        if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
                dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
                return -EIO;
        }

        ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
        return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

        if (pcix_cmd_reg) {
                if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                          ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
                        dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
                        return -EIO;
                }
        }

        return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct ata_queued_cmd *qc = ipr_cmd->qc;
        struct ipr_sata_port *sata_port = qc->ap->private_data;

        qc->err_mask |= AC_ERR_OTHER;
        sata_port->ioasa.status |= ATA_BUSY;
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
        ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

        scsi_cmd->result |= (DID_ERROR << 16);

        scsi_dma_unmap(ipr_cmd->scsi_cmd);
        scsi_cmd->scsi_done(scsi_cmd);
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:    ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *      none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd, *temp;
        struct ipr_hrr_queue *hrrq;

        ENTER;
        for_each_hrrq(hrrq, ioa_cfg) {
                spin_lock(&hrrq->_lock);
                list_for_each_entry_safe(ipr_cmd,
                                        temp, &hrrq->hrrq_pending_q, queue) {
                        list_del(&ipr_cmd->queue);

                        ipr_cmd->s.ioasa.hdr.ioasc =
                                cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
                        ipr_cmd->s.ioasa.hdr.ilid =
                                cpu_to_be32(IPR_DRIVER_ILID);

                        if (ipr_cmd->scsi_cmd)
                                ipr_cmd->done = ipr_scsi_eh_done;
                        else if (ipr_cmd->qc)
                                ipr_cmd->done = ipr_sata_eh_done;

                        ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
                                     IPR_IOASC_IOA_WAS_RESET);
                        del_timer(&ipr_cmd->timer);
                        ipr_cmd->done(ipr_cmd);
                }
                spin_unlock(&hrrq->_lock);
        }
        LEAVE;
}

/**
 * ipr_send_command -  Send driver initiated requests.
 * @ipr_cmd:            ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 *      none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

        if (ioa_cfg->sis64) {
                /* The default size is 256 bytes */
                send_dma_addr |= 0x1;

                /* If the number of ioadls * size of ioadl > 128 bytes,
                   then use a 512 byte ioarcb */
                if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
                        send_dma_addr |= 0x4;
                writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
        } else
                writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

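/*
 * Worked example for the IOARCB size bits above (assuming a 16 byte
 * struct ipr_ioadl64_desc): 8 descriptors occupy 8 * 16 = 128 bytes and
 * still fit the default 256 byte IOARCB, so only bit 0 is set in the
 * address written to the IOARRIN register; a 9th descriptor pushes the
 * total past 128 bytes, so bit 2 is also set to request the 512 byte
 * IOARCB.
 */
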
/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:            ipr command struct
 * @done:               done function
 * @timeout_func:       timeout function
 * @timeout:            timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *      none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
                       void (*done) (struct ipr_cmnd *),
                       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

        ipr_cmd->done = done;

        ipr_cmd->timer.data = (unsigned long) ipr_cmd;
        ipr_cmd->timer.expires = jiffies + timeout;
        ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

        add_timer(&ipr_cmd->timer);

        ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

        ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:    ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *      none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
        if (ipr_cmd->sibling)
                ipr_cmd->sibling = NULL;
        else
                complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:    ipr command struct
 * @dma_addr:   dma address
 * @len:        transfer length
 * @flags:      ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 *      nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
                           u32 len, int flags)
{
        struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
        struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

        ipr_cmd->dma_use_sg = 1;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioadl64->flags = cpu_to_be32(flags);
                ioadl64->data_len = cpu_to_be32(len);
                ioadl64->address = cpu_to_be64(dma_addr);

                ipr_cmd->ioarcb.ioadl_len =
                        cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
                ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
        } else {
                ioadl->flags_and_data_len = cpu_to_be32(flags | len);
                ioadl->address = cpu_to_be32(dma_addr);

                if (flags == IPR_IOADL_FLAGS_READ_LAST) {
                        ipr_cmd->ioarcb.read_ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
                } else {
                        ipr_cmd->ioarcb.ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
                }
        }
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:    ipr command struct
 * @timeout_func:       function to invoke if command times out
 * @timeout:    timeout
 *
 * Return value:
 *      none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
                                  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
                                  u32 timeout)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        init_completion(&ipr_cmd->completion);
        ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

        spin_unlock_irq(ioa_cfg->host->host_lock);
        wait_for_completion(&ipr_cmd->completion);
        spin_lock_irq(ioa_cfg->host->host_lock);
}

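/**
 * ipr_get_hrrq_index - Select an HRR queue for a new command
 * @ioa_cfg:    ioa config struct
 *
 * With a single queue this always returns 0; otherwise it round-robins
 * across queues 1..(hrrq_num - 1), leaving queue 0 (IPR_INIT_HRRQ) for
 * internally generated commands.
 *
 * Return value:
 *      hrrq index
 **/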
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
        if (ioa_cfg->hrrq_num == 1)
                return 0;
        else
                return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:    ioa config struct
 * @type:       HCAM type
 * @hostrcb:    hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *      none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
                          struct ipr_hostrcb *hostrcb)
{
        struct ipr_cmnd *ipr_cmd;
        struct ipr_ioarcb *ioarcb;

        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
                ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
                list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

                ipr_cmd->u.hostrcb = hostrcb;
                ioarcb = &ipr_cmd->ioarcb;

                ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
                ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
                ioarcb->cmd_pkt.cdb[1] = type;
                ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
                ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

                ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
                               sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

                if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
                        ipr_cmd->done = ipr_process_ccn;
                else
                        ipr_cmd->done = ipr_process_error;

                ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

                ipr_send_command(ipr_cmd);
        } else {
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
        }
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:        resource entry struct
 * @proto:      cfgte device bus protocol value
 *
 * Return value:
 *      none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
        switch (proto) {
        case IPR_PROTO_SATA:
        case IPR_PROTO_SAS_STP:
                res->ata_class = ATA_DEV_ATA;
                break;
        case IPR_PROTO_SATA_ATAPI:
        case IPR_PROTO_SAS_STP_ATAPI:
                res->ata_class = ATA_DEV_ATAPI;
                break;
        default:
                res->ata_class = ATA_DEV_UNKNOWN;
                break;
        }
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
                               struct ipr_config_table_entry_wrapper *cfgtew)
{
        int found = 0;
        unsigned int proto;
        struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
        struct ipr_resource_entry *gscsi_res = NULL;

        res->needs_sync_complete = 0;
        res->in_erp = 0;
        res->add_to_ml = 0;
        res->del_from_ml = 0;
        res->resetting_device = 0;
        res->reset_occurred = 0;
        res->sdev = NULL;
        res->sata_port = NULL;

        if (ioa_cfg->sis64) {
                proto = cfgtew->u.cfgte64->proto;
                res->res_flags = cfgtew->u.cfgte64->res_flags;
                res->qmodel = IPR_QUEUEING_MODEL64(res);
                res->type = cfgtew->u.cfgte64->res_type;

                memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
                        sizeof(res->res_path));

                res->bus = 0;
                memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                        sizeof(res->dev_lun.scsi_lun));
                res->lun = scsilun_to_int(&res->dev_lun);

                if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
                        list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
                                if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
                                        found = 1;
                                        res->target = gscsi_res->target;
                                        break;
                                }
                        }
                        if (!found) {
                                res->target = find_first_zero_bit(ioa_cfg->target_ids,
                                                                  ioa_cfg->max_devs_supported);
                                set_bit(res->target, ioa_cfg->target_ids);
                        }
                } else if (res->type == IPR_RES_TYPE_IOAFP) {
                        res->bus = IPR_IOAFP_VIRTUAL_BUS;
                        res->target = 0;
                } else if (res->type == IPR_RES_TYPE_ARRAY) {
                        res->bus = IPR_ARRAY_VIRTUAL_BUS;
                        res->target = find_first_zero_bit(ioa_cfg->array_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->array_ids);
                } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
                        res->bus = IPR_VSET_VIRTUAL_BUS;
                        res->target = find_first_zero_bit(ioa_cfg->vset_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->vset_ids);
                } else {
                        res->target = find_first_zero_bit(ioa_cfg->target_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->target_ids);
                }
        } else {
                proto = cfgtew->u.cfgte->proto;
                res->qmodel = IPR_QUEUEING_MODEL(res);
                res->flags = cfgtew->u.cfgte->flags;
                if (res->flags & IPR_IS_IOA_RESOURCE)
                        res->type = IPR_RES_TYPE_IOAFP;
                else
                        res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

                res->bus = cfgtew->u.cfgte->res_addr.bus;
                res->target = cfgtew->u.cfgte->res_addr.target;
                res->lun = cfgtew->u.cfgte->res_addr.lun;
                res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
        }

        ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
                              struct ipr_config_table_entry_wrapper *cfgtew)
{
        if (res->ioa_cfg->sis64) {
                if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
                                        sizeof(cfgtew->u.cfgte64->dev_id)) &&
                        !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                                        sizeof(cfgtew->u.cfgte64->lun))) {
                        return 1;
                }
        } else {
                if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
                    res->target == cfgtew->u.cfgte->res_addr.target &&
                    res->lun == cfgtew->u.cfgte->res_addr.lun)
                        return 1;
        }

        return 0;
}

/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:   resource path
 * @buffer:     buffer
 * @len:        length of buffer provided
 *
 * Return value:
 *      pointer to buffer
 **/
static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
        int i;
        char *p = buffer;

        *p = '\0';
        p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
        for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
                p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

        return buffer;
}

/**
 * ipr_format_res_path - Format the resource path for printing.
 * @ioa_cfg:    ioa config struct
 * @res_path:   resource path
 * @buffer:     buffer
 * @len:        length of buffer provided
 *
 * Return value:
 *      pointer to buffer
 **/
static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
                                 u8 *res_path, char *buffer, int len)
{
        char *p = buffer;

        *p = '\0';
        p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
        __ipr_format_res_path(res_path, p, len - (p - buffer));
        return buffer;
}

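/*
 * Illustrative example: for a res_path of { 0x00, 0x01, 0xff, ... },
 * __ipr_format_res_path() produces "00-01" (the loop stops at the 0xff
 * terminator), and ipr_format_res_path() prefixes the SCSI host number,
 * e.g. "2/00-01".
 */
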
/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
                                 struct ipr_config_table_entry_wrapper *cfgtew)
{
        char buffer[IPR_MAX_RES_PATH_LENGTH];
        unsigned int proto;
        int new_path = 0;

        if (res->ioa_cfg->sis64) {
                res->flags = cfgtew->u.cfgte64->flags;
                res->res_flags = cfgtew->u.cfgte64->res_flags;
                res->type = cfgtew->u.cfgte64->res_type;

                memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
                        sizeof(struct ipr_std_inq_data));

                res->qmodel = IPR_QUEUEING_MODEL64(res);
                proto = cfgtew->u.cfgte64->proto;
                res->res_handle = cfgtew->u.cfgte64->res_handle;
                res->dev_id = cfgtew->u.cfgte64->dev_id;

                memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                        sizeof(res->dev_lun.scsi_lun));

                if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
                                        sizeof(res->res_path))) {
                        memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
                                sizeof(res->res_path));
                        new_path = 1;
                }

                if (res->sdev && new_path)
                        sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
                                    ipr_format_res_path(res->ioa_cfg,
                                        res->res_path, buffer, sizeof(buffer)));
        } else {
                res->flags = cfgtew->u.cfgte->flags;
                if (res->flags & IPR_IS_IOA_RESOURCE)
                        res->type = IPR_RES_TYPE_IOAFP;
                else
                        res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

                memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
                        sizeof(struct ipr_std_inq_data));

                res->qmodel = IPR_QUEUEING_MODEL(res);
                proto = cfgtew->u.cfgte->proto;
                res->res_handle = cfgtew->u.cfgte->res_handle;
        }

        ipr_update_ata_class(res, proto);
}

1344 /**
1345  * ipr_clear_res_target - Clear the bit in the bit map representing the target
1346  *                        for the resource.
1347  * @res:        resource entry struct
1349  *
1350  * Return value:
1351  *      none
1352  **/
1353 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1354 {
1355         struct ipr_resource_entry *gscsi_res = NULL;
1356         struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1357
1358         if (!ioa_cfg->sis64)
1359                 return;
1360
1361         if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1362                 clear_bit(res->target, ioa_cfg->array_ids);
1363         else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1364                 clear_bit(res->target, ioa_cfg->vset_ids);
1365         else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1366                 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1367                         if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1368                                 return;
1369                 clear_bit(res->target, ioa_cfg->target_ids);
1370
1371         } else if (res->bus == 0)
1372                 clear_bit(res->target, ioa_cfg->target_ids);
1373 }
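/*
 * Editorial note (illustrative, not driver code): SIS64 target IDs are
 * allocated from per-class bitmaps (array_ids, vset_ids, target_ids). A
 * generic SCSI target bit can be shared by several LUNs carrying the same
 * dev_id, so the function above scans used_res_q and clears the bit only
 * when no other resource still references that dev_id, in effect a
 * reference count computed by scanning rather than stored. Lifecycle
 * sketch, with the allocation side paraphrased:
 *
 *	// allocation (paraphrased from ipr_init_res_entry):
 *	res->target = find_first_zero_bit(ioa_cfg->target_ids, max);
 *	set_bit(res->target, ioa_cfg->target_ids);
 *
 *	// teardown (this function):
 *	ipr_clear_res_target(res);  // no-op while a sibling LUN remains
 */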
1374
1375 /**
1376  * ipr_handle_config_change - Handle a config change from the adapter
1377  * @ioa_cfg:    ioa config struct
1378  * @hostrcb:    hostrcb
1379  *
1380  * Return value:
1381  *      none
1382  **/
1383 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1384                                      struct ipr_hostrcb *hostrcb)
1385 {
1386         struct ipr_resource_entry *res = NULL;
1387         struct ipr_config_table_entry_wrapper cfgtew;
1388         __be32 cc_res_handle;
1389
1390         u32 is_ndn = 1;
1391
1392         if (ioa_cfg->sis64) {
1393                 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1394                 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1395         } else {
1396                 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1397                 cc_res_handle = cfgtew.u.cfgte->res_handle;
1398         }
1399
1400         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1401                 if (res->res_handle == cc_res_handle) {
1402                         is_ndn = 0;
1403                         break;
1404                 }
1405         }
1406
1407         if (is_ndn) {
1408                 if (list_empty(&ioa_cfg->free_res_q)) {
1409                         ipr_send_hcam(ioa_cfg,
1410                                       IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1411                                       hostrcb);
1412                         return;
1413                 }
1414
1415                 res = list_entry(ioa_cfg->free_res_q.next,
1416                                  struct ipr_resource_entry, queue);
1417
1418                 list_del(&res->queue);
1419                 ipr_init_res_entry(res, &cfgtew);
1420                 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1421         }
1422
1423         ipr_update_res_entry(res, &cfgtew);
1424
1425         if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1426                 if (res->sdev) {
1427                         res->del_from_ml = 1;
1428                         res->res_handle = IPR_INVALID_RES_HANDLE;
1429                         if (ioa_cfg->allow_ml_add_del)
1430                                 schedule_work(&ioa_cfg->work_q);
1431                 } else {
1432                         ipr_clear_res_target(res);
1433                         list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1434                 }
1435         } else if (!res->sdev || res->del_from_ml) {
1436                 res->add_to_ml = 1;
1437                 if (ioa_cfg->allow_ml_add_del)
1438                         schedule_work(&ioa_cfg->work_q);
1439         }
1440
1441         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1442 }
1443
1444 /**
1445  * ipr_process_ccn - Op done function for a CCN.
1446  * @ipr_cmd:    ipr command struct
1447  *
1448  * This function is the op done function for a configuration
1449  * change notification host controlled async (HCAM) from the adapter.
1450  *
1451  * Return value:
1452  *      none
1453  **/
1454 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1455 {
1456         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1457         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1458         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1459
1460         list_del(&hostrcb->queue);
1461         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1462
1463         if (ioasc) {
1464                 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1465                         dev_err(&ioa_cfg->pdev->dev,
1466                                 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1467
1468                 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1469         } else {
1470                 ipr_handle_config_change(ioa_cfg, hostrcb);
1471         }
1472 }
1473
1474 /**
1475  * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1476  * @i:          index into buffer
1477  * @buf:                string to modify
1478  *
1479  * This function will strip all trailing whitespace, pad the end
1480  * of the string with a single space, and NULL terminate the string.
1481  *
1482  * Return value:
1483  *      new length of string
1484  **/
1485 static int strip_and_pad_whitespace(int i, char *buf)
1486 {
1487         while (i && buf[i] == ' ')
1488                 i--;
1489         buf[i+1] = ' ';
1490         buf[i+2] = '\0';
1491         return i + 2;
1492 }
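/*
 * Worked example (editorial): given i pointing at the last byte of a
 * fixed-width field, the helper rewinds over trailing blanks, then writes
 * one separating space and a terminator:
 *
 *	char buf[16] = "IBM     ";              // 8-byte field, blank padded
 *	int n = strip_and_pad_whitespace(7, buf);
 *	// buf == "IBM " and n == 4, the offset at which the caller
 *	// appends the next fixed-width field (see ipr_log_vpd_compact)
 */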
1493
1494 /**
1495  * ipr_log_vpd_compact - Log the passed VPD compactly.
1496  * @prefix:             string to print at start of printk
1497  * @hostrcb:    hostrcb pointer
1498  * @vpd:                vendor/product id/sn struct
1499  *
1500  * Return value:
1501  *      none
1502  **/
1503 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1504                                 struct ipr_vpd *vpd)
1505 {
1506         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1507         int i = 0;
1508
1509         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1510         i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1511
1512         memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1513         i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1514
1515         memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1516         buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1517
1518         ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1519 }
1520
1521 /**
1522  * ipr_log_vpd - Log the passed VPD to the error log.
1523  * @vpd:                vendor/product id/sn struct
1524  *
1525  * Return value:
1526  *      none
1527  **/
1528 static void ipr_log_vpd(struct ipr_vpd *vpd)
1529 {
1530         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1531                     + IPR_SERIAL_NUM_LEN];
1532
1533         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1534         memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1535                IPR_PROD_ID_LEN);
1536         buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1537         ipr_err("Vendor/Product ID: %s\n", buffer);
1538
1539         memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1540         buffer[IPR_SERIAL_NUM_LEN] = '\0';
1541         ipr_err("    Serial Number: %s\n", buffer);
1542 }
1543
1544 /**
1545  * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1546  * @prefix:             string to print at start of printk
1547  * @hostrcb:    hostrcb pointer
1548  * @vpd:                vendor/product id/sn/wwn struct
1549  *
1550  * Return value:
1551  *      none
1552  **/
1553 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1554                                     struct ipr_ext_vpd *vpd)
1555 {
1556         ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1557         ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1558                      be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1559 }
1560
1561 /**
1562  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1563  * @vpd:                vendor/product id/sn/wwn struct
1564  *
1565  * Return value:
1566  *      none
1567  **/
1568 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1569 {
1570         ipr_log_vpd(&vpd->vpd);
1571         ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1572                 be32_to_cpu(vpd->wwid[1]));
1573 }
1574
1575 /**
1576  * ipr_log_enhanced_cache_error - Log a cache error.
1577  * @ioa_cfg:    ioa config struct
1578  * @hostrcb:    hostrcb struct
1579  *
1580  * Return value:
1581  *      none
1582  **/
1583 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1584                                          struct ipr_hostrcb *hostrcb)
1585 {
1586         struct ipr_hostrcb_type_12_error *error;
1587
1588         if (ioa_cfg->sis64)
1589                 error = &hostrcb->hcam.u.error64.u.type_12_error;
1590         else
1591                 error = &hostrcb->hcam.u.error.u.type_12_error;
1592
1593         ipr_err("-----Current Configuration-----\n");
1594         ipr_err("Cache Directory Card Information:\n");
1595         ipr_log_ext_vpd(&error->ioa_vpd);
1596         ipr_err("Adapter Card Information:\n");
1597         ipr_log_ext_vpd(&error->cfc_vpd);
1598
1599         ipr_err("-----Expected Configuration-----\n");
1600         ipr_err("Cache Directory Card Information:\n");
1601         ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1602         ipr_err("Adapter Card Information:\n");
1603         ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1604
1605         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1606                      be32_to_cpu(error->ioa_data[0]),
1607                      be32_to_cpu(error->ioa_data[1]),
1608                      be32_to_cpu(error->ioa_data[2]));
1609 }
1610
1611 /**
1612  * ipr_log_cache_error - Log a cache error.
1613  * @ioa_cfg:    ioa config struct
1614  * @hostrcb:    hostrcb struct
1615  *
1616  * Return value:
1617  *      none
1618  **/
1619 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1620                                 struct ipr_hostrcb *hostrcb)
1621 {
1622         struct ipr_hostrcb_type_02_error *error =
1623                 &hostrcb->hcam.u.error.u.type_02_error;
1624
1625         ipr_err("-----Current Configuration-----\n");
1626         ipr_err("Cache Directory Card Information:\n");
1627         ipr_log_vpd(&error->ioa_vpd);
1628         ipr_err("Adapter Card Information:\n");
1629         ipr_log_vpd(&error->cfc_vpd);
1630
1631         ipr_err("-----Expected Configuration-----\n");
1632         ipr_err("Cache Directory Card Information:\n");
1633         ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1634         ipr_err("Adapter Card Information:\n");
1635         ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1636
1637         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1638                      be32_to_cpu(error->ioa_data[0]),
1639                      be32_to_cpu(error->ioa_data[1]),
1640                      be32_to_cpu(error->ioa_data[2]));
1641 }
1642
1643 /**
1644  * ipr_log_enhanced_config_error - Log a configuration error.
1645  * @ioa_cfg:    ioa config struct
1646  * @hostrcb:    hostrcb struct
1647  *
1648  * Return value:
1649  *      none
1650  **/
1651 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1652                                           struct ipr_hostrcb *hostrcb)
1653 {
1654         int errors_logged, i;
1655         struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1656         struct ipr_hostrcb_type_13_error *error;
1657
1658         error = &hostrcb->hcam.u.error.u.type_13_error;
1659         errors_logged = be32_to_cpu(error->errors_logged);
1660
1661         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1662                 be32_to_cpu(error->errors_detected), errors_logged);
1663
1664         dev_entry = error->dev;
1665
1666         for (i = 0; i < errors_logged; i++, dev_entry++) {
1667                 ipr_err_separator;
1668
1669                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1670                 ipr_log_ext_vpd(&dev_entry->vpd);
1671
1672                 ipr_err("-----New Device Information-----\n");
1673                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1674
1675                 ipr_err("Cache Directory Card Information:\n");
1676                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1677
1678                 ipr_err("Adapter Card Information:\n");
1679                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1680         }
1681 }
1682
1683 /**
1684  * ipr_log_sis64_config_error - Log a sis64 configuration error.
1685  * @ioa_cfg:    ioa config struct
1686  * @hostrcb:    hostrcb struct
1687  *
1688  * Return value:
1689  *      none
1690  **/
1691 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1692                                        struct ipr_hostrcb *hostrcb)
1693 {
1694         int errors_logged, i;
1695         struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1696         struct ipr_hostrcb_type_23_error *error;
1697         char buffer[IPR_MAX_RES_PATH_LENGTH];
1698
1699         error = &hostrcb->hcam.u.error64.u.type_23_error;
1700         errors_logged = be32_to_cpu(error->errors_logged);
1701
1702         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1703                 be32_to_cpu(error->errors_detected), errors_logged);
1704
1705         dev_entry = error->dev;
1706
1707         for (i = 0; i < errors_logged; i++, dev_entry++) {
1708                 ipr_err_separator;
1709
1710                 ipr_err("Device %d : %s", i + 1,
1711                         __ipr_format_res_path(dev_entry->res_path,
1712                                               buffer, sizeof(buffer)));
1713                 ipr_log_ext_vpd(&dev_entry->vpd);
1714
1715                 ipr_err("-----New Device Information-----\n");
1716                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1717
1718                 ipr_err("Cache Directory Card Information:\n");
1719                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1720
1721                 ipr_err("Adapter Card Information:\n");
1722                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1723         }
1724 }
1725
1726 /**
1727  * ipr_log_config_error - Log a configuration error.
1728  * @ioa_cfg:    ioa config struct
1729  * @hostrcb:    hostrcb struct
1730  *
1731  * Return value:
1732  *      none
1733  **/
1734 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1735                                  struct ipr_hostrcb *hostrcb)
1736 {
1737         int errors_logged, i;
1738         struct ipr_hostrcb_device_data_entry *dev_entry;
1739         struct ipr_hostrcb_type_03_error *error;
1740
1741         error = &hostrcb->hcam.u.error.u.type_03_error;
1742         errors_logged = be32_to_cpu(error->errors_logged);
1743
1744         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1745                 be32_to_cpu(error->errors_detected), errors_logged);
1746
1747         dev_entry = error->dev;
1748
1749         for (i = 0; i < errors_logged; i++, dev_entry++) {
1750                 ipr_err_separator;
1751
1752                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1753                 ipr_log_vpd(&dev_entry->vpd);
1754
1755                 ipr_err("-----New Device Information-----\n");
1756                 ipr_log_vpd(&dev_entry->new_vpd);
1757
1758                 ipr_err("Cache Directory Card Information:\n");
1759                 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1760
1761                 ipr_err("Adapter Card Information:\n");
1762                 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1763
1764                 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1765                         be32_to_cpu(dev_entry->ioa_data[0]),
1766                         be32_to_cpu(dev_entry->ioa_data[1]),
1767                         be32_to_cpu(dev_entry->ioa_data[2]),
1768                         be32_to_cpu(dev_entry->ioa_data[3]),
1769                         be32_to_cpu(dev_entry->ioa_data[4]));
1770         }
1771 }
1772
1773 /**
1774  * ipr_log_enhanced_array_error - Log an array configuration error.
1775  * @ioa_cfg:    ioa config struct
1776  * @hostrcb:    hostrcb struct
1777  *
1778  * Return value:
1779  *      none
1780  **/
1781 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1782                                          struct ipr_hostrcb *hostrcb)
1783 {
1784         int i, num_entries;
1785         struct ipr_hostrcb_type_14_error *error;
1786         struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1787         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1788
1789         error = &hostrcb->hcam.u.error.u.type_14_error;
1790
1791         ipr_err_separator;
1792
1793         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1794                 error->protection_level,
1795                 ioa_cfg->host->host_no,
1796                 error->last_func_vset_res_addr.bus,
1797                 error->last_func_vset_res_addr.target,
1798                 error->last_func_vset_res_addr.lun);
1799
1800         ipr_err_separator;
1801
1802         array_entry = error->array_member;
1803         num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1804                             ARRAY_SIZE(error->array_member));
1805
1806         for (i = 0; i < num_entries; i++, array_entry++) {
1807                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1808                         continue;
1809
1810                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1811                         ipr_err("Exposed Array Member %d:\n", i);
1812                 else
1813                         ipr_err("Array Member %d:\n", i);
1814
1815                 ipr_log_ext_vpd(&array_entry->vpd);
1816                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1817                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1818                                  "Expected Location");
1819
1820                 ipr_err_separator;
1821         }
1822 }
1823
1824 /**
1825  * ipr_log_array_error - Log an array configuration error.
1826  * @ioa_cfg:    ioa config struct
1827  * @hostrcb:    hostrcb struct
1828  *
1829  * Return value:
1830  *      none
1831  **/
1832 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1833                                 struct ipr_hostrcb *hostrcb)
1834 {
1835         int i;
1836         struct ipr_hostrcb_type_04_error *error;
1837         struct ipr_hostrcb_array_data_entry *array_entry;
1838         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1839
1840         error = &hostrcb->hcam.u.error.u.type_04_error;
1841
1842         ipr_err_separator;
1843
1844         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1845                 error->protection_level,
1846                 ioa_cfg->host->host_no,
1847                 error->last_func_vset_res_addr.bus,
1848                 error->last_func_vset_res_addr.target,
1849                 error->last_func_vset_res_addr.lun);
1850
1851         ipr_err_separator;
1852
1853         array_entry = error->array_member;
1854
1855         for (i = 0; i < 18; i++) {
1856                 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1857                         continue;
1858
1859                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1860                         ipr_err("Exposed Array Member %d:\n", i);
1861                 else
1862                         ipr_err("Array Member %d:\n", i);
1863
1864                 ipr_log_vpd(&array_entry->vpd);
1865
1866                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1867                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1868                                  "Expected Location");
1869
1870                 ipr_err_separator;
1871
1872                 if (i == 9)
1873                         array_entry = error->array_member2;
1874                 else
1875                         array_entry++;
1876         }
1877 }
1878
1879 /**
1880  * ipr_log_hex_data - Log additional hex IOA error data.
1881  * @ioa_cfg:    ioa config struct
1882  * @data:               IOA error data
1883  * @len:                data length in bytes
1884  *
1885  * Return value:
1886  *      none
1887  **/
1888 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1889 {
1890         int i;
1891
1892         if (len == 0)
1893                 return;
1894
1895         if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1896                 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1897
1898         for (i = 0; i < len / 4; i += 4) {
1899                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1900                         be32_to_cpu(data[i]),
1901                         be32_to_cpu(data[i+1]),
1902                         be32_to_cpu(data[i+2]),
1903                         be32_to_cpu(data[i+3]));
1904         }
1905 }
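/*
 * Editorial note: in the loop above, i indexes an array of big-endian
 * 32-bit words, so the bound len / 4 is the word count and the printed
 * i * 4 is the byte offset. Each line consumes four words (16 bytes); a
 * 32-byte buffer therefore logs as two lines:
 *
 *	00000000: 00112233 44556677 8899AABB CCDDEEFF
 *	00000010: ...
 */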
1906
1907 /**
1908  * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1909  * @ioa_cfg:    ioa config struct
1910  * @hostrcb:    hostrcb struct
1911  *
1912  * Return value:
1913  *      none
1914  **/
1915 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1916                                             struct ipr_hostrcb *hostrcb)
1917 {
1918         struct ipr_hostrcb_type_17_error *error;
1919
1920         if (ioa_cfg->sis64)
1921                 error = &hostrcb->hcam.u.error64.u.type_17_error;
1922         else
1923                 error = &hostrcb->hcam.u.error.u.type_17_error;
1924
1925         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1926         strim(error->failure_reason);
1927
1928         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1929                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1930         ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1931         ipr_log_hex_data(ioa_cfg, error->data,
1932                          be32_to_cpu(hostrcb->hcam.length) -
1933                          (offsetof(struct ipr_hostrcb_error, u) +
1934                           offsetof(struct ipr_hostrcb_type_17_error, data)));
1935 }
1936
1937 /**
1938  * ipr_log_dual_ioa_error - Log a dual adapter error.
1939  * @ioa_cfg:    ioa config struct
1940  * @hostrcb:    hostrcb struct
1941  *
1942  * Return value:
1943  *      none
1944  **/
1945 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1946                                    struct ipr_hostrcb *hostrcb)
1947 {
1948         struct ipr_hostrcb_type_07_error *error;
1949
1950         error = &hostrcb->hcam.u.error.u.type_07_error;
1951         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1952         strim(error->failure_reason);
1953
1954         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1955                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1956         ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1957         ipr_log_hex_data(ioa_cfg, error->data,
1958                          be32_to_cpu(hostrcb->hcam.length) -
1959                          (offsetof(struct ipr_hostrcb_error, u) +
1960                           offsetof(struct ipr_hostrcb_type_07_error, data)));
1961 }
1962
1963 static const struct {
1964         u8 active;
1965         char *desc;
1966 } path_active_desc[] = {
1967         { IPR_PATH_NO_INFO, "Path" },
1968         { IPR_PATH_ACTIVE, "Active path" },
1969         { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1970 };
1971
1972 static const struct {
1973         u8 state;
1974         char *desc;
1975 } path_state_desc[] = {
1976         { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1977         { IPR_PATH_HEALTHY, "is healthy" },
1978         { IPR_PATH_DEGRADED, "is degraded" },
1979         { IPR_PATH_FAILED, "is failed" }
1980 };
1981
1982 /**
1983  * ipr_log_fabric_path - Log a fabric path error
1984  * @hostrcb:    hostrcb struct
1985  * @fabric:             fabric descriptor
1986  *
1987  * Return value:
1988  *      none
1989  **/
1990 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1991                                 struct ipr_hostrcb_fabric_desc *fabric)
1992 {
1993         int i, j;
1994         u8 path_state = fabric->path_state;
1995         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1996         u8 state = path_state & IPR_PATH_STATE_MASK;
1997
1998         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1999                 if (path_active_desc[i].active != active)
2000                         continue;
2001
2002                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2003                         if (path_state_desc[j].state != state)
2004                                 continue;
2005
2006                         if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2007                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2008                                              path_active_desc[i].desc, path_state_desc[j].desc,
2009                                              fabric->ioa_port);
2010                         } else if (fabric->cascaded_expander == 0xff) {
2011                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2012                                              path_active_desc[i].desc, path_state_desc[j].desc,
2013                                              fabric->ioa_port, fabric->phy);
2014                         } else if (fabric->phy == 0xff) {
2015                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2016                                              path_active_desc[i].desc, path_state_desc[j].desc,
2017                                              fabric->ioa_port, fabric->cascaded_expander);
2018                         } else {
2019                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2020                                              path_active_desc[i].desc, path_state_desc[j].desc,
2021                                              fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2022                         }
2023                         return;
2024                 }
2025         }
2026
2027         ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2028                 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2029 }
2030
2031 /**
2032  * ipr_log64_fabric_path - Log a fabric path error
2033  * @hostrcb:    hostrcb struct
2034  * @fabric:             fabric descriptor
2035  *
2036  * Return value:
2037  *      none
2038  **/
2039 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2040                                   struct ipr_hostrcb64_fabric_desc *fabric)
2041 {
2042         int i, j;
2043         u8 path_state = fabric->path_state;
2044         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2045         u8 state = path_state & IPR_PATH_STATE_MASK;
2046         char buffer[IPR_MAX_RES_PATH_LENGTH];
2047
2048         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2049                 if (path_active_desc[i].active != active)
2050                         continue;
2051
2052                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2053                         if (path_state_desc[j].state != state)
2054                                 continue;
2055
2056                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2057                                      path_active_desc[i].desc, path_state_desc[j].desc,
2058                                      ipr_format_res_path(hostrcb->ioa_cfg,
2059                                                 fabric->res_path,
2060                                                 buffer, sizeof(buffer)));
2061                         return;
2062                 }
2063         }
2064
2065         ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2066                 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2067                                     buffer, sizeof(buffer)));
2068 }
2069
2070 static const struct {
2071         u8 type;
2072         char *desc;
2073 } path_type_desc[] = {
2074         { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2075         { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2076         { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2077         { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2078 };
2079
2080 static const struct {
2081         u8 status;
2082         char *desc;
2083 } path_status_desc[] = {
2084         { IPR_PATH_CFG_NO_PROB, "Functional" },
2085         { IPR_PATH_CFG_DEGRADED, "Degraded" },
2086         { IPR_PATH_CFG_FAILED, "Failed" },
2087         { IPR_PATH_CFG_SUSPECT, "Suspect" },
2088         { IPR_PATH_NOT_DETECTED, "Missing" },
2089         { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2090 };
2091
2092 static const char *link_rate[] = {
2093         "unknown",
2094         "disabled",
2095         "phy reset problem",
2096         "spinup hold",
2097         "port selector",
2098         "unknown",
2099         "unknown",
2100         "unknown",
2101         "1.5Gbps",
2102         "3.0Gbps",
2103         "unknown",
2104         "unknown",
2105         "unknown",
2106         "unknown",
2107         "unknown",
2108         "unknown"
2109 };
2110
2111 /**
2112  * ipr_log_path_elem - Log a fabric path element.
2113  * @hostrcb:    hostrcb struct
2114  * @cfg:                fabric path element struct
2115  *
2116  * Return value:
2117  *      none
2118  **/
2119 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2120                               struct ipr_hostrcb_config_element *cfg)
2121 {
2122         int i, j;
2123         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2124         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2125
2126         if (type == IPR_PATH_CFG_NOT_EXIST)
2127                 return;
2128
2129         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2130                 if (path_type_desc[i].type != type)
2131                         continue;
2132
2133                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2134                         if (path_status_desc[j].status != status)
2135                                 continue;
2136
2137                         if (type == IPR_PATH_CFG_IOA_PORT) {
2138                                 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2139                                              path_status_desc[j].desc, path_type_desc[i].desc,
2140                                              cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2141                                              be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2142                         } else {
2143                                 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2144                                         ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2145                                                      path_status_desc[j].desc, path_type_desc[i].desc,
2146                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2147                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2148                                 } else if (cfg->cascaded_expander == 0xff) {
2149                                         ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2150                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2151                                                      path_type_desc[i].desc, cfg->phy,
2152                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2153                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2154                                 } else if (cfg->phy == 0xff) {
2155                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2156                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2157                                                      path_type_desc[i].desc, cfg->cascaded_expander,
2158                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2159                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2160                                 } else {
2161                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2162                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2163                                                      path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2164                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2165                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2166                                 }
2167                         }
2168                         return;
2169                 }
2170         }
2171
2172         ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2173                      "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2174                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2175                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2176 }
2177
2178 /**
2179  * ipr_log64_path_elem - Log a fabric path element.
2180  * @hostrcb:    hostrcb struct
2181  * @cfg:                fabric path element struct
2182  *
2183  * Return value:
2184  *      none
2185  **/
2186 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2187                                 struct ipr_hostrcb64_config_element *cfg)
2188 {
2189         int i, j;
2190         u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2191         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2192         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2193         char buffer[IPR_MAX_RES_PATH_LENGTH];
2194
2195         if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2196                 return;
2197
2198         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2199                 if (path_type_desc[i].type != type)
2200                         continue;
2201
2202                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2203                         if (path_status_desc[j].status != status)
2204                                 continue;
2205
2206                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2207                                      path_status_desc[j].desc, path_type_desc[i].desc,
2208                                      ipr_format_res_path(hostrcb->ioa_cfg,
2209                                         cfg->res_path, buffer, sizeof(buffer)),
2210                                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2211                                         be32_to_cpu(cfg->wwid[0]),
2212                                         be32_to_cpu(cfg->wwid[1]));
2213                         return;
2214                 }
2215         }
2216         ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2217                      "WWN=%08X%08X\n", cfg->type_status,
2218                      ipr_format_res_path(hostrcb->ioa_cfg,
2219                         cfg->res_path, buffer, sizeof(buffer)),
2220                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2221                         be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2222 }
2223
2224 /**
2225  * ipr_log_fabric_error - Log a fabric error.
2226  * @ioa_cfg:    ioa config struct
2227  * @hostrcb:    hostrcb struct
2228  *
2229  * Return value:
2230  *      none
2231  **/
2232 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2233                                  struct ipr_hostrcb *hostrcb)
2234 {
2235         struct ipr_hostrcb_type_20_error *error;
2236         struct ipr_hostrcb_fabric_desc *fabric;
2237         struct ipr_hostrcb_config_element *cfg;
2238         int i, add_len;
2239
2240         error = &hostrcb->hcam.u.error.u.type_20_error;
2241         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2242         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2243
2244         add_len = be32_to_cpu(hostrcb->hcam.length) -
2245                 (offsetof(struct ipr_hostrcb_error, u) +
2246                  offsetof(struct ipr_hostrcb_type_20_error, desc));
2247
2248         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2249                 ipr_log_fabric_path(hostrcb, fabric);
2250                 for_each_fabric_cfg(fabric, cfg)
2251                         ipr_log_path_elem(hostrcb, cfg);
2252
2253                 add_len -= be16_to_cpu(fabric->length);
2254                 fabric = (struct ipr_hostrcb_fabric_desc *)
2255                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2256         }
2257
2258         ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2259 }
2260
2261 /**
2262  * ipr_log_sis64_array_error - Log a sis64 array error.
2263  * @ioa_cfg:    ioa config struct
2264  * @hostrcb:    hostrcb struct
2265  *
2266  * Return value:
2267  *      none
2268  **/
2269 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2270                                       struct ipr_hostrcb *hostrcb)
2271 {
2272         int i, num_entries;
2273         struct ipr_hostrcb_type_24_error *error;
2274         struct ipr_hostrcb64_array_data_entry *array_entry;
2275         char buffer[IPR_MAX_RES_PATH_LENGTH];
2276         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2277
2278         error = &hostrcb->hcam.u.error64.u.type_24_error;
2279
2280         ipr_err_separator;
2281
2282         ipr_err("RAID %s Array Configuration: %s\n",
2283                 error->protection_level,
2284                 ipr_format_res_path(ioa_cfg, error->last_res_path,
2285                         buffer, sizeof(buffer)));
2286
2287         ipr_err_separator;
2288
2289         array_entry = error->array_member;
2290         num_entries = min_t(u32, error->num_entries,
2291                             ARRAY_SIZE(error->array_member));
2292
2293         for (i = 0; i < num_entries; i++, array_entry++) {
2294
2295                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2296                         continue;
2297
2298                 if (error->exposed_mode_adn == i)
2299                         ipr_err("Exposed Array Member %d:\n", i);
2300                 else
2301                         ipr_err("Array Member %d:\n", i);
2302
2303                 ipr_err("Array Member %d:\n", i);
2304                 ipr_log_ext_vpd(&array_entry->vpd);
2305                 ipr_err("Current Location: %s\n",
2306                          ipr_format_res_path(ioa_cfg, array_entry->res_path,
2307                                 buffer, sizeof(buffer)));
2308                 ipr_err("Expected Location: %s\n",
2309                          ipr_format_res_path(ioa_cfg,
2310                                 array_entry->expected_res_path,
2311                                 buffer, sizeof(buffer)));
2312
2313                 ipr_err_separator;
2314         }
2315 }
2316
2317 /**
2318  * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2319  * @ioa_cfg:    ioa config struct
2320  * @hostrcb:    hostrcb struct
2321  *
2322  * Return value:
2323  *      none
2324  **/
2325 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2326                                        struct ipr_hostrcb *hostrcb)
2327 {
2328         struct ipr_hostrcb_type_30_error *error;
2329         struct ipr_hostrcb64_fabric_desc *fabric;
2330         struct ipr_hostrcb64_config_element *cfg;
2331         int i, add_len;
2332
2333         error = &hostrcb->hcam.u.error64.u.type_30_error;
2334
2335         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2336         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2337
2338         add_len = be32_to_cpu(hostrcb->hcam.length) -
2339                 (offsetof(struct ipr_hostrcb64_error, u) +
2340                  offsetof(struct ipr_hostrcb_type_30_error, desc));
2341
2342         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2343                 ipr_log64_fabric_path(hostrcb, fabric);
2344                 for_each_fabric_cfg(fabric, cfg)
2345                         ipr_log64_path_elem(hostrcb, cfg);
2346
2347                 add_len -= be16_to_cpu(fabric->length);
2348                 fabric = (struct ipr_hostrcb64_fabric_desc *)
2349                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2350         }
2351
2352         ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2353 }
2354
2355 /**
2356  * ipr_log_generic_error - Log an adapter error.
2357  * @ioa_cfg:    ioa config struct
2358  * @hostrcb:    hostrcb struct
2359  *
2360  * Return value:
2361  *      none
2362  **/
2363 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2364                                   struct ipr_hostrcb *hostrcb)
2365 {
2366         ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2367                          be32_to_cpu(hostrcb->hcam.length));
2368 }
2369
2370 /**
2371  * ipr_log_sis64_device_error - Log a sis64 device error.
2372  * @ioa_cfg:    ioa config struct
2373  * @hostrcb:    hostrcb struct
2374  *
2375  * Return value:
2376  *      none
2377  **/
2378 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2379                                          struct ipr_hostrcb *hostrcb)
2380 {
2381         struct ipr_hostrcb_type_21_error *error;
2382         char buffer[IPR_MAX_RES_PATH_LENGTH];
2383
2384         error = &hostrcb->hcam.u.error64.u.type_21_error;
2385
2386         ipr_err("-----Failing Device Information-----\n");
2387         ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2388                 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2389                  be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2390         ipr_err("Device Resource Path: %s\n",
2391                 __ipr_format_res_path(error->res_path,
2392                                       buffer, sizeof(buffer)));
2393         error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2394         error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2395         ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2396         ipr_err("Secondary Problem Description:  %s\n", error->second_problem_desc);
2397         ipr_err("SCSI Sense Data:\n");
2398         ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2399         ipr_err("SCSI Command Descriptor Block: \n");
2400         ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2401
2402         ipr_err("Additional IOA Data:\n");
2403         ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2404 }
2405
2406 /**
2407  * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2408  * @ioasc:      IOASC
2409  *
2410  * This function will return the index into the ipr_error_table
2411  * for the specified IOASC. If the IOASC is not in the table,
2412  * 0 will be returned, which points to the entry used for unknown errors.
2413  *
2414  * Return value:
2415  *      index into the ipr_error_table
2416  **/
2417 static u32 ipr_get_error(u32 ioasc)
2418 {
2419         int i;
2420
2421         for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2422                 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2423                         return i;
2424
2425         return 0;
2426 }
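/*
 * Usage sketch (editorial): the table walk masks off the environment
 * specific bits (IPR_IOASC_IOASC_MASK), and index 0 doubles as the
 * catch-all entry, so the lookup never fails:
 *
 *	error_index = ipr_get_error(ioasc);
 *	if (ipr_error_table[error_index].log_hcam)
 *		ipr_hcam_err(hostrcb, "%s\n",
 *			     ipr_error_table[error_index].error);
 */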
2427
2428 /**
2429  * ipr_handle_log_data - Log an adapter error.
2430  * @ioa_cfg:    ioa config struct
2431  * @hostrcb:    hostrcb struct
2432  *
2433  * This function logs an adapter error to the system.
2434  *
2435  * Return value:
2436  *      none
2437  **/
2438 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2439                                 struct ipr_hostrcb *hostrcb)
2440 {
2441         u32 ioasc;
2442         int error_index;
2443         struct ipr_hostrcb_type_21_error *error;
2444
2445         if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2446                 return;
2447
2448         if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2449                 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2450
2451         if (ioa_cfg->sis64)
2452                 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2453         else
2454                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2455
2456         if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2457             ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2458                 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2459                 scsi_report_bus_reset(ioa_cfg->host,
2460                                       hostrcb->hcam.u.error.fd_res_addr.bus);
2461         }
2462
2463         error_index = ipr_get_error(ioasc);
2464
2465         if (!ipr_error_table[error_index].log_hcam)
2466                 return;
2467
2468         if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2469             hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2470                 error = &hostrcb->hcam.u.error64.u.type_21_error;
2471
2472                 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2473                         ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2474                                 return;
2475         }
2476
2477         ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2478
2479         /* Set indication we have logged an error */
2480         ioa_cfg->errors_logged++;
2481
2482         if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2483                 return;
2484         if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2485                 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2486
2487         switch (hostrcb->hcam.overlay_id) {
2488         case IPR_HOST_RCB_OVERLAY_ID_2:
2489                 ipr_log_cache_error(ioa_cfg, hostrcb);
2490                 break;
2491         case IPR_HOST_RCB_OVERLAY_ID_3:
2492                 ipr_log_config_error(ioa_cfg, hostrcb);
2493                 break;
2494         case IPR_HOST_RCB_OVERLAY_ID_4:
2495         case IPR_HOST_RCB_OVERLAY_ID_6:
2496                 ipr_log_array_error(ioa_cfg, hostrcb);
2497                 break;
2498         case IPR_HOST_RCB_OVERLAY_ID_7:
2499                 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2500                 break;
2501         case IPR_HOST_RCB_OVERLAY_ID_12:
2502                 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2503                 break;
2504         case IPR_HOST_RCB_OVERLAY_ID_13:
2505                 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2506                 break;
2507         case IPR_HOST_RCB_OVERLAY_ID_14:
2508         case IPR_HOST_RCB_OVERLAY_ID_16:
2509                 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2510                 break;
2511         case IPR_HOST_RCB_OVERLAY_ID_17:
2512                 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2513                 break;
2514         case IPR_HOST_RCB_OVERLAY_ID_20:
2515                 ipr_log_fabric_error(ioa_cfg, hostrcb);
2516                 break;
2517         case IPR_HOST_RCB_OVERLAY_ID_21:
2518                 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2519                 break;
2520         case IPR_HOST_RCB_OVERLAY_ID_23:
2521                 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2522                 break;
2523         case IPR_HOST_RCB_OVERLAY_ID_24:
2524         case IPR_HOST_RCB_OVERLAY_ID_26:
2525                 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2526                 break;
2527         case IPR_HOST_RCB_OVERLAY_ID_30:
2528                 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2529                 break;
2530         case IPR_HOST_RCB_OVERLAY_ID_1:
2531         case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2532         default:
2533                 ipr_log_generic_error(ioa_cfg, hostrcb);
2534                 break;
2535         }
2536 }
2537
2538 /**
2539  * ipr_process_error - Op done function for an adapter error log.
2540  * @ipr_cmd:    ipr command struct
2541  *
2542  * This function is the op done function for an error log host
2543  * controlled async (HCAM) from the adapter. It will log the error and
2544  * send the HCAM back to the adapter.
2545  *
2546  * Return value:
2547  *      none
2548  **/
2549 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2550 {
2551         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2552         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2553         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2554         u32 fd_ioasc;
2555
2556         if (ioa_cfg->sis64)
2557                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2558         else
2559                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2560
2561         list_del(&hostrcb->queue);
2562         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2563
2564         if (!ioasc) {
2565                 ipr_handle_log_data(ioa_cfg, hostrcb);
2566                 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2567                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2568         } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2569                 dev_err(&ioa_cfg->pdev->dev,
2570                         "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2571         }
2572
2573         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2574 }
2575
2576 /**
2577  * ipr_timeout -  An internally generated op has timed out.
2578  * @ipr_cmd:    ipr command struct
2579  *
2580  * This function blocks host requests and initiates an
2581  * adapter reset.
2582  *
2583  * Return value:
2584  *      none
2585  **/
2586 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2587 {
2588         unsigned long lock_flags = 0;
2589         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2590
2591         ENTER;
2592         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2593
2594         ioa_cfg->errors_logged++;
2595         dev_err(&ioa_cfg->pdev->dev,
2596                 "Adapter being reset due to command timeout.\n");
2597
2598         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2599                 ioa_cfg->sdt_state = GET_DUMP;
2600
2601         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2602                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2603
2604         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2605         LEAVE;
2606 }
2607
2608 /**
2609  * ipr_oper_timeout -  Adapter timed out transitioning to operational
2610  * @ipr_cmd:    ipr command struct
2611  *
2612  * This function blocks host requests and initiates an
2613  * adapter reset.
2614  *
2615  * Return value:
2616  *      none
2617  **/
2618 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2619 {
2620         unsigned long lock_flags = 0;
2621         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2622
2623         ENTER;
2624         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2625
2626         ioa_cfg->errors_logged++;
2627         dev_err(&ioa_cfg->pdev->dev,
2628                 "Adapter timed out transitioning to operational.\n");
2629
2630         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2631                 ioa_cfg->sdt_state = GET_DUMP;
2632
2633         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2634                 if (ipr_fastfail)
2635                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2636                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2637         }
2638
2639         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2640         LEAVE;
2641 }
2642
2643 /**
2644  * ipr_find_ses_entry - Find matching SES in SES table
2645  * @res:        resource entry struct of SES
2646  *
2647  * Return value:
2648  *      pointer to SES table entry / NULL on failure
2649  **/
2650 static const struct ipr_ses_table_entry *
2651 ipr_find_ses_entry(struct ipr_resource_entry *res)
2652 {
2653         int i, j, matches;
2654         struct ipr_std_inq_vpids *vpids;
2655         const struct ipr_ses_table_entry *ste = ipr_ses_table;
2656
2657         for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2658                 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2659                         if (ste->compare_product_id_byte[j] == 'X') {
2660                                 vpids = &res->std_inq_data.vpids;
2661                                 if (vpids->product_id[j] == ste->product_id[j])
2662                                         matches++;
2663                                 else
2664                                         break;
2665                         } else
2666                                 matches++;
2667                 }
2668
2669                 if (matches == IPR_PROD_ID_LEN)
2670                         return ste;
2671         }
2672
2673         return NULL;
2674 }
2675
2676 /**
2677  * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2678  * @ioa_cfg:    ioa config struct
2679  * @bus:                SCSI bus
2680  * @bus_width:  bus width
2681  *
2682  * Return value:
 *	SCSI bus speed in units of 100 kHz (e.g. 1600 = 160 MHz).
 *	For a wide (2-byte) SCSI bus, the maximum transfer rate in
 *	MB/sec is twice the bus speed in MHz, so a wide bus limited
 *	to 160 MHz moves at most 320 MB/sec.
2687  **/
2688 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2689 {
2690         struct ipr_resource_entry *res;
2691         const struct ipr_ses_table_entry *ste;
2692         u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2693
	/* Loop through each resource entry on the used resource queue */
2695         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2696                 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2697                         continue;
2698
2699                 if (bus != res->bus)
2700                         continue;
2701
2702                 if (!(ste = ipr_find_ses_entry(res)))
2703                         continue;
2704
2705                 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2706         }
2707
2708         return max_xfer_rate;
2709 }
2710
2711 /**
2712  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2713  * @ioa_cfg:            ioa config struct
 * @max_delay:          max delay in microseconds to wait
2715  *
2716  * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2717  *
2718  * Return value:
2719  *      0 on success / other on failure
2720  **/
2721 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2722 {
2723         volatile u32 pcii_reg;
2724         int delay = 1;
2725
2726         /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2727         while (delay < max_delay) {
2728                 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2729
2730                 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2731                         return 0;
2732
2733                 /* udelay cannot be used if delay is more than a few milliseconds */
2734                 if ((delay / 1000) > MAX_UDELAY_MS)
2735                         mdelay(delay / 1000);
2736                 else
2737                         udelay(delay);
2738
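		/* Exponential backoff: double the poll interval each pass */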
2739                 delay += delay;
2740         }
2741         return -EIO;
2742 }
2743
2744 /**
2745  * ipr_get_sis64_dump_data_section - Dump IOA memory
2746  * @ioa_cfg:                    ioa config struct
2747  * @start_addr:                 adapter address to dump
2748  * @dest:                       destination kernel buffer
2749  * @length_in_words:            length to dump in 4 byte words
2750  *
2751  * Return value:
2752  *      0 on success
2753  **/
2754 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2755                                            u32 start_addr,
2756                                            __be32 *dest, u32 length_in_words)
2757 {
2758         int i;
2759
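	/*
	 * The SIS64 dump interface is an indirect register window:
	 * write the adapter address to dump_addr_reg, then read the
	 * corresponding data word back from dump_data_reg.
	 */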
2760         for (i = 0; i < length_in_words; i++) {
2761                 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2762                 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2763                 dest++;
2764         }
2765
2766         return 0;
2767 }
2768
2769 /**
2770  * ipr_get_ldump_data_section - Dump IOA memory
2771  * @ioa_cfg:                    ioa config struct
2772  * @start_addr:                 adapter address to dump
 * @dest:                       destination kernel buffer
 * @length_in_words:            length to dump in 4 byte words
2775  *
2776  * Return value:
2777  *      0 on success / -EIO on failure
2778  **/
2779 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2780                                       u32 start_addr,
2781                                       __be32 *dest, u32 length_in_words)
2782 {
2783         volatile u32 temp_pcii_reg;
2784         int i, delay = 0;
2785
2786         if (ioa_cfg->sis64)
2787                 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2788                                                        dest, length_in_words);
2789
2790         /* Write IOA interrupt reg starting LDUMP state  */
2791         writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2792                ioa_cfg->regs.set_uproc_interrupt_reg32);
2793
2794         /* Wait for IO debug acknowledge */
2795         if (ipr_wait_iodbg_ack(ioa_cfg,
2796                                IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2797                 dev_err(&ioa_cfg->pdev->dev,
2798                         "IOA dump long data transfer timeout\n");
2799                 return -EIO;
2800         }
2801
2802         /* Signal LDUMP interlocked - clear IO debug ack */
2803         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2804                ioa_cfg->regs.clr_interrupt_reg);
2805
2806         /* Write Mailbox with starting address */
2807         writel(start_addr, ioa_cfg->ioa_mailbox);
2808
2809         /* Signal address valid - clear IOA Reset alert */
2810         writel(IPR_UPROCI_RESET_ALERT,
2811                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2812
2813         for (i = 0; i < length_in_words; i++) {
2814                 /* Wait for IO debug acknowledge */
2815                 if (ipr_wait_iodbg_ack(ioa_cfg,
2816                                        IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2817                         dev_err(&ioa_cfg->pdev->dev,
2818                                 "IOA dump short data transfer timeout\n");
2819                         return -EIO;
2820                 }
2821
2822                 /* Read data from mailbox and increment destination pointer */
2823                 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2824                 dest++;
2825
2826                 /* For all but the last word of data, signal data received */
2827                 if (i < (length_in_words - 1)) {
2828                         /* Signal dump data received - Clear IO debug Ack */
2829                         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2830                                ioa_cfg->regs.clr_interrupt_reg);
2831                 }
2832         }
2833
2834         /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2835         writel(IPR_UPROCI_RESET_ALERT,
2836                ioa_cfg->regs.set_uproc_interrupt_reg32);
2837
2838         writel(IPR_UPROCI_IO_DEBUG_ALERT,
2839                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2840
2841         /* Signal dump data received - Clear IO debug Ack */
2842         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2843                ioa_cfg->regs.clr_interrupt_reg);
2844
2845         /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2846         while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2847                 temp_pcii_reg =
2848                     readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2849
2850                 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2851                         return 0;
2852
2853                 udelay(10);
2854                 delay += 10;
2855         }
2856
2857         return 0;
2858 }
2859
2860 #ifdef CONFIG_SCSI_IPR_DUMP
2861 /**
2862  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2863  * @ioa_cfg:            ioa config struct
2864  * @pci_address:        adapter address
2865  * @length:                     length of data to copy
2866  *
2867  * Copy data from PCI adapter to kernel buffer.
2868  * Note: length MUST be a 4 byte multiple
2869  * Return value:
2870  *      0 on success / other on failure
2871  **/
2872 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2873                         unsigned long pci_address, u32 length)
2874 {
2875         int bytes_copied = 0;
2876         int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2877         __be32 *page;
2878         unsigned long lock_flags = 0;
2879         struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2880
2881         if (ioa_cfg->sis64)
2882                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2883         else
2884                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2885
2886         while (bytes_copied < length &&
2887                (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2888                 if (ioa_dump->page_offset >= PAGE_SIZE ||
2889                     ioa_dump->page_offset == 0) {
2890                         page = (__be32 *)__get_free_page(GFP_ATOMIC);
2891
2892                         if (!page) {
2893                                 ipr_trace;
2894                                 return bytes_copied;
2895                         }
2896
2897                         ioa_dump->page_offset = 0;
2898                         ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2899                         ioa_dump->next_page_index++;
2900                 } else
2901                         page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2902
2903                 rem_len = length - bytes_copied;
2904                 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2905                 cur_len = min(rem_len, rem_page_len);
2906
2907                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2908                 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2909                         rc = -EIO;
2910                 } else {
2911                         rc = ipr_get_ldump_data_section(ioa_cfg,
2912                                                         pci_address + bytes_copied,
2913                                                         &page[ioa_dump->page_offset / 4],
2914                                                         (cur_len / sizeof(u32)));
2915                 }
2916                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2917
2918                 if (!rc) {
2919                         ioa_dump->page_offset += cur_len;
2920                         bytes_copied += cur_len;
2921                 } else {
2922                         ipr_trace;
2923                         break;
2924                 }
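		/*
		 * Yield between sections; this path runs in process
		 * context from the worker thread and may copy a large dump.
		 */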
2925                 schedule();
2926         }
2927
2928         return bytes_copied;
2929 }
2930
2931 /**
2932  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2933  * @hdr:        dump entry header struct
2934  *
2935  * Return value:
2936  *      nothing
2937  **/
2938 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2939 {
2940         hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2941         hdr->num_elems = 1;
2942         hdr->offset = sizeof(*hdr);
2943         hdr->status = IPR_DUMP_STATUS_SUCCESS;
2944 }
2945
2946 /**
2947  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2948  * @ioa_cfg:    ioa config struct
2949  * @driver_dump:        driver dump struct
2950  *
2951  * Return value:
2952  *      nothing
2953  **/
2954 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2955                                    struct ipr_driver_dump *driver_dump)
2956 {
2957         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2958
2959         ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2960         driver_dump->ioa_type_entry.hdr.len =
2961                 sizeof(struct ipr_dump_ioa_type_entry) -
2962                 sizeof(struct ipr_dump_entry_header);
2963         driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2964         driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2965         driver_dump->ioa_type_entry.type = ioa_cfg->type;
2966         driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2967                 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2968                 ucode_vpd->minor_release[1];
2969         driver_dump->hdr.num_entries++;
2970 }
2971
2972 /**
2973  * ipr_dump_version_data - Fill in the driver version in the dump.
2974  * @ioa_cfg:    ioa config struct
2975  * @driver_dump:        driver dump struct
2976  *
2977  * Return value:
2978  *      nothing
2979  **/
2980 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2981                                   struct ipr_driver_dump *driver_dump)
2982 {
2983         ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2984         driver_dump->version_entry.hdr.len =
2985                 sizeof(struct ipr_dump_version_entry) -
2986                 sizeof(struct ipr_dump_entry_header);
2987         driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2988         driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2989         strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2990         driver_dump->hdr.num_entries++;
2991 }
2992
2993 /**
2994  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2995  * @ioa_cfg:    ioa config struct
2996  * @driver_dump:        driver dump struct
2997  *
2998  * Return value:
2999  *      nothing
3000  **/
3001 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3002                                    struct ipr_driver_dump *driver_dump)
3003 {
3004         ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3005         driver_dump->trace_entry.hdr.len =
3006                 sizeof(struct ipr_dump_trace_entry) -
3007                 sizeof(struct ipr_dump_entry_header);
3008         driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3009         driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3010         memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3011         driver_dump->hdr.num_entries++;
3012 }
3013
3014 /**
3015  * ipr_dump_location_data - Fill in the IOA location in the dump.
3016  * @ioa_cfg:    ioa config struct
3017  * @driver_dump:        driver dump struct
3018  *
3019  * Return value:
3020  *      nothing
3021  **/
3022 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3023                                    struct ipr_driver_dump *driver_dump)
3024 {
3025         ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3026         driver_dump->location_entry.hdr.len =
3027                 sizeof(struct ipr_dump_location_entry) -
3028                 sizeof(struct ipr_dump_entry_header);
3029         driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3030         driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3031         strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3032         driver_dump->hdr.num_entries++;
3033 }
3034
3035 /**
3036  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3037  * @ioa_cfg:    ioa config struct
3038  * @dump:               dump struct
3039  *
3040  * Return value:
3041  *      nothing
3042  **/
3043 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3044 {
3045         unsigned long start_addr, sdt_word;
3046         unsigned long lock_flags = 0;
3047         struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3048         struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3049         u32 num_entries, max_num_entries, start_off, end_off;
3050         u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3051         struct ipr_sdt *sdt;
3052         int valid = 1;
3053         int i;
3054
3055         ENTER;
3056
3057         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3058
3059         if (ioa_cfg->sdt_state != READ_DUMP) {
3060                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3061                 return;
3062         }
3063
3064         if (ioa_cfg->sis64) {
3065                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3066                 ssleep(IPR_DUMP_DELAY_SECONDS);
3067                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3068         }
3069
3070         start_addr = readl(ioa_cfg->ioa_mailbox);
3071
3072         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3073                 dev_err(&ioa_cfg->pdev->dev,
3074                         "Invalid dump table format: %lx\n", start_addr);
3075                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3076                 return;
3077         }
3078
3079         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3080
3081         driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3082
3083         /* Initialize the overall dump header */
3084         driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3085         driver_dump->hdr.num_entries = 1;
3086         driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3087         driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3088         driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3089         driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3090
3091         ipr_dump_version_data(ioa_cfg, driver_dump);
3092         ipr_dump_location_data(ioa_cfg, driver_dump);
3093         ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3094         ipr_dump_trace_data(ioa_cfg, driver_dump);
3095
3096         /* Update dump_header */
3097         driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3098
3099         /* IOA Dump entry */
3100         ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3101         ioa_dump->hdr.len = 0;
3102         ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3103         ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3104
	/*
	 * The first entries in the SDT are actually a list of dump
	 * addresses and lengths used to gather the real dump data.
	 * sdt points to the IOA-generated dump table; dump data will
	 * be extracted based on the entries in this table.
	 */
3109         sdt = &ioa_dump->sdt;
3110
3111         if (ioa_cfg->sis64) {
3112                 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3113                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3114         } else {
3115                 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3116                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3117         }
3118
3119         bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3120                         (max_num_entries * sizeof(struct ipr_sdt_entry));
3121         rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3122                                         bytes_to_copy / sizeof(__be32));
3123
	/* Fail the dump if the Smart Dump Table could not be read or is not ready to use */
3125         if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3126             (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3127                 dev_err(&ioa_cfg->pdev->dev,
3128                         "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3129                         rc, be32_to_cpu(sdt->hdr.state));
3130                 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3131                 ioa_cfg->sdt_state = DUMP_OBTAINED;
3132                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3133                 return;
3134         }
3135
3136         num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3137
3138         if (num_entries > max_num_entries)
3139                 num_entries = max_num_entries;
3140
3141         /* Update dump length to the actual data to be copied */
3142         dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3143         if (ioa_cfg->sis64)
3144                 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3145         else
3146                 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3147
3148         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3149
3150         for (i = 0; i < num_entries; i++) {
3151                 if (ioa_dump->hdr.len > max_dump_size) {
3152                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3153                         break;
3154                 }
3155
3156                 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3157                         sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3158                         if (ioa_cfg->sis64)
3159                                 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3160                         else {
3161                                 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3162                                 end_off = be32_to_cpu(sdt->entry[i].end_token);
3163
3164                                 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3165                                         bytes_to_copy = end_off - start_off;
3166                                 else
3167                                         valid = 0;
3168                         }
3169                         if (valid) {
3170                                 if (bytes_to_copy > max_dump_size) {
3171                                         sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3172                                         continue;
3173                                 }
3174
3175                                 /* Copy data from adapter to driver buffers */
3176                                 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3177                                                             bytes_to_copy);
3178
3179                                 ioa_dump->hdr.len += bytes_copied;
3180
3181                                 if (bytes_copied != bytes_to_copy) {
3182                                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3183                                         break;
3184                                 }
3185                         }
3186                 }
3187         }
3188
3189         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3190
3191         /* Update dump_header */
3192         driver_dump->hdr.len += ioa_dump->hdr.len;
3193         wmb();
3194         ioa_cfg->sdt_state = DUMP_OBTAINED;
3195         LEAVE;
3196 }
3197
3198 #else
3199 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3200 #endif
3201
3202 /**
3203  * ipr_release_dump - Free adapter dump memory
3204  * @kref:       kref struct
3205  *
3206  * Return value:
3207  *      nothing
3208  **/
3209 static void ipr_release_dump(struct kref *kref)
3210 {
3211         struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3212         struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3213         unsigned long lock_flags = 0;
3214         int i;
3215
3216         ENTER;
3217         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3218         ioa_cfg->dump = NULL;
3219         ioa_cfg->sdt_state = INACTIVE;
3220         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3221
3222         for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3223                 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3224
3225         vfree(dump->ioa_dump.ioa_data);
3226         kfree(dump);
3227         LEAVE;
3228 }
3229
3230 /**
3231  * ipr_worker_thread - Worker thread
3232  * @work:               ioa config struct
3233  *
 * Called at task level from a work thread. This function takes care
 * of adding and removing devices from the mid-layer as configuration
 * changes are detected by the adapter.
3237  *
3238  * Return value:
3239  *      nothing
3240  **/
3241 static void ipr_worker_thread(struct work_struct *work)
3242 {
3243         unsigned long lock_flags;
3244         struct ipr_resource_entry *res;
3245         struct scsi_device *sdev;
3246         struct ipr_dump *dump;
3247         struct ipr_ioa_cfg *ioa_cfg =
3248                 container_of(work, struct ipr_ioa_cfg, work_q);
3249         u8 bus, target, lun;
3250         int did_work;
3251
3252         ENTER;
3253         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3254
3255         if (ioa_cfg->sdt_state == READ_DUMP) {
3256                 dump = ioa_cfg->dump;
3257                 if (!dump) {
3258                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3259                         return;
3260                 }
3261                 kref_get(&dump->kref);
3262                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3263                 ipr_get_ioa_dump(ioa_cfg, dump);
3264                 kref_put(&dump->kref, ipr_release_dump);
3265
3266                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3267                 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3268                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3269                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3270                 return;
3271         }
3272
3273 restart:
3274         do {
3275                 did_work = 0;
3276                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
3277                     !ioa_cfg->allow_ml_add_del) {
3278                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3279                         return;
3280                 }
3281
3282                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3283                         if (res->del_from_ml && res->sdev) {
3284                                 did_work = 1;
3285                                 sdev = res->sdev;
3286                                 if (!scsi_device_get(sdev)) {
3287                                         if (!res->add_to_ml)
3288                                                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3289                                         else
3290                                                 res->del_from_ml = 0;
3291                                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3292                                         scsi_remove_device(sdev);
3293                                         scsi_device_put(sdev);
3294                                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3295                                 }
3296                                 break;
3297                         }
3298                 }
3299         } while (did_work);
3300
3301         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3302                 if (res->add_to_ml) {
3303                         bus = res->bus;
3304                         target = res->target;
3305                         lun = res->lun;
3306                         res->add_to_ml = 0;
3307                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3308                         scsi_add_device(ioa_cfg->host, bus, target, lun);
3309                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
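			/*
			 * The host lock was dropped around scsi_add_device(),
			 * so the resource list may have changed underneath
			 * us; rescan it from the top.
			 */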
3310                         goto restart;
3311                 }
3312         }
3313
3314         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3315         kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3316         LEAVE;
3317 }
3318
3319 #ifdef CONFIG_SCSI_IPR_TRACE
3320 /**
3321  * ipr_read_trace - Dump the adapter trace
3322  * @filp:               open sysfs file
3323  * @kobj:               kobject struct
3324  * @bin_attr:           bin_attribute struct
3325  * @buf:                buffer
3326  * @off:                offset
3327  * @count:              buffer size
3328  *
3329  * Return value:
3330  *      number of bytes printed to buffer
3331  **/
3332 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3333                               struct bin_attribute *bin_attr,
3334                               char *buf, loff_t off, size_t count)
3335 {
3336         struct device *dev = container_of(kobj, struct device, kobj);
3337         struct Scsi_Host *shost = class_to_shost(dev);
3338         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3339         unsigned long lock_flags = 0;
3340         ssize_t ret;
3341
3342         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3343         ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3344                                 IPR_TRACE_SIZE);
3345         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3346
3347         return ret;
3348 }
3349
3350 static struct bin_attribute ipr_trace_attr = {
3351         .attr = {
3352                 .name = "trace",
3353                 .mode = S_IRUGO,
3354         },
3355         .size = 0,
3356         .read = ipr_read_trace,
3357 };
3358 #endif
3359
3360 /**
3361  * ipr_show_fw_version - Show the firmware version
 * @dev:        class device struct
 * @attr:       device attribute struct
3363  * @buf:        buffer
3364  *
3365  * Return value:
3366  *      number of bytes printed to buffer
3367  **/
3368 static ssize_t ipr_show_fw_version(struct device *dev,
3369                                    struct device_attribute *attr, char *buf)
3370 {
3371         struct Scsi_Host *shost = class_to_shost(dev);
3372         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3373         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3374         unsigned long lock_flags = 0;
3375         int len;
3376
3377         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3378         len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3379                        ucode_vpd->major_release, ucode_vpd->card_type,
3380                        ucode_vpd->minor_release[0],
3381                        ucode_vpd->minor_release[1]);
3382         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3383         return len;
3384 }
3385
3386 static struct device_attribute ipr_fw_version_attr = {
3387         .attr = {
3388                 .name =         "fw_version",
3389                 .mode =         S_IRUGO,
3390         },
3391         .show = ipr_show_fw_version,
3392 };
3393
3394 /**
3395  * ipr_show_log_level - Show the adapter's error logging level
 * @dev:        class device struct
 * @attr:       device attribute struct
3397  * @buf:        buffer
3398  *
3399  * Return value:
3400  *      number of bytes printed to buffer
3401  **/
3402 static ssize_t ipr_show_log_level(struct device *dev,
3403                                    struct device_attribute *attr, char *buf)
3404 {
3405         struct Scsi_Host *shost = class_to_shost(dev);
3406         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3407         unsigned long lock_flags = 0;
3408         int len;
3409
3410         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3411         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3412         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3413         return len;
3414 }
3415
3416 /**
3417  * ipr_store_log_level - Change the adapter's error logging level
 * @dev:        class device struct
 * @attr:       device attribute struct
 * @buf:        buffer
 * @count:      buffer size
 *
 * Return value:
 *      number of bytes consumed from the buffer
3423  **/
3424 static ssize_t ipr_store_log_level(struct device *dev,
3425                                    struct device_attribute *attr,
3426                                    const char *buf, size_t count)
3427 {
3428         struct Scsi_Host *shost = class_to_shost(dev);
3429         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3430         unsigned long lock_flags = 0;
3431
3432         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3433         ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3434         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3435         return strlen(buf);
3436 }
3437
3438 static struct device_attribute ipr_log_level_attr = {
3439         .attr = {
3440                 .name =         "log_level",
3441                 .mode =         S_IRUGO | S_IWUSR,
3442         },
3443         .show = ipr_show_log_level,
3444         .store = ipr_store_log_level
3445 };
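
/*
 * Example sysfs usage (a sketch; "host0" stands in for the
 * system-specific host number under /sys/class/scsi_host):
 *
 *   cat /sys/class/scsi_host/host0/log_level
 *   echo 4 > /sys/class/scsi_host/host0/log_level
 */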
3446
3447 /**
3448  * ipr_store_diagnostics - IOA Diagnostics interface
 * @dev:        device struct
 * @attr:       device attribute struct
3450  * @buf:        buffer
3451  * @count:      buffer size
3452  *
3453  * This function will reset the adapter and wait a reasonable
3454  * amount of time for any errors that the adapter might log.
3455  *
3456  * Return value:
3457  *      count on success / other on failure
3458  **/
3459 static ssize_t ipr_store_diagnostics(struct device *dev,
3460                                      struct device_attribute *attr,
3461                                      const char *buf, size_t count)
3462 {
3463         struct Scsi_Host *shost = class_to_shost(dev);
3464         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3465         unsigned long lock_flags = 0;
3466         int rc = count;
3467
3468         if (!capable(CAP_SYS_ADMIN))
3469                 return -EACCES;
3470
3471         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3472         while (ioa_cfg->in_reset_reload) {
3473                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3474                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3475                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3476         }
3477
3478         ioa_cfg->errors_logged = 0;
3479         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3480
3481         if (ioa_cfg->in_reset_reload) {
3482                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3483                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3484
3485                 /* Wait for a second for any errors to be logged */
3486                 msleep(1000);
3487         } else {
3488                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3489                 return -EIO;
3490         }
3491
3492         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3493         if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3494                 rc = -EIO;
3495         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3496
3497         return rc;
3498 }
3499
3500 static struct device_attribute ipr_diagnostics_attr = {
3501         .attr = {
3502                 .name =         "run_diagnostics",
3503                 .mode =         S_IWUSR,
3504         },
3505         .store = ipr_store_diagnostics
3506 };
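
/*
 * Example sysfs usage (sketch; host number is system-specific):
 *
 *   echo 1 > /sys/class/scsi_host/host0/run_diagnostics
 *
 * The write resets the adapter and fails with -EIO if any errors
 * were logged during the reset.
 */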
3507
3508 /**
3509  * ipr_show_adapter_state - Show the adapter's state
 * @dev:        device struct
 * @attr:       device attribute struct
3511  * @buf:        buffer
3512  *
3513  * Return value:
3514  *      number of bytes printed to buffer
3515  **/
3516 static ssize_t ipr_show_adapter_state(struct device *dev,
3517                                       struct device_attribute *attr, char *buf)
3518 {
3519         struct Scsi_Host *shost = class_to_shost(dev);
3520         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3521         unsigned long lock_flags = 0;
3522         int len;
3523
3524         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3525         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3526                 len = snprintf(buf, PAGE_SIZE, "offline\n");
3527         else
3528                 len = snprintf(buf, PAGE_SIZE, "online\n");
3529         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3530         return len;
3531 }
3532
3533 /**
3534  * ipr_store_adapter_state - Change adapter state
 * @dev:        device struct
 * @attr:       device attribute struct
3536  * @buf:        buffer
3537  * @count:      buffer size
3538  *
3539  * This function will change the adapter's state.
3540  *
3541  * Return value:
3542  *      count on success / other on failure
3543  **/
3544 static ssize_t ipr_store_adapter_state(struct device *dev,
3545                                        struct device_attribute *attr,
3546                                        const char *buf, size_t count)
3547 {
3548         struct Scsi_Host *shost = class_to_shost(dev);
3549         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3550         unsigned long lock_flags;
3551         int result = count, i;
3552
3553         if (!capable(CAP_SYS_ADMIN))
3554                 return -EACCES;
3555
3556         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3557         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3558             !strncmp(buf, "online", 6)) {
3559                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3560                         spin_lock(&ioa_cfg->hrrq[i]._lock);
3561                         ioa_cfg->hrrq[i].ioa_is_dead = 0;
3562                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
3563                 }
3564                 wmb();
3565                 ioa_cfg->reset_retries = 0;
3566                 ioa_cfg->in_ioa_bringdown = 0;
3567                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3568         }
3569         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3570         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3571
3572         return result;
3573 }
3574
3575 static struct device_attribute ipr_ioa_state_attr = {
3576         .attr = {
3577                 .name =         "online_state",
3578                 .mode =         S_IRUGO | S_IWUSR,
3579         },
3580         .show = ipr_show_adapter_state,
3581         .store = ipr_store_adapter_state
3582 };
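
/*
 * Example sysfs usage (sketch; host number is system-specific):
 *
 *   echo online > /sys/class/scsi_host/host0/online_state
 *
 * Only the "online" transition is acted upon, and only when the
 * adapter is currently marked dead.
 */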
3583
3584 /**
3585  * ipr_store_reset_adapter - Reset the adapter
 * @dev:        device struct
 * @attr:       device attribute struct
3587  * @buf:        buffer
3588  * @count:      buffer size
3589  *
3590  * This function will reset the adapter.
3591  *
3592  * Return value:
3593  *      count on success / other on failure
3594  **/
3595 static ssize_t ipr_store_reset_adapter(struct device *dev,
3596                                        struct device_attribute *attr,
3597                                        const char *buf, size_t count)
3598 {
3599         struct Scsi_Host *shost = class_to_shost(dev);
3600         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3601         unsigned long lock_flags;
3602         int result = count;
3603
3604         if (!capable(CAP_SYS_ADMIN))
3605                 return -EACCES;
3606
3607         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3608         if (!ioa_cfg->in_reset_reload)
3609                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3610         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3611         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3612
3613         return result;
3614 }
3615
3616 static struct device_attribute ipr_ioa_reset_attr = {
3617         .attr = {
3618                 .name =         "reset_host",
3619                 .mode =         S_IWUSR,
3620         },
3621         .store = ipr_store_reset_adapter
3622 };
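
/*
 * Example sysfs usage (sketch; host number is system-specific):
 *
 *   echo 1 > /sys/class/scsi_host/host0/reset_host
 *
 * A write of any value triggers a normal shutdown and reset unless
 * a reset/reload is already in progress.
 */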
3623
3624 static int ipr_iopoll(struct blk_iopoll *iop, int budget);
/**
 * ipr_show_iopoll_weight - Show ipr polling mode
 * @dev:        class device struct
 * @attr:       device attribute struct
3628  * @buf:        buffer
3629  *
3630  * Return value:
3631  *      number of bytes printed to buffer
3632  **/
3633 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3634                                    struct device_attribute *attr, char *buf)
3635 {
3636         struct Scsi_Host *shost = class_to_shost(dev);
3637         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3638         unsigned long lock_flags = 0;
3639         int len;
3640
3641         spin_lock_irqsave(shost->host_lock, lock_flags);
3642         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3643         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3644
3645         return len;
3646 }
3647
3648 /**
3649  * ipr_store_iopoll_weight - Change the adapter's polling mode
 * @dev:        class device struct
 * @attr:       device attribute struct
 * @buf:        buffer
 * @count:      buffer size
 *
 * Return value:
 *      number of bytes consumed from the buffer / -EINVAL on failure
3655  **/
3656 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3657                                         struct device_attribute *attr,
3658                                         const char *buf, size_t count)
3659 {
3660         struct Scsi_Host *shost = class_to_shost(dev);
3661         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3662         unsigned long user_iopoll_weight;
3663         unsigned long lock_flags = 0;
3664         int i;
3665
3666         if (!ioa_cfg->sis64) {
3667                 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
3668                 return -EINVAL;
3669         }
3670         if (kstrtoul(buf, 10, &user_iopoll_weight))
3671                 return -EINVAL;
3672
3673         if (user_iopoll_weight > 256) {
		dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must not exceed 256\n");
3675                 return -EINVAL;
3676         }
3677
3678         if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
		dev_info(&ioa_cfg->pdev->dev, "Specified blk-iopoll weight matches the current weight\n");
3680                 return strlen(buf);
3681         }
3682
3683         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3684                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3685                         blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
3686         }
3687
3688         spin_lock_irqsave(shost->host_lock, lock_flags);
3689         ioa_cfg->iopoll_weight = user_iopoll_weight;
3690         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3691                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3692                         blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
3693                                         ioa_cfg->iopoll_weight, ipr_iopoll);
3694                         blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
3695                 }
3696         }
3697         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3698
3699         return strlen(buf);
3700 }
3701
3702 static struct device_attribute ipr_iopoll_weight_attr = {
3703         .attr = {
3704                 .name =         "iopoll_weight",
3705                 .mode =         S_IRUGO | S_IWUSR,
3706         },
3707         .show = ipr_show_iopoll_weight,
3708         .store = ipr_store_iopoll_weight
3709 };
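
/*
 * Example sysfs usage (sketch; host number is system-specific):
 *
 *   echo 64 > /sys/class/scsi_host/host0/iopoll_weight
 *
 * A non-zero weight enables blk-iopoll on SIS64 adapters with
 * multiple HRRQs; 0 reverts to interrupt-driven completion.
 */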
3710
3711 /**
3712  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3713  * @buf_len:            buffer length
3714  *
3715  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3716  * list to use for microcode download
3717  *
3718  * Return value:
3719  *      pointer to sglist / NULL on failure
3720  **/
3721 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3722 {
3723         int sg_size, order, bsize_elem, num_elem, i, j;
3724         struct ipr_sglist *sglist;
3725         struct scatterlist *scatterlist;
3726         struct page *page;
3727
3728         /* Get the minimum size per scatter/gather element */
3729         sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3730
3731         /* Get the actual size per element */
3732         order = get_order(sg_size);
3733
3734         /* Determine the actual number of bytes per element */
3735         bsize_elem = PAGE_SIZE * (1 << order);
3736
3737         /* Determine the actual number of sg entries needed */
3738         if (buf_len % bsize_elem)
3739                 num_elem = (buf_len / bsize_elem) + 1;
3740         else
3741                 num_elem = buf_len / bsize_elem;
3742
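	/*
	 * Worked example (illustrative only, assuming PAGE_SIZE == 4K
	 * and IPR_MAX_SGLIST == 64): a 1 MB image gives sg_size of
	 * 1 MB / 63 (about 16.6 KB), get_order() rounds that up to
	 * order 3 (32 KB per element), so num_elem = 32 entries.
	 */
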
3743         /* Allocate a scatter/gather list for the DMA */
3744         sglist = kzalloc(sizeof(struct ipr_sglist) +
3745                          (sizeof(struct scatterlist) * (num_elem - 1)),
3746                          GFP_KERNEL);
3747
3748         if (sglist == NULL) {
3749                 ipr_trace;
3750                 return NULL;
3751         }
3752
3753         scatterlist = sglist->scatterlist;
3754         sg_init_table(scatterlist, num_elem);
3755
3756         sglist->order = order;
3757         sglist->num_sg = num_elem;
3758
3759         /* Allocate a bunch of sg elements */
3760         for (i = 0; i < num_elem; i++) {
3761                 page = alloc_pages(GFP_KERNEL, order);
3762                 if (!page) {
3763                         ipr_trace;
3764
3765                         /* Free up what we already allocated */
3766                         for (j = i - 1; j >= 0; j--)
3767                                 __free_pages(sg_page(&scatterlist[j]), order);
3768                         kfree(sglist);
3769                         return NULL;
3770                 }
3771
3772                 sg_set_page(&scatterlist[i], page, 0, 0);
3773         }
3774
3775         return sglist;
3776 }
3777
3778 /**
3779  * ipr_free_ucode_buffer - Frees a microcode download buffer
 * @sglist:             scatter/gather list pointer
3781  *
3782  * Free a DMA'able ucode download buffer previously allocated with
3783  * ipr_alloc_ucode_buffer
3784  *
3785  * Return value:
3786  *      nothing
3787  **/
3788 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3789 {
3790         int i;
3791
3792         for (i = 0; i < sglist->num_sg; i++)
3793                 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3794
3795         kfree(sglist);
3796 }
3797
3798 /**
3799  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3800  * @sglist:             scatter/gather list pointer
3801  * @buffer:             buffer pointer
3802  * @len:                buffer length
3803  *
3804  * Copy a microcode image from a user buffer into a buffer allocated by
3805  * ipr_alloc_ucode_buffer
3806  *
3807  * Return value:
3808  *      0 on success / other on failure
3809  **/
3810 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3811                                  u8 *buffer, u32 len)
3812 {
3813         int bsize_elem, i, result = 0;
3814         struct scatterlist *scatterlist;
3815         void *kaddr;
3816
3817         /* Determine the actual number of bytes per element */
3818         bsize_elem = PAGE_SIZE * (1 << sglist->order);
3819
3820         scatterlist = sglist->scatterlist;
3821
3822         for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3823                 struct page *page = sg_page(&scatterlist[i]);
3824
3825                 kaddr = kmap(page);
3826                 memcpy(kaddr, buffer, bsize_elem);
3827                 kunmap(page);
3828
		scatterlist[i].length = bsize_elem;
	}
3836
3837         if (len % bsize_elem) {
3838                 struct page *page = sg_page(&scatterlist[i]);
3839
3840                 kaddr = kmap(page);
3841                 memcpy(kaddr, buffer, len % bsize_elem);
3842                 kunmap(page);
3843
3844                 scatterlist[i].length = len % bsize_elem;
3845         }
3846
3847         sglist->buffer_len = len;
3848         return result;
3849 }
3850
3851 /**
3852  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3853  * @ipr_cmd:            ipr command struct
3854  * @sglist:             scatter/gather list
3855  *
3856  * Builds a microcode download IOA data list (IOADL).
3857  *
3858  **/
3859 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3860                                     struct ipr_sglist *sglist)
3861 {
3862         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3863         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3864         struct scatterlist *scatterlist = sglist->scatterlist;
3865         int i;
3866
3867         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3868         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3869         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3870
3871         ioarcb->ioadl_len =
3872                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3873         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3874                 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3875                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3876                 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3877         }
3878
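	/* Flag the final descriptor so the IOA knows where the list ends */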
3879         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3880 }
3881
3882 /**
3883  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3884  * @ipr_cmd:    ipr command struct
3885  * @sglist:             scatter/gather list
3886  *
3887  * Builds a microcode download IOA data list (IOADL).
3888  *
3889  **/
3890 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3891                                   struct ipr_sglist *sglist)
3892 {
3893         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3894         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3895         struct scatterlist *scatterlist = sglist->scatterlist;
3896         int i;
3897
3898         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3899         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3900         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3901
3902         ioarcb->ioadl_len =
3903                 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3904
3905         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3906                 ioadl[i].flags_and_data_len =
3907                         cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3908                 ioadl[i].address =
3909                         cpu_to_be32(sg_dma_address(&scatterlist[i]));
3910         }
3911
3912         ioadl[i-1].flags_and_data_len |=
3913                 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3914 }
3915
3916 /**
3917  * ipr_update_ioa_ucode - Update IOA's microcode
3918  * @ioa_cfg:    ioa config struct
3919  * @sglist:             scatter/gather list
3920  *
3921  * Initiate an adapter reset to update the IOA's microcode
3922  *
3923  * Return value:
3924  *      0 on success / -EIO on failure
3925  **/
3926 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3927                                 struct ipr_sglist *sglist)
3928 {
3929         unsigned long lock_flags;
3930
3931         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3932         while (ioa_cfg->in_reset_reload) {
3933                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3934                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3935                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3936         }
3937
3938         if (ioa_cfg->ucode_sglist) {
3939                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3940                 dev_err(&ioa_cfg->pdev->dev,
3941                         "Microcode download already in progress\n");
3942                 return -EIO;
3943         }
3944
3945         sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
3946                                         sglist->scatterlist, sglist->num_sg,
3947                                         DMA_TO_DEVICE);
3948
3949         if (!sglist->num_dma_sg) {
3950                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3951                 dev_err(&ioa_cfg->pdev->dev,
3952                         "Failed to map microcode download buffer!\n");
3953                 return -EIO;
3954         }
3955
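	/*
	 * Stash the mapped image; the reset job picks up ucode_sglist
	 * and performs the actual download during the reset sequence.
	 */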
3956         ioa_cfg->ucode_sglist = sglist;
3957         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3958         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3959         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3960
3961         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3962         ioa_cfg->ucode_sglist = NULL;
3963         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3964         return 0;
3965 }
3966
3967 /**
3968  * ipr_store_update_fw - Update the firmware on the adapter
 * @dev:        device struct
 * @attr:       device attribute struct
3970  * @buf:        buffer
3971  * @count:      buffer size
3972  *
3973  * This function will update the firmware on the adapter.
3974  *
3975  * Return value:
3976  *      count on success / other on failure
3977  **/
3978 static ssize_t ipr_store_update_fw(struct device *dev,
3979                                    struct device_attribute *attr,
3980                                    const char *buf, size_t count)
3981 {
3982         struct Scsi_Host *shost = class_to_shost(dev);
3983         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3984         struct ipr_ucode_image_header *image_hdr;
3985         const struct firmware *fw_entry;
3986         struct ipr_sglist *sglist;
3987         char fname[100];
	u8 *src;
3989         int len, result, dnld_size;
3990
3991         if (!capable(CAP_SYS_ADMIN))
3992                 return -EACCES;
3993
	snprintf(fname, sizeof(fname), "%s", buf);
	len = strlen(fname);
	if (len && fname[len - 1] == '\n')
		fname[len - 1] = '\0';
3996
3997         if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3998                 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3999                 return -EIO;
4000         }
4001
4002         image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4003
4004         src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4005         dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4006         sglist = ipr_alloc_ucode_buffer(dnld_size);
4007
4008         if (!sglist) {
4009                 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4010                 release_firmware(fw_entry);
4011                 return -ENOMEM;
4012         }
4013
4014         result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4015
4016         if (result) {
4017                 dev_err(&ioa_cfg->pdev->dev,
4018                         "Microcode buffer copy to DMA buffer failed\n");
4019                 goto out;
4020         }
4021
4022         ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
4023
4024         result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4025
4026         if (!result)
4027                 result = count;
4028 out:
4029         ipr_free_ucode_buffer(sglist);
4030         release_firmware(fw_entry);
4031         return result;
4032 }
4033
4034 static struct device_attribute ipr_update_fw_attr = {
4035         .attr = {
4036                 .name =         "update_fw",
4037                 .mode =         S_IWUSR,
4038         },
4039         .store = ipr_store_update_fw
4040 };
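
/*
 * Example sysfs usage (sketch; the image name is whatever firmware
 * file request_firmware() can locate, e.g. under /lib/firmware):
 *
 *   echo ibm-ucode.bin > /sys/class/scsi_host/host0/update_fw
 */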
4041
4042 /**
4043  * ipr_show_fw_type - Show the adapter's firmware type.
 * @dev:        class device struct
 * @attr:       device attribute struct
4045  * @buf:        buffer
4046  *
4047  * Return value:
4048  *      number of bytes printed to buffer
4049  **/
4050 static ssize_t ipr_show_fw_type(struct device *dev,
4051                                 struct device_attribute *attr, char *buf)
4052 {
4053         struct Scsi_Host *shost = class_to_shost(dev);
4054         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4055         unsigned long lock_flags = 0;
4056         int len;
4057
4058         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4059         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4060         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4061         return len;
4062 }
4063
4064 static struct device_attribute ipr_ioa_fw_type_attr = {
4065         .attr = {
4066                 .name =         "fw_type",
4067                 .mode =         S_IRUGO,
4068         },
4069         .show = ipr_show_fw_type
4070 };
4071
4072 static struct device_attribute *ipr_ioa_attrs[] = {
4073         &ipr_fw_version_attr,
4074         &ipr_log_level_attr,
4075         &ipr_diagnostics_attr,
4076         &ipr_ioa_state_attr,
4077         &ipr_ioa_reset_attr,
4078         &ipr_update_fw_attr,
4079         &ipr_ioa_fw_type_attr,
4080         &ipr_iopoll_weight_attr,
4081         NULL,
4082 };
4083
4084 #ifdef CONFIG_SCSI_IPR_DUMP
4085 /**
4086  * ipr_read_dump - Dump the adapter
4087  * @filp:               open sysfs file
4088  * @kobj:               kobject struct
4089  * @bin_attr:           bin_attribute struct
4090  * @buf:                buffer
4091  * @off:                offset
4092  * @count:              buffer size
4093  *
4094  * Return value:
4095  *      number of bytes printed to buffer
4096  **/
4097 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4098                              struct bin_attribute *bin_attr,
4099                              char *buf, loff_t off, size_t count)
4100 {
4101         struct device *cdev = container_of(kobj, struct device, kobj);
4102         struct Scsi_Host *shost = class_to_shost(cdev);
4103         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4104         struct ipr_dump *dump;
4105         unsigned long lock_flags = 0;
4106         char *src;
4107         int len, sdt_end;
4108         size_t rc = count;
4109
4110         if (!capable(CAP_SYS_ADMIN))
4111                 return -EACCES;
4112
4113         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4114         dump = ioa_cfg->dump;
4115
4116         if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4117                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4118                 return 0;
4119         }
4120         kref_get(&dump->kref);
4121         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4122
4123         if (off > dump->driver_dump.hdr.len) {
4124                 kref_put(&dump->kref, ipr_release_dump);
4125                 return 0;
4126         }
4127
4128         if (off + count > dump->driver_dump.hdr.len) {
4129                 count = dump->driver_dump.hdr.len - off;
4130                 rc = count;
4131         }
4132
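        /*
         * The dump is copied out in three pieces, in order: the driver
         * dump header, the IOA dump SDT, and finally the IOA data pages.
         * 'off' is rebased into each region as the previous one is
         * consumed.
         */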
4133         if (count && off < sizeof(dump->driver_dump)) {
4134                 if (off + count > sizeof(dump->driver_dump))
4135                         len = sizeof(dump->driver_dump) - off;
4136                 else
4137                         len = count;
4138                 src = (u8 *)&dump->driver_dump + off;
4139                 memcpy(buf, src, len);
4140                 buf += len;
4141                 off += len;
4142                 count -= len;
4143         }
4144
4145         off -= sizeof(dump->driver_dump);
4146
4147         if (ioa_cfg->sis64)
4148                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4149                           (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4150                            sizeof(struct ipr_sdt_entry));
4151         else
4152                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4153                           (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4154
4155         if (count && off < sdt_end) {
4156                 if (off + count > sdt_end)
4157                         len = sdt_end - off;
4158                 else
4159                         len = count;
4160                 src = (u8 *)&dump->ioa_dump + off;
4161                 memcpy(buf, src, len);
4162                 buf += len;
4163                 off += len;
4164                 count -= len;
4165         }
4166
4167         off -= sdt_end;
4168
4169         while (count) {
4170                 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4171                         len = PAGE_ALIGN(off) - off;
4172                 else
4173                         len = count;
4174                 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4175                 src += off & ~PAGE_MASK;
4176                 memcpy(buf, src, len);
4177                 buf += len;
4178                 off += len;
4179                 count -= len;
4180         }
4181
4182         kref_put(&dump->kref, ipr_release_dump);
4183         return rc;
4184 }
4185
4186 /**
4187  * ipr_alloc_dump - Prepare for adapter dump
4188  * @ioa_cfg:    ioa config struct
4189  *
4190  * Return value:
4191  *      0 on success / other on failure
4192  **/
4193 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4194 {
4195         struct ipr_dump *dump;
4196         __be32 **ioa_data;
4197         unsigned long lock_flags = 0;
4198
4199         dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4200
4201         if (!dump) {
4202                 ipr_err("Dump memory allocation failed\n");
4203                 return -ENOMEM;
4204         }
4205
4206         if (ioa_cfg->sis64)
4207                 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4208         else
4209                 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4210
4211         if (!ioa_data) {
4212                 ipr_err("Dump memory allocation failed\n");
4213                 kfree(dump);
4214                 return -ENOMEM;
4215         }
4216
4217         dump->ioa_dump.ioa_data = ioa_data;
4218
4219         kref_init(&dump->kref);
4220         dump->ioa_cfg = ioa_cfg;
4221
4222         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4223
4224         if (INACTIVE != ioa_cfg->sdt_state) {
4225                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4226                 vfree(dump->ioa_dump.ioa_data);
4227                 kfree(dump);
4228                 return 0;
4229         }
4230
4231         ioa_cfg->dump = dump;
4232         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4233         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4234                 ioa_cfg->dump_taken = 1;
4235                 schedule_work(&ioa_cfg->work_q);
4236         }
4237         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4238
4239         return 0;
4240 }
4241
4242 /**
4243  * ipr_free_dump - Free adapter dump memory
4244  * @ioa_cfg:    ioa config struct
4245  *
4246  * Return value:
4247  *      0 on success / other on failure
4248  **/
4249 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4250 {
4251         struct ipr_dump *dump;
4252         unsigned long lock_flags = 0;
4253
4254         ENTER;
4255
4256         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4257         dump = ioa_cfg->dump;
4258         if (!dump) {
4259                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4260                 return 0;
4261         }
4262
4263         ioa_cfg->dump = NULL;
4264         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4265
4266         kref_put(&dump->kref, ipr_release_dump);
4267
4268         LEAVE;
4269         return 0;
4270 }
4271
4272 /**
4273  * ipr_write_dump - Setup dump state of adapter
4274  * @filp:               open sysfs file
4275  * @kobj:               kobject struct
4276  * @bin_attr:           bin_attribute struct
4277  * @buf:                buffer
4278  * @off:                offset
4279  * @count:              buffer size
4280  *
4281  * Return value:
4282  *      count on success / other on failure
4283  **/
4284 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4285                               struct bin_attribute *bin_attr,
4286                               char *buf, loff_t off, size_t count)
4287 {
4288         struct device *cdev = container_of(kobj, struct device, kobj);
4289         struct Scsi_Host *shost = class_to_shost(cdev);
4290         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4291         int rc;
4292
4293         if (!capable(CAP_SYS_ADMIN))
4294                 return -EACCES;
4295
4296         if (buf[0] == '1')
4297                 rc = ipr_alloc_dump(ioa_cfg);
4298         else if (buf[0] == '0')
4299                 rc = ipr_free_dump(ioa_cfg);
4300         else
4301                 return -EINVAL;
4302
4303         if (rc)
4304                 return rc;
4305         else
4306                 return count;
4307 }
4308
4309 static struct bin_attribute ipr_dump_attr = {
4310         .attr = {
4311                 .name = "dump",
4312                 .mode = S_IRUSR | S_IWUSR,
4313         },
4314         .size = 0,
4315         .read = ipr_read_dump,
4316         .write = ipr_write_dump
4317 };
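/*
 * Example usage (a hedged sketch; the host number is system dependent):
 *
 *	echo 1 > /sys/class/scsi_host/host0/dump	# set up for a dump
 *	cat /sys/class/scsi_host/host0/dump > ipr.dump	# read the dump out
 *	echo 0 > /sys/class/scsi_host/host0/dump	# free the dump memory
 */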
4318 #else
4319 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4320 #endif
4321
4322 /**
4323  * ipr_change_queue_depth - Change the device's queue depth
4324  * @sdev:       scsi device struct
4325  * @qdepth:     depth to set
4327  *
4328  * Return value:
4329  *      actual depth set
4330  **/
4331 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
4332 {
4333         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4334         struct ipr_resource_entry *res;
4335         unsigned long lock_flags = 0;
4336
4337         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4338         res = (struct ipr_resource_entry *)sdev->hostdata;
4339
4340         if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4341                 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4342         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4343
4344         scsi_change_queue_depth(sdev, qdepth);
4345         return sdev->queue_depth;
4346 }
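/*
 * This is reached through the standard SCSI sysfs queue_depth attribute,
 * e.g. (a hedged sketch; the device name is system dependent):
 *
 *	echo 16 > /sys/block/sda/device/queue_depth
 *
 * For SATA devices the requested depth is capped at IPR_MAX_CMD_PER_ATA_LUN.
 */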
4347
4348 /**
4349  * ipr_change_queue_type - Change the device's queue type
4350  * @sdev:               scsi device struct
4351  * @tag_type:   type of tags to use
4352  *
4353  * Return value:
4354  *      actual queue type set
4355  **/
4356 static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4357 {
4358         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4359         struct ipr_resource_entry *res;
4360         unsigned long lock_flags = 0;
4361
4362         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4363         res = (struct ipr_resource_entry *)sdev->hostdata;
4364         if (res && ipr_is_gscsi(res))
4365                 tag_type = scsi_change_queue_type(sdev, tag_type);
4366         else
4367                 tag_type = 0;
4368         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4369         return tag_type;
4370 }
4371
4372 /**
4373  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4374  * @dev:        device struct
4375  * @attr:       device attribute structure
4376  * @buf:        buffer
4377  *
4378  * Return value:
4379  *      number of bytes printed to buffer
4380  **/
4381 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4382 {
4383         struct scsi_device *sdev = to_scsi_device(dev);
4384         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4385         struct ipr_resource_entry *res;
4386         unsigned long lock_flags = 0;
4387         ssize_t len = -ENXIO;
4388
4389         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4390         res = (struct ipr_resource_entry *)sdev->hostdata;
4391         if (res)
4392                 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4393         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4394         return len;
4395 }
4396
4397 static struct device_attribute ipr_adapter_handle_attr = {
4398         .attr = {
4399                 .name =         "adapter_handle",
4400                 .mode =         S_IRUSR,
4401         },
4402         .show = ipr_show_adapter_handle
4403 };
4404
4405 /**
4406  * ipr_show_resource_path - Show the resource path or the resource address for
4407  *                          this device.
4408  * @dev:        device struct
4409  * @attr:       device attribute structure
4410  * @buf:        buffer
4411  *
4412  * Return value:
4413  *      number of bytes printed to buffer
4414  **/
4415 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4416 {
4417         struct scsi_device *sdev = to_scsi_device(dev);
4418         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4419         struct ipr_resource_entry *res;
4420         unsigned long lock_flags = 0;
4421         ssize_t len = -ENXIO;
4422         char buffer[IPR_MAX_RES_PATH_LENGTH];
4423
4424         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4425         res = (struct ipr_resource_entry *)sdev->hostdata;
4426         if (res && ioa_cfg->sis64)
4427                 len = snprintf(buf, PAGE_SIZE, "%s\n",
4428                                __ipr_format_res_path(res->res_path, buffer,
4429                                                      sizeof(buffer)));
4430         else if (res)
4431                 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4432                                res->bus, res->target, res->lun);
4433
4434         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4435         return len;
4436 }
4437
4438 static struct device_attribute ipr_resource_path_attr = {
4439         .attr = {
4440                 .name =         "resource_path",
4441                 .mode =         S_IRUGO,
4442         },
4443         .show = ipr_show_resource_path
4444 };
4445
4446 /**
4447  * ipr_show_device_id - Show the device_id for this device.
4448  * @dev:        device struct
4449  * @attr:       device attribute structure
4450  * @buf:        buffer
4451  *
4452  * Return value:
4453  *      number of bytes printed to buffer
4454  **/
4455 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4456 {
4457         struct scsi_device *sdev = to_scsi_device(dev);
4458         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4459         struct ipr_resource_entry *res;
4460         unsigned long lock_flags = 0;
4461         ssize_t len = -ENXIO;
4462
4463         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4464         res = (struct ipr_resource_entry *)sdev->hostdata;
4465         if (res && ioa_cfg->sis64)
4466                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4467         else if (res)
4468                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4469
4470         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4471         return len;
4472 }
4473
4474 static struct device_attribute ipr_device_id_attr = {
4475         .attr = {
4476                 .name =         "device_id",
4477                 .mode =         S_IRUGO,
4478         },
4479         .show = ipr_show_device_id
4480 };
4481
4482 /**
4483  * ipr_show_resource_type - Show the resource type for this device.
4484  * @dev:        device struct
4485  * @attr:       device attribute structure
4486  * @buf:        buffer
4487  *
4488  * Return value:
4489  *      number of bytes printed to buffer
4490  **/
4491 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4492 {
4493         struct scsi_device *sdev = to_scsi_device(dev);
4494         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4495         struct ipr_resource_entry *res;
4496         unsigned long lock_flags = 0;
4497         ssize_t len = -ENXIO;
4498
4499         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4500         res = (struct ipr_resource_entry *)sdev->hostdata;
4501
4502         if (res)
4503                 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4504
4505         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4506         return len;
4507 }
4508
4509 static struct device_attribute ipr_resource_type_attr = {
4510         .attr = {
4511                 .name =         "resource_type",
4512                 .mode =         S_IRUGO,
4513         },
4514         .show = ipr_show_resource_type
4515 };
4516
4517 static struct device_attribute *ipr_dev_attrs[] = {
4518         &ipr_adapter_handle_attr,
4519         &ipr_resource_path_attr,
4520         &ipr_device_id_attr,
4521         &ipr_resource_type_attr,
4522         NULL,
4523 };
4524
4525 /**
4526  * ipr_biosparam - Return the HSC mapping
4527  * @sdev:                       scsi device struct
4528  * @block_device:       block device pointer
4529  * @capacity:           capacity of the device
4530  * @parm:                       Array containing returned HSC values.
4531  *
4532  * This function generates the HSC parms that fdisk uses.
4533  * We want to make sure we return something that places partitions
4534  * on 4k boundaries for best performance with the IOA.
4535  *
4536  * Return value:
4537  *      0 on success
4538  **/
4539 static int ipr_biosparam(struct scsi_device *sdev,
4540                          struct block_device *block_device,
4541                          sector_t capacity, int *parm)
4542 {
4543         int heads, sectors;
4544         sector_t cylinders;
4545
4546         heads = 128;
4547         sectors = 32;
4548
4549         cylinders = capacity;
4550         sector_div(cylinders, (128 * 32));
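        /*
         * Worked example: a 143374000 sector (~68 GiB) disk reports
         * 128 heads, 32 sectors/track and 143374000 / (128 * 32) = 35003
         * cylinders, so each cylinder is 4096 sectors (2 MiB) and stays
         * 4k aligned.
         */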
4551
4552         /* return result */
4553         parm[0] = heads;
4554         parm[1] = sectors;
4555         parm[2] = cylinders;
4556
4557         return 0;
4558 }
4559
4560 /**
4561  * ipr_find_starget - Find target based on bus/target.
4562  * @starget:    scsi target struct
4563  *
4564  * Return value:
4565  *      resource entry pointer if found / NULL if not found
4566  **/
4567 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4568 {
4569         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4570         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4571         struct ipr_resource_entry *res;
4572
4573         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4574                 if ((res->bus == starget->channel) &&
4575                     (res->target == starget->id)) {
4576                         return res;
4577                 }
4578         }
4579
4580         return NULL;
4581 }
4582
4583 static struct ata_port_info sata_port_info;
4584
4585 /**
4586  * ipr_target_alloc - Prepare for commands to a SCSI target
4587  * @starget:    scsi target struct
4588  *
4589  * If the device is a SATA device, this function allocates an
4590  * ATA port with libata, else it does nothing.
4591  *
4592  * Return value:
4593  *      0 on success / non-0 on failure
4594  **/
4595 static int ipr_target_alloc(struct scsi_target *starget)
4596 {
4597         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4598         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4599         struct ipr_sata_port *sata_port;
4600         struct ata_port *ap;
4601         struct ipr_resource_entry *res;
4602         unsigned long lock_flags;
4603
4604         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4605         res = ipr_find_starget(starget);
4606         starget->hostdata = NULL;
4607
4608         if (res && ipr_is_gata(res)) {
4609                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4610                 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4611                 if (!sata_port)
4612                         return -ENOMEM;
4613
4614                 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4615                 if (ap) {
4616                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4617                         sata_port->ioa_cfg = ioa_cfg;
4618                         sata_port->ap = ap;
4619                         sata_port->res = res;
4620
4621                         res->sata_port = sata_port;
4622                         ap->private_data = sata_port;
4623                         starget->hostdata = sata_port;
4624                 } else {
4625                         kfree(sata_port);
4626                         return -ENOMEM;
4627                 }
4628         }
4629         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4630
4631         return 0;
4632 }
4633
4634 /**
4635  * ipr_target_destroy - Destroy a SCSI target
4636  * @starget:    scsi target struct
4637  *
4638  * If the device was a SATA device, this function frees the libata
4639  * ATA port, else it does nothing.
4640  *
4641  **/
4642 static void ipr_target_destroy(struct scsi_target *starget)
4643 {
4644         struct ipr_sata_port *sata_port = starget->hostdata;
4645         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4646         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4647
4648         if (ioa_cfg->sis64) {
4649                 if (!ipr_find_starget(starget)) {
4650                         if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4651                                 clear_bit(starget->id, ioa_cfg->array_ids);
4652                         else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4653                                 clear_bit(starget->id, ioa_cfg->vset_ids);
4654                         else if (starget->channel == 0)
4655                                 clear_bit(starget->id, ioa_cfg->target_ids);
4656                 }
4657         }
4658
4659         if (sata_port) {
4660                 starget->hostdata = NULL;
4661                 ata_sas_port_destroy(sata_port->ap);
4662                 kfree(sata_port);
4663         }
4664 }
4665
4666 /**
4667  * ipr_find_sdev - Find device based on bus/target/lun.
4668  * @sdev:       scsi device struct
4669  *
4670  * Return value:
4671  *      resource entry pointer if found / NULL if not found
4672  **/
4673 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4674 {
4675         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4676         struct ipr_resource_entry *res;
4677
4678         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4679                 if ((res->bus == sdev->channel) &&
4680                     (res->target == sdev->id) &&
4681                     (res->lun == sdev->lun))
4682                         return res;
4683         }
4684
4685         return NULL;
4686 }
4687
4688 /**
4689  * ipr_slave_destroy - Unconfigure a SCSI device
4690  * @sdev:       scsi device struct
4691  *
4692  * Return value:
4693  *      nothing
4694  **/
4695 static void ipr_slave_destroy(struct scsi_device *sdev)
4696 {
4697         struct ipr_resource_entry *res;
4698         struct ipr_ioa_cfg *ioa_cfg;
4699         unsigned long lock_flags = 0;
4700
4701         ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4702
4703         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4704         res = (struct ipr_resource_entry *) sdev->hostdata;
4705         if (res) {
4706                 if (res->sata_port)
4707                         res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4708                 sdev->hostdata = NULL;
4709                 res->sdev = NULL;
4710                 res->sata_port = NULL;
4711         }
4712         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4713 }
4714
4715 /**
4716  * ipr_slave_configure - Configure a SCSI device
4717  * @sdev:       scsi device struct
4718  *
4719  * This function configures the specified scsi device.
4720  *
4721  * Return value:
4722  *      0 on success
4723  **/
4724 static int ipr_slave_configure(struct scsi_device *sdev)
4725 {
4726         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4727         struct ipr_resource_entry *res;
4728         struct ata_port *ap = NULL;
4729         unsigned long lock_flags = 0;
4730         char buffer[IPR_MAX_RES_PATH_LENGTH];
4731
4732         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4733         res = sdev->hostdata;
4734         if (res) {
4735                 if (ipr_is_af_dasd_device(res))
4736                         sdev->type = TYPE_RAID;
4737                 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4738                         sdev->scsi_level = 4;
4739                         sdev->no_uld_attach = 1;
4740                 }
4741                 if (ipr_is_vset_device(res)) {
4742                         blk_queue_rq_timeout(sdev->request_queue,
4743                                              IPR_VSET_RW_TIMEOUT);
4744                         blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4745                 }
4746                 if (ipr_is_gata(res) && res->sata_port)
4747                         ap = res->sata_port->ap;
4748                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4749
4750                 if (ap) {
4751                         scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
4752                         ata_sas_slave_configure(sdev, ap);
4753                 }
4754
4755                 if (ioa_cfg->sis64)
4756                         sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4757                                     ipr_format_res_path(ioa_cfg,
4758                                 res->res_path, buffer, sizeof(buffer)));
4759                 return 0;
4760         }
4761         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4762         return 0;
4763 }
4764
4765 /**
4766  * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4767  * @sdev:       scsi device struct
4768  *
4769  * This function initializes an ATA port so that future commands
4770  * sent through queuecommand will work.
4771  *
4772  * Return value:
4773  *      0 on success
4774  **/
4775 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4776 {
4777         struct ipr_sata_port *sata_port = NULL;
4778         int rc = -ENXIO;
4779
4780         ENTER;
4781         if (sdev->sdev_target)
4782                 sata_port = sdev->sdev_target->hostdata;
4783         if (sata_port) {
4784                 rc = ata_sas_port_init(sata_port->ap);
4785                 if (rc == 0)
4786                         rc = ata_sas_sync_probe(sata_port->ap);
4787         }
4788
4789         if (rc)
4790                 ipr_slave_destroy(sdev);
4791
4792         LEAVE;
4793         return rc;
4794 }
4795
4796 /**
4797  * ipr_slave_alloc - Prepare for commands to a device.
4798  * @sdev:       scsi device struct
4799  *
4800  * This function saves a pointer to the resource entry
4801  * in the scsi device struct if the device exists. We
4802  * can then use this pointer in ipr_queuecommand when
4803  * handling new commands.
4804  *
4805  * Return value:
4806  *      0 on success / -ENXIO if device does not exist
4807  **/
4808 static int ipr_slave_alloc(struct scsi_device *sdev)
4809 {
4810         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4811         struct ipr_resource_entry *res;
4812         unsigned long lock_flags;
4813         int rc = -ENXIO;
4814
4815         sdev->hostdata = NULL;
4816
4817         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4818
4819         res = ipr_find_sdev(sdev);
4820         if (res) {
4821                 res->sdev = sdev;
4822                 res->add_to_ml = 0;
4823                 res->in_erp = 0;
4824                 sdev->hostdata = res;
4825                 if (!ipr_is_naca_model(res))
4826                         res->needs_sync_complete = 1;
4827                 rc = 0;
4828                 if (ipr_is_gata(res)) {
4829                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4830                         return ipr_ata_slave_alloc(sdev);
4831                 }
4832         }
4833
4834         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4835
4836         return rc;
4837 }
4838
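/**
 * ipr_eh_host_reset - Reset the adapter on behalf of SCSI error recovery
 * @cmd:        scsi command struct
 *
 * Return value:
 *      SUCCESS / FAILED
 **/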
4839 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4840 {
4841         struct ipr_ioa_cfg *ioa_cfg;
4842         unsigned long lock_flags = 0;
4843         int rc = SUCCESS;
4844
4845         ENTER;
4846         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4847         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4848
4849         if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4850                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4851                 dev_err(&ioa_cfg->pdev->dev,
4852                         "Adapter being reset as a result of error recovery.\n");
4853
4854                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4855                         ioa_cfg->sdt_state = GET_DUMP;
4856         }
4857
4858         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4859         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4860         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4861
4862         /* If we were already resetting the adapter when this host reset
4863          * was requested and that reset failed, return FAILED. */
4864         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4865                 ipr_trace;
4866                 rc = FAILED;
4867         }
4868
4869         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4870         LEAVE;
4871         return rc;
4872 }
4873
4874 /**
4875  * ipr_device_reset - Reset the device
4876  * @ioa_cfg:    ioa config struct
4877  * @res:                resource entry struct
4878  *
4879  * This function issues a device reset to the affected device.
4880  * If the device is a SCSI device, a LUN reset will be sent
4881  * to the device first. If that does not work, a target reset
4882  * will be sent. If the device is a SATA device, a PHY reset will
4883  * be sent.
4884  *
4885  * Return value:
4886  *      0 on success / non-zero on failure
4887  **/
4888 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4889                             struct ipr_resource_entry *res)
4890 {
4891         struct ipr_cmnd *ipr_cmd;
4892         struct ipr_ioarcb *ioarcb;
4893         struct ipr_cmd_pkt *cmd_pkt;
4894         struct ipr_ioarcb_ata_regs *regs;
4895         u32 ioasc;
4896
4897         ENTER;
4898         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4899         ioarcb = &ipr_cmd->ioarcb;
4900         cmd_pkt = &ioarcb->cmd_pkt;
4901
4902         if (ipr_cmd->ioa_cfg->sis64) {
4903                 regs = &ipr_cmd->i.ata_ioadl.regs;
4904                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4905         } else
4906                 regs = &ioarcb->u.add_data.u.regs;
4907
4908         ioarcb->res_handle = res->res_handle;
4909         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4910         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4911         if (ipr_is_gata(res)) {
4912                 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
4913                 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
4914                 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4915         }
4916
4917         ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4918         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4919         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
4920         if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4921                 if (ipr_cmd->ioa_cfg->sis64)
4922                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4923                                sizeof(struct ipr_ioasa_gata));
4924                 else
4925                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4926                                sizeof(struct ipr_ioasa_gata));
4927         }
4928
4929         LEAVE;
4930         return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
4931 }
4932
4933 /**
4934  * ipr_sata_reset - Reset the SATA port
4935  * @link:       SATA link to reset
4936  * @classes:    class of the attached device
      * @deadline:   unused
4937  *
4938  * This function issues a SATA phy reset to the affected ATA link.
4939  *
4940  * Return value:
4941  *      0 on success / non-zero on failure
4942  **/
4943 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
4944                                 unsigned long deadline)
4945 {
4946         struct ipr_sata_port *sata_port = link->ap->private_data;
4947         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4948         struct ipr_resource_entry *res;
4949         unsigned long lock_flags = 0;
4950         int rc = -ENXIO;
4951
4952         ENTER;
4953         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4954         while (ioa_cfg->in_reset_reload) {
4955                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4956                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4957                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4958         }
4959
4960         res = sata_port->res;
4961         if (res) {
4962                 rc = ipr_device_reset(ioa_cfg, res);
4963                 *classes = res->ata_class;
4964         }
4965
4966         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4967         LEAVE;
4968         return rc;
4969 }
4970
4971 /**
4972  * __ipr_eh_dev_reset - Reset the device
4973  * @scsi_cmd:   scsi command struct
4974  *
4975  * This function issues a device reset to the affected device.
4976  * A LUN reset will be sent to the device first. If that does
4977  * not work, a target reset will be sent.
4978  *
4979  * Return value:
4980  *      SUCCESS / FAILED
4981  **/
4982 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
4983 {
4984         struct ipr_cmnd *ipr_cmd;
4985         struct ipr_ioa_cfg *ioa_cfg;
4986         struct ipr_resource_entry *res;
4987         struct ata_port *ap;
4988         int rc = 0;
4989         struct ipr_hrr_queue *hrrq;
4990
4991         ENTER;
4992         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4993         res = scsi_cmd->device->hostdata;
4994
4995         if (!res)
4996                 return FAILED;
4997
4998         /*
4999          * If we are currently going through reset/reload, return failed. This will force the
5000          * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5001          * reset to complete
5002          */
5003         if (ioa_cfg->in_reset_reload)
5004                 return FAILED;
5005         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5006                 return FAILED;
5007
5008         for_each_hrrq(hrrq, ioa_cfg) {
5009                 spin_lock(&hrrq->_lock);
5010                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5011                         if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5012                                 if (ipr_cmd->scsi_cmd)
5013                                         ipr_cmd->done = ipr_scsi_eh_done;
5014                                 if (ipr_cmd->qc)
5015                                         ipr_cmd->done = ipr_sata_eh_done;
5016                                 if (ipr_cmd->qc &&
5017                                     !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5018                                         ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5019                                         ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5020                                 }
5021                         }
5022                 }
5023                 spin_unlock(&hrrq->_lock);
5024         }
5025         res->resetting_device = 1;
5026         scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5027
5028         if (ipr_is_gata(res) && res->sata_port) {
5029                 ap = res->sata_port->ap;
5030                 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5031                 ata_std_error_handler(ap);
5032                 spin_lock_irq(scsi_cmd->device->host->host_lock);
5033
5034                 for_each_hrrq(hrrq, ioa_cfg) {
5035                         spin_lock(&hrrq->_lock);
5036                         list_for_each_entry(ipr_cmd,
5037                                             &hrrq->hrrq_pending_q, queue) {
5038                                 if (ipr_cmd->ioarcb.res_handle ==
5039                                     res->res_handle) {
5040                                         rc = -EIO;
5041                                         break;
5042                                 }
5043                         }
5044                         spin_unlock(&hrrq->_lock);
5045                 }
5046         } else
5047                 rc = ipr_device_reset(ioa_cfg, res);
5048         res->resetting_device = 0;
5049         res->reset_occurred = 1;
5050
5051         LEAVE;
5052         return rc ? FAILED : SUCCESS;
5053 }
5054
5055 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5056 {
5057         int rc;
5058
5059         spin_lock_irq(cmd->device->host->host_lock);
5060         rc = __ipr_eh_dev_reset(cmd);
5061         spin_unlock_irq(cmd->device->host->host_lock);
5062
5063         return rc;
5064 }
5065
5066 /**
5067  * ipr_bus_reset_done - Op done function for bus reset.
5068  * @ipr_cmd:    ipr command struct
5069  *
5070  * This function is the op done function for a bus reset
5071  *
5072  * Return value:
5073  *      none
5074  **/
5075 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5076 {
5077         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5078         struct ipr_resource_entry *res;
5079
5080         ENTER;
5081         if (!ioa_cfg->sis64)
5082                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5083                         if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5084                                 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5085                                 break;
5086                         }
5087                 }
5088
5089         /*
5090          * If abort has not completed, indicate the reset has, else call the
5091          * abort's done function to wake the sleeping eh thread
5092          */
5093         if (ipr_cmd->sibling->sibling)
5094                 ipr_cmd->sibling->sibling = NULL;
5095         else
5096                 ipr_cmd->sibling->done(ipr_cmd->sibling);
5097
5098         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5099         LEAVE;
5100 }
5101
5102 /**
5103  * ipr_abort_timeout - An abort task has timed out
5104  * @ipr_cmd:    ipr command struct
5105  *
5106  * This function handles when an abort task times out. If this
5107  * happens we issue a bus reset since we have resources tied
5108  * up that must be freed before returning to the midlayer.
5109  *
5110  * Return value:
5111  *      none
5112  **/
5113 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5114 {
5115         struct ipr_cmnd *reset_cmd;
5116         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5117         struct ipr_cmd_pkt *cmd_pkt;
5118         unsigned long lock_flags = 0;
5119
5120         ENTER;
5121         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5122         if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5123                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5124                 return;
5125         }
5126
5127         sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5128         reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5129         ipr_cmd->sibling = reset_cmd;
5130         reset_cmd->sibling = ipr_cmd;
5131         reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5132         cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5133         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5134         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5135         cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5136
5137         ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5138         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5139         LEAVE;
5140 }
5141
5142 /**
5143  * ipr_cancel_op - Cancel specified op
5144  * @scsi_cmd:   scsi command struct
5145  *
5146  * This function cancels specified op.
5147  *
5148  * Return value:
5149  *      SUCCESS / FAILED
5150  **/
5151 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5152 {
5153         struct ipr_cmnd *ipr_cmd;
5154         struct ipr_ioa_cfg *ioa_cfg;
5155         struct ipr_resource_entry *res;
5156         struct ipr_cmd_pkt *cmd_pkt;
5157         u32 ioasc, int_reg;
5158         int op_found = 0;
5159         struct ipr_hrr_queue *hrrq;
5160
5161         ENTER;
5162         ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5163         res = scsi_cmd->device->hostdata;
5164
5165         /* If we are currently going through reset/reload, return failed.
5166          * This will force the mid-layer to call ipr_eh_host_reset,
5167          * which will then go to sleep and wait for the reset to complete
5168          */
5169         if (ioa_cfg->in_reset_reload ||
5170             ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5171                 return FAILED;
5172         if (!res)
5173                 return FAILED;
5174
5175         /*
5176          * If we are aborting a timed out op, chances are that the timeout was caused
5177          * by a still not detected EEH error. In such cases, reading a register will
5178          * trigger the EEH recovery infrastructure.
5179          */
5180         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5181
5182         if (!ipr_is_gscsi(res))
5183                 return FAILED;
5184
5185         for_each_hrrq(hrrq, ioa_cfg) {
5186                 spin_lock(&hrrq->_lock);
5187                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5188                         if (ipr_cmd->scsi_cmd == scsi_cmd) {
5189                                 ipr_cmd->done = ipr_scsi_eh_done;
5190                                 op_found = 1;
5191                                 break;
5192                         }
5193                 }
5194                 spin_unlock(&hrrq->_lock);
5195         }
5196
5197         if (!op_found)
5198                 return SUCCESS;
5199
5200         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5201         ipr_cmd->ioarcb.res_handle = res->res_handle;
5202         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5203         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5204         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5205         ipr_cmd->u.sdev = scsi_cmd->device;
5206
5207         scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5208                     scsi_cmd->cmnd[0]);
5209         ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5210         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5211
5212         /*
5213          * If the abort task timed out and we sent a bus reset, we will get
5214          * one of the following responses to the abort
5215          */
5216         if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5217                 ioasc = 0;
5218                 ipr_trace;
5219         }
5220
5221         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5222         if (!ipr_is_naca_model(res))
5223                 res->needs_sync_complete = 1;
5224
5225         LEAVE;
5226         return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5227 }
5228
5229 /**
5230  * ipr_eh_abort - Abort a single op
5231  * @scsi_cmd:   scsi command struct
5232  *
5233  * Return value:
5234  *      SUCCESS / FAILED
5235  **/
5236 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5237 {
5238         unsigned long flags;
5239         int rc;
5240
5241         ENTER;
5242
5243         spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5244         rc = ipr_cancel_op(scsi_cmd);
5245         spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5246
5247         LEAVE;
5248         return rc;
5249 }
5250
5251 /**
5252  * ipr_handle_other_interrupt - Handle "other" interrupts
5253  * @ioa_cfg:    ioa config struct
5254  * @int_reg:    interrupt register
5255  *
5256  * Return value:
5257  *      IRQ_NONE / IRQ_HANDLED
5258  **/
5259 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5260                                               u32 int_reg)
5261 {
5262         irqreturn_t rc = IRQ_HANDLED;
5263         u32 int_mask_reg;
5264
5265         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5266         int_reg &= ~int_mask_reg;
5267
5268         /* If an interrupt on the adapter did not occur, ignore it.
5269          * Or in the case of SIS 64, check for a stage change interrupt.
5270          */
5271         if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5272                 if (ioa_cfg->sis64) {
5273                         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5274                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5275                         if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5276
5277                                 /* clear stage change */
5278                                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5279                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5280                                 list_del(&ioa_cfg->reset_cmd->queue);
5281                                 del_timer(&ioa_cfg->reset_cmd->timer);
5282                                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5283                                 return IRQ_HANDLED;
5284                         }
5285                 }
5286
5287                 return IRQ_NONE;
5288         }
5289
5290         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5291                 /* Mask the interrupt */
5292                 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5293
5294                 /* Clear the interrupt */
5295                 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
5296                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5297
5298                 list_del(&ioa_cfg->reset_cmd->queue);
5299                 del_timer(&ioa_cfg->reset_cmd->timer);
5300                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5301         } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5302                 if (ioa_cfg->clear_isr) {
5303                         if (ipr_debug && printk_ratelimit())
5304                                 dev_err(&ioa_cfg->pdev->dev,
5305                                         "Spurious interrupt detected. 0x%08X\n", int_reg);
5306                         writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5307                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5308                         return IRQ_NONE;
5309                 }
5310         } else {
5311                 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5312                         ioa_cfg->ioa_unit_checked = 1;
5313                 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5314                         dev_err(&ioa_cfg->pdev->dev,
5315                                 "No Host RRQ. 0x%08X\n", int_reg);
5316                 else
5317                         dev_err(&ioa_cfg->pdev->dev,
5318                                 "Permanent IOA failure. 0x%08X\n", int_reg);
5319
5320                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5321                         ioa_cfg->sdt_state = GET_DUMP;
5322
5323                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5324                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5325         }
5326
5327         return rc;
5328 }
5329
5330 /**
5331  * ipr_isr_eh - Interrupt service routine error handler
5332  * @ioa_cfg:    ioa config struct
5333  * @msg:        message to log
      * @number:     number logged with the message
5334  *
5335  * Return value:
5336  *      none
5337  **/
5338 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5339 {
5340         ioa_cfg->errors_logged++;
5341         dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5342
5343         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5344                 ioa_cfg->sdt_state = GET_DUMP;
5345
5346         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5347 }
5348
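/**
 * ipr_process_hrrq - Process completed responses from an HRRQ
 * @hrr_queue:  host request/response queue to service
 * @budget:     maximum number of responses to process, or -1 for no limit
 * @doneq:      list to move completed commands onto
 *
 * Return value:
 *      number of responses processed
 **/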
5349 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5350                                                 struct list_head *doneq)
5351 {
5352         u32 ioasc;
5353         u16 cmd_index;
5354         struct ipr_cmnd *ipr_cmd;
5355         struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5356         int num_hrrq = 0;
5357
5358         /* If interrupts are disabled, ignore the interrupt */
5359         if (!hrr_queue->allow_interrupts)
5360                 return 0;
5361
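        /* Each HRRQ entry carries a toggle bit that the adapter flips
         * every time the circular queue wraps, so an entry is a new
         * response only while its toggle bit matches the one we expect.
         */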
5362         while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5363                hrr_queue->toggle_bit) {
5364
5365                 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5366                              IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5367                              IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5368
5369                 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5370                              cmd_index < hrr_queue->min_cmd_id)) {
5371                         ipr_isr_eh(ioa_cfg,
5372                                 "Invalid response handle from IOA:",
5373                                 cmd_index);
5374                         break;
5375                 }
5376
5377                 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5378                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5379
5380                 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5381
5382                 list_move_tail(&ipr_cmd->queue, doneq);
5383
5384                 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5385                         hrr_queue->hrrq_curr++;
5386                 } else {
5387                         hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5388                         hrr_queue->toggle_bit ^= 1u;
5389                 }
5390                 num_hrrq++;
5391                 if (budget > 0 && num_hrrq >= budget)
5392                         break;
5393         }
5394
5395         return num_hrrq;
5396 }
5397
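/**
 * ipr_iopoll - blk_iopoll callback used to process HRRQ responses
 * @iop:        blk_iopoll struct
 * @budget:     maximum number of responses to process
 *
 * Return value:
 *      number of completed operations
 **/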
5398 static int ipr_iopoll(struct blk_iopoll *iop, int budget)
5399 {
5400         struct ipr_ioa_cfg *ioa_cfg;
5401         struct ipr_hrr_queue *hrrq;
5402         struct ipr_cmnd *ipr_cmd, *temp;
5403         unsigned long hrrq_flags;
5404         int completed_ops;
5405         LIST_HEAD(doneq);
5406
5407         hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5408         ioa_cfg = hrrq->ioa_cfg;
5409
5410         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5411         completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5412
5413         if (completed_ops < budget)
5414                 blk_iopoll_complete(iop);
5415         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5416
5417         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5418                 list_del(&ipr_cmd->queue);
5419                 del_timer(&ipr_cmd->timer);
5420                 ipr_cmd->fast_done(ipr_cmd);
5421         }
5422
5423         return completed_ops;
5424 }
5425
5426 /**
5427  * ipr_isr - Interrupt service routine
5428  * @irq:        irq number
5429  * @devp:       pointer to the hrr queue struct
5430  *
5431  * Return value:
5432  *      IRQ_NONE / IRQ_HANDLED
5433  **/
5434 static irqreturn_t ipr_isr(int irq, void *devp)
5435 {
5436         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5437         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5438         unsigned long hrrq_flags = 0;
5439         u32 int_reg = 0;
5440         int num_hrrq = 0;
5441         int irq_none = 0;
5442         struct ipr_cmnd *ipr_cmd, *temp;
5443         irqreturn_t rc = IRQ_NONE;
5444         LIST_HEAD(doneq);
5445
5446         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5447         /* If interrupts are disabled, ignore the interrupt */
5448         if (!hrrq->allow_interrupts) {
5449                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5450                 return IRQ_NONE;
5451         }
5452
5453         while (1) {
5454                 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5455                         rc =  IRQ_HANDLED;
5456
5457                         if (!ioa_cfg->clear_isr)
5458                                 break;
5459
5460                         /* Clear the PCI interrupt */
5461                         num_hrrq = 0;
5462                         do {
5463                                 writel(IPR_PCII_HRRQ_UPDATED,
5464                                      ioa_cfg->regs.clr_interrupt_reg32);
5465                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5466                         } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5467                                 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5468
5469                 } else if (rc == IRQ_NONE && irq_none == 0) {
5470                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5471                         irq_none++;
5472                 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5473                            int_reg & IPR_PCII_HRRQ_UPDATED) {
5474                         ipr_isr_eh(ioa_cfg,
5475                                 "Error clearing HRRQ:", num_hrrq);
5476                         rc = IRQ_HANDLED;
5477                         break;
5478                 } else
5479                         break;
5480         }
5481
5482         if (unlikely(rc == IRQ_NONE))
5483                 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5484
5485         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5486         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5487                 list_del(&ipr_cmd->queue);
5488                 del_timer(&ipr_cmd->timer);
5489                 ipr_cmd->fast_done(ipr_cmd);
5490         }
5491         return rc;
5492 }
5493
5494 /**
5495  * ipr_isr_mhrrq - Interrupt service routine
5496  * @irq:        irq number
5497  * @devp:       pointer to the hrr queue struct
5498  *
5499  * Return value:
5500  *      IRQ_NONE / IRQ_HANDLED
5501  **/
5502 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5503 {
5504         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5505         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5506         unsigned long hrrq_flags = 0;
5507         struct ipr_cmnd *ipr_cmd, *temp;
5508         irqreturn_t rc = IRQ_NONE;
5509         LIST_HEAD(doneq);
5510
5511         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5512
5513         /* If interrupts are disabled, ignore the interrupt */
5514         if (!hrrq->allow_interrupts) {
5515                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5516                 return IRQ_NONE;
5517         }
5518
5519         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5520                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5521                        hrrq->toggle_bit) {
5522                         if (!blk_iopoll_sched_prep(&hrrq->iopoll))
5523                                 blk_iopoll_sched(&hrrq->iopoll);
5524                         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5525                         return IRQ_HANDLED;
5526                 }
5527         } else {
5528                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5529                         hrrq->toggle_bit)
5530
5531                         if (ipr_process_hrrq(hrrq, -1, &doneq))
5532                                 rc =  IRQ_HANDLED;
5533         }
5534
5535         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5536
5537         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5538                 list_del(&ipr_cmd->queue);
5539                 del_timer(&ipr_cmd->timer);
5540                 ipr_cmd->fast_done(ipr_cmd);
5541         }
5542         return rc;
5543 }
5544
5545 /**
5546  * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5547  * @ioa_cfg:    ioa config struct
5548  * @ipr_cmd:    ipr command struct
5549  *
5550  * Return value:
5551  *      0 on success / -1 on failure
5552  **/
5553 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5554                              struct ipr_cmnd *ipr_cmd)
5555 {
5556         int i, nseg;
5557         struct scatterlist *sg;
5558         u32 length;
5559         u32 ioadl_flags = 0;
5560         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5561         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5562         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5563
5564         length = scsi_bufflen(scsi_cmd);
5565         if (!length)
5566                 return 0;
5567
5568         nseg = scsi_dma_map(scsi_cmd);
5569         if (nseg < 0) {
5570                 if (printk_ratelimit())
5571                         dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5572                 return -1;
5573         }
5574
5575         ipr_cmd->dma_use_sg = nseg;
5576
5577         ioarcb->data_transfer_length = cpu_to_be32(length);
5578         ioarcb->ioadl_len =
5579                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5580
5581         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5582                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5583                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5584         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5585                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5586
5587         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5588                 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5589                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5590                 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5591         }
5592
5593         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5594         return 0;
5595 }
5596
5597 /**
5598  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5599  * @ioa_cfg:    ioa config struct
5600  * @ipr_cmd:    ipr command struct
5601  *
5602  * Return value:
5603  *      0 on success / -1 on failure
5604  **/
5605 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5606                            struct ipr_cmnd *ipr_cmd)
5607 {
5608         int i, nseg;
5609         struct scatterlist *sg;
5610         u32 length;
5611         u32 ioadl_flags = 0;
5612         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5613         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5614         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5615
5616         length = scsi_bufflen(scsi_cmd);
5617         if (!length)
5618                 return 0;
5619
5620         nseg = scsi_dma_map(scsi_cmd);
5621         if (nseg < 0) {
5622                 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5623                 return -1;
5624         }
5625
5626         ipr_cmd->dma_use_sg = nseg;
5627
5628         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5629                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5630                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5631                 ioarcb->data_transfer_length = cpu_to_be32(length);
5632                 ioarcb->ioadl_len =
5633                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5634         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5635                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5636                 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5637                 ioarcb->read_ioadl_len =
5638                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5639         }
5640
5641         if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5642                 ioadl = ioarcb->u.add_data.u.ioadl;
5643                 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5644                                     offsetof(struct ipr_ioarcb, u.add_data));
5645                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5646         }
5647
5648         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5649                 ioadl[i].flags_and_data_len =
5650                         cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5651                 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5652         }
5653
5654         ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5655         return 0;
5656 }
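
/*
 * Worked example for the two builders above (hypothetical addresses): a
 * 12 KiB write split across three 4 KiB scatterlist segments would
 * produce, on the 32-bit path,
 *
 *	ioadl[0] = WRITE        | 0x1000, address 0x8a001000
 *	ioadl[1] = WRITE        | 0x1000, address 0x8a400000
 *	ioadl[2] = WRITE | LAST | 0x1000, address 0x8a800000
 *
 * Only the final descriptor carries IPR_IOADL_FLAGS_LAST, which is what
 * the ioadl[i-1] fix-up after each loop guarantees. The 32-bit path also
 * places the table inline in the IOARCB when it fits, sparing the adapter
 * a separate DMA fetch of the descriptor list.
 */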
5657
5658 /**
5659  * ipr_erp_done - Process completion of ERP for a device
5660  * @ipr_cmd:            ipr command struct
5661  *
5662  * This function copies the sense buffer into the scsi_cmd
5663  * struct and pushes the scsi_done function.
5664  *
5665  * Return value:
5666  *      nothing
5667  **/
5668 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5669 {
5670         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5671         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5672         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5673
5674         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5675                 scsi_cmd->result |= (DID_ERROR << 16);
5676                 scmd_printk(KERN_ERR, scsi_cmd,
5677                             "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5678         } else {
5679                 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5680                        SCSI_SENSE_BUFFERSIZE);
5681         }
5682
5683         if (res) {
5684                 if (!ipr_is_naca_model(res))
5685                         res->needs_sync_complete = 1;
5686                 res->in_erp = 0;
5687         }
5688         scsi_dma_unmap(ipr_cmd->scsi_cmd);
5689         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5690         scsi_cmd->scsi_done(scsi_cmd);
5691 }
5692
5693 /**
5694  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5695  * @ipr_cmd:    ipr command struct
5696  *
5697  * Return value:
5698  *      none
5699  **/
5700 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5701 {
5702         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5703         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5704         dma_addr_t dma_addr = ipr_cmd->dma_addr;
5705
5706         memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5707         ioarcb->data_transfer_length = 0;
5708         ioarcb->read_data_transfer_length = 0;
5709         ioarcb->ioadl_len = 0;
5710         ioarcb->read_ioadl_len = 0;
5711         ioasa->hdr.ioasc = 0;
5712         ioasa->hdr.residual_data_len = 0;
5713
5714         if (ipr_cmd->ioa_cfg->sis64)
5715                 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5716                         cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5717         else {
5718                 ioarcb->write_ioadl_addr =
5719                         cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5720                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5721         }
5722 }
5723
5724 /**
5725  * ipr_erp_request_sense - Send request sense to a device
5726  * @ipr_cmd:    ipr command struct
5727  *
5728  * This function sends a request sense to a device as a result
5729  * of a check condition.
5730  *
5731  * Return value:
5732  *      nothing
5733  **/
5734 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5735 {
5736         struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5737         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5738
5739         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5740                 ipr_erp_done(ipr_cmd);
5741                 return;
5742         }
5743
5744         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5745
5746         cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5747         cmd_pkt->cdb[0] = REQUEST_SENSE;
5748         cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5749         cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5750         cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5751         cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5752
5753         ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5754                        SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5755
5756         ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5757                    IPR_REQUEST_SENSE_TIMEOUT * 2);
5758 }
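
/*
 * The 6-byte REQUEST SENSE CDB assembled above, byte by byte (the
 * allocation length is the only non-zero parameter besides the opcode):
 */
#if 0
	cdb[0] = 0x03;			/* REQUEST SENSE opcode */
	cdb[1] = 0x00;			/* DESC=0: fixed-format sense */
	cdb[2] = 0x00;			/* reserved */
	cdb[3] = 0x00;			/* reserved */
	cdb[4] = SCSI_SENSE_BUFFERSIZE;	/* allocation length (96) */
	cdb[5] = 0x00;			/* control */
#endif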
5759
5760 /**
5761  * ipr_erp_cancel_all - Send cancel all to a device
5762  * @ipr_cmd:    ipr command struct
5763  *
5764  * This function sends a cancel all to a device to clear the
5765  * queue. If we are running TCQ on the device, QERR is set to 1,
5766  * which means all outstanding ops have been dropped on the floor.
5767  * Cancel all will return them to us.
5768  *
5769  * Return value:
5770  *      nothing
5771  **/
5772 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5773 {
5774         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5775         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5776         struct ipr_cmd_pkt *cmd_pkt;
5777
5778         res->in_erp = 1;
5779
5780         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5781
5782         if (!scsi_get_tag_type(scsi_cmd->device)) {
5783                 ipr_erp_request_sense(ipr_cmd);
5784                 return;
5785         }
5786
5787         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5788         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5789         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5790
5791         ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5792                    IPR_CANCEL_ALL_TIMEOUT);
5793 }
5794
5795 /**
5796  * ipr_dump_ioasa - Dump contents of IOASA
5797  * @ioa_cfg:    ioa config struct
5798  * @ipr_cmd:    ipr command struct
5799  * @res:                resource entry struct
5800  *
5801  * This function is invoked by the interrupt handler when ops
5802  * fail. It will log the IOASA if appropriate. Only called
5803  * for GPDD ops.
5804  *
5805  * Return value:
5806  *      none
5807  **/
5808 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5809                            struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5810 {
5811         int i;
5812         u16 data_len;
5813         u32 ioasc, fd_ioasc;
5814         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5815         __be32 *ioasa_data = (__be32 *)ioasa;
5816         int error_index;
5817
5818         ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5819         fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5820
5821         if (ioasc == 0)
5822                 return;
5823
5824         if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5825                 return;
5826
5827         if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5828                 error_index = ipr_get_error(fd_ioasc);
5829         else
5830                 error_index = ipr_get_error(ioasc);
5831
5832         if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5833                 /* Don't log an error if the IOA already logged one */
5834                 if (ioasa->hdr.ilid != 0)
5835                         return;
5836
5837                 if (!ipr_is_gscsi(res))
5838                         return;
5839
5840                 if (ipr_error_table[error_index].log_ioasa == 0)
5841                         return;
5842         }
5843
5844         ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5845
5846         data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5847         if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5848                 data_len = sizeof(struct ipr_ioasa64);
5849         else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
5850                 data_len = sizeof(struct ipr_ioasa);
5851
5852         ipr_err("IOASA Dump:\n");
5853
5854         for (i = 0; i < data_len / 4; i += 4) {
5855                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5856                         be32_to_cpu(ioasa_data[i]),
5857                         be32_to_cpu(ioasa_data[i+1]),
5858                         be32_to_cpu(ioasa_data[i+2]),
5859                         be32_to_cpu(ioasa_data[i+3]));
5860         }
5861 }
5862
5863 /**
5864  * ipr_gen_sense - Generate SCSI sense data from an IOASA
5865  * @ipr_cmd:    ipr command struct
5867  *
5868  * Return value:
5869  *      none
5870  **/
5871 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5872 {
5873         u32 failing_lba;
5874         u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5875         struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5876         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5877         u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
5878
5879         memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5880
5881         if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5882                 return;
5883
5884         ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5885
5886         if (ipr_is_vset_device(res) &&
5887             ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5888             ioasa->u.vset.failing_lba_hi != 0) {
5889                 sense_buf[0] = 0x72;
5890                 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5891                 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5892                 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5893
5894                 sense_buf[7] = 12;
5895                 sense_buf[8] = 0;
5896                 sense_buf[9] = 0x0A;
5897                 sense_buf[10] = 0x80;
5898
5899                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5900
5901                 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5902                 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5903                 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5904                 sense_buf[15] = failing_lba & 0x000000ff;
5905
5906                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5907
5908                 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5909                 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5910                 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5911                 sense_buf[19] = failing_lba & 0x000000ff;
5912         } else {
5913                 sense_buf[0] = 0x70;
5914                 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5915                 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5916                 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5917
5918                 /* Illegal request */
5919                 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
5920                     (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
5921                         sense_buf[7] = 10;      /* additional length */
5922
5923                         /* IOARCB was in error */
5924                         if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5925                                 sense_buf[15] = 0xC0;
5926                         else    /* Parameter data was invalid */
5927                                 sense_buf[15] = 0x80;
5928
5929                         sense_buf[16] =
5930                             ((IPR_FIELD_POINTER_MASK &
5931                               be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
5932                         sense_buf[17] =
5933                             (IPR_FIELD_POINTER_MASK &
5934                              be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
5935                 } else {
5936                         if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5937                                 if (ipr_is_vset_device(res))
5938                                         failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5939                                 else
5940                                         failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5941
5942                                 sense_buf[0] |= 0x80;   /* Or in the Valid bit */
5943                                 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5944                                 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5945                                 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5946                                 sense_buf[6] = failing_lba & 0x000000ff;
5947                         }
5948
5949                         sense_buf[7] = 6;       /* additional length */
5950                 }
5951         }
5952 }
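
/*
 * For reference, a sketch of reading the failing LBA back out of the
 * descriptor-format (0x72) sense data built above. The information
 * descriptor starts at byte 8: type 0x00, additional length 0x0A, the
 * VALID bit in its byte 2, and the big-endian LBA in sense_buf[12..19].
 * example_failing_lba() is illustrative, not part of this driver.
 */
#if 0
static u64 example_failing_lba(const u8 *sense_buf)
{
	u64 lba = 0;
	int i;

	if (sense_buf[0] != 0x72 || sense_buf[8] != 0x00)
		return ~0ULL;	/* not the format generated above */

	for (i = 0; i < 8; i++)
		lba = (lba << 8) | sense_buf[12 + i];
	return lba;
}
#endif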
5953
5954 /**
5955  * ipr_get_autosense - Copy autosense data to sense buffer
5956  * @ipr_cmd:    ipr command struct
5957  *
5958  * This function copies the autosense buffer to the buffer
5959  * in the scsi_cmd, if there is autosense available.
5960  *
5961  * Return value:
5962  *      1 if autosense was available / 0 if not
5963  **/
5964 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5965 {
5966         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5967         struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
5968
5969         if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
5970                 return 0;
5971
5972         if (ipr_cmd->ioa_cfg->sis64)
5973                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
5974                        min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
5975                            SCSI_SENSE_BUFFERSIZE));
5976         else
5977                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5978                        min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5979                            SCSI_SENSE_BUFFERSIZE));
5980         return 1;
5981 }
5982
5983 /**
5984  * ipr_erp_start - Process an error response for a SCSI op
5985  * @ioa_cfg:    ioa config struct
5986  * @ipr_cmd:    ipr command struct
5987  *
5988  * This function determines whether or not to initiate ERP
5989  * on the affected device.
5990  *
5991  * Return value:
5992  *      nothing
5993  **/
5994 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5995                               struct ipr_cmnd *ipr_cmd)
5996 {
5997         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5998         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5999         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6000         u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6001
6002         if (!res) {
6003                 ipr_scsi_eh_done(ipr_cmd);
6004                 return;
6005         }
6006
6007         if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6008                 ipr_gen_sense(ipr_cmd);
6009
6010         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6011
6012         switch (masked_ioasc) {
6013         case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6014                 if (ipr_is_naca_model(res))
6015                         scsi_cmd->result |= (DID_ABORT << 16);
6016                 else
6017                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6018                 break;
6019         case IPR_IOASC_IR_RESOURCE_HANDLE:
6020         case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6021                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6022                 break;
6023         case IPR_IOASC_HW_SEL_TIMEOUT:
6024                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6025                 if (!ipr_is_naca_model(res))
6026                         res->needs_sync_complete = 1;
6027                 break;
6028         case IPR_IOASC_SYNC_REQUIRED:
6029                 if (!res->in_erp)
6030                         res->needs_sync_complete = 1;
6031                 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6032                 break;
6033         case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6034         case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6035                 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6036                 break;
6037         case IPR_IOASC_BUS_WAS_RESET:
6038         case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6039                 /*
6040                  * Report the bus reset and ask for a retry. The device
6041                  * will return CC/UA on the next command.
6042                  */
6043                 if (!res->resetting_device)
6044                         scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6045                 scsi_cmd->result |= (DID_ERROR << 16);
6046                 if (!ipr_is_naca_model(res))
6047                         res->needs_sync_complete = 1;
6048                 break;
6049         case IPR_IOASC_HW_DEV_BUS_STATUS:
6050                 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6051                 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6052                         if (!ipr_get_autosense(ipr_cmd)) {
6053                                 if (!ipr_is_naca_model(res)) {
6054                                         ipr_erp_cancel_all(ipr_cmd);
6055                                         return;
6056                                 }
6057                         }
6058                 }
6059                 if (!ipr_is_naca_model(res))
6060                         res->needs_sync_complete = 1;
6061                 break;
6062         case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6063                 break;
6064         default:
6065                 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6066                         scsi_cmd->result |= (DID_ERROR << 16);
6067                 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6068                         res->needs_sync_complete = 1;
6069                 break;
6070         }
6071
6072         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6073         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6074         scsi_cmd->scsi_done(scsi_cmd);
6075 }
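
/*
 * The result word assembled piecemeal throughout ipr_erp_start has a
 * fixed layout: the host byte (DID_*) occupies bits 16-23 and the SCSI
 * status byte bits 0-7. Shown in one place (illustrative helper):
 */
#if 0
static u32 example_result(u8 host_byte, u8 scsi_status)
{
	return (host_byte << 16) | scsi_status;
}
/* e.g. example_result(DID_ERROR, SAM_STAT_CHECK_CONDITION) == 0x00070002 */
#endif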
6076
6077 /**
6078  * ipr_scsi_done - mid-layer done function
6079  * @ipr_cmd:    ipr command struct
6080  *
6081  * This function is invoked by the interrupt handler for
6082  * ops generated by the SCSI mid-layer
6083  *
6084  * Return value:
6085  *      none
6086  **/
6087 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6088 {
6089         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6090         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6091         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6092         unsigned long hrrq_flags;
6093
6094         scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6095
6096         if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6097                 scsi_dma_unmap(scsi_cmd);
6098
6099                 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
6100                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6101                 scsi_cmd->scsi_done(scsi_cmd);
6102                 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
6103         } else {
6104                 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
6105                 ipr_erp_start(ioa_cfg, ipr_cmd);
6106                 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
6107         }
6108 }
6109
6110 /**
6111  * ipr_queuecommand - Queue a mid-layer request
6112  * @shost:              scsi host struct
6113  * @scsi_cmd:   scsi command struct
6114  *
6115  * This function queues a request generated by the mid-layer.
6116  *
6117  * Return value:
6118  *      0 on success
6119  *      SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6120  *      SCSI_MLQUEUE_HOST_BUSY if host is busy
6121  **/
6122 static int ipr_queuecommand(struct Scsi_Host *shost,
6123                             struct scsi_cmnd *scsi_cmd)
6124 {
6125         struct ipr_ioa_cfg *ioa_cfg;
6126         struct ipr_resource_entry *res;
6127         struct ipr_ioarcb *ioarcb;
6128         struct ipr_cmnd *ipr_cmd;
6129         unsigned long hrrq_flags, lock_flags;
6130         int rc;
6131         struct ipr_hrr_queue *hrrq;
6132         int hrrq_id;
6133
6134         ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6135
6136         scsi_cmd->result = (DID_OK << 16);
6137         res = scsi_cmd->device->hostdata;
6138
6139         if (ipr_is_gata(res) && res->sata_port) {
6140                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6141                 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6142                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6143                 return rc;
6144         }
6145
6146         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6147         hrrq = &ioa_cfg->hrrq[hrrq_id];
6148
6149         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6150         /*
6151          * We are currently blocking all devices due to a host reset.
6152          * We have told the host to stop giving us new requests, but
6153          * ERP ops don't count. FIXME
6154          */
6155         if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6156                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6157                 return SCSI_MLQUEUE_HOST_BUSY;
6158         }
6159
6160         /*
6161          * FIXME - Create a scsi_set_host_offline interface
6162          *  so the ioa_is_dead check can be removed
6163          */
6164         if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6165                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6166                 goto err_nodev;
6167         }
6168
6169         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6170         if (ipr_cmd == NULL) {
6171                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6172                 return SCSI_MLQUEUE_HOST_BUSY;
6173         }
6174         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6175
6176         ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6177         ioarcb = &ipr_cmd->ioarcb;
6178
6179         memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6180         ipr_cmd->scsi_cmd = scsi_cmd;
6181         ipr_cmd->done = ipr_scsi_eh_done;
6182
6183         if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6184                 if (scsi_cmd->underflow == 0)
6185                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6186
6187                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6188                 if (ipr_is_gscsi(res) && res->reset_occurred) {
6189                         res->reset_occurred = 0;
6190                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6191                 }
6192                 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6193                 if (scsi_cmd->flags & SCMD_TAGGED)
6194                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6195                 else
6196                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
6197         }
6198
6199         if (scsi_cmd->cmnd[0] >= 0xC0 &&
6200             (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6201                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6202         }
6203
6204         if (ioa_cfg->sis64)
6205                 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6206         else
6207                 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6208
6209         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6210         if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6211                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6212                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6213                 if (!rc)
6214                         scsi_dma_unmap(scsi_cmd);
6215                 return SCSI_MLQUEUE_HOST_BUSY;
6216         }
6217
6218         if (unlikely(hrrq->ioa_is_dead)) {
6219                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6220                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6221                 scsi_dma_unmap(scsi_cmd);
6222                 goto err_nodev;
6223         }
6224
6225         ioarcb->res_handle = res->res_handle;
6226         if (res->needs_sync_complete) {
6227                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6228                 res->needs_sync_complete = 0;
6229         }
6230         list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6231         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6232         ipr_send_command(ipr_cmd);
6233         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6234         return 0;
6235
6236 err_nodev:
6237         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6238         memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6239         scsi_cmd->result = (DID_NO_CONNECT << 16);
6240         scsi_cmd->scsi_done(scsi_cmd);
6241         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6242         return 0;
6243 }
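
/*
 * The lock choreography in ipr_queuecommand, distilled: reserve a command
 * under the HRRQ lock, build it (including DMA mapping) unlocked, then
 * re-check the queue state under the lock before committing, since the
 * adapter may have started a reset in the window. Illustrative names:
 */
#if 0
	spin_lock_irqsave(hrrq->lock, flags);
	cmd = example_get_free_cmd(hrrq);	/* NULL -> HOST_BUSY */
	spin_unlock_irqrestore(hrrq->lock, flags);

	example_build_request(cmd);		/* no lock held here */

	spin_lock_irqsave(hrrq->lock, flags);
	if (!hrrq->allow_cmds || hrrq->ioa_is_dead)
		example_recycle_cmd(cmd);	/* state changed meanwhile */
	else
		example_send(cmd);		/* commit under the lock */
	spin_unlock_irqrestore(hrrq->lock, flags);
#endif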
6244
6245 /**
6246  * ipr_ioctl - IOCTL handler
6247  * @sdev:       scsi device struct
6248  * @cmd:        IOCTL cmd
6249  * @arg:        IOCTL arg
6250  *
6251  * Return value:
6252  *      0 on success / other on failure
6253  **/
6254 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6255 {
6256         struct ipr_resource_entry *res;
6257
6258         res = (struct ipr_resource_entry *)sdev->hostdata;
6259         if (res && ipr_is_gata(res)) {
6260                 if (cmd == HDIO_GET_IDENTITY)
6261                         return -ENOTTY;
6262                 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6263         }
6264
6265         return -EINVAL;
6266 }
6267
6268 /**
6269  * ipr_ioa_info - Get information about the card/driver
6270  * @host:       scsi host struct
6271  *
6272  * Return value:
6273  *      pointer to buffer with description string
6274  **/
6275 static const char *ipr_ioa_info(struct Scsi_Host *host)
6276 {
6277         static char buffer[512];
6278         struct ipr_ioa_cfg *ioa_cfg;
6279         unsigned long lock_flags = 0;
6280
6281         ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6282
6283         spin_lock_irqsave(host->host_lock, lock_flags);
6284         sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6285         spin_unlock_irqrestore(host->host_lock, lock_flags);
6286
6287         return buffer;
6288 }
6289
6290 static struct scsi_host_template driver_template = {
6291         .module = THIS_MODULE,
6292         .name = "IPR",
6293         .info = ipr_ioa_info,
6294         .ioctl = ipr_ioctl,
6295         .queuecommand = ipr_queuecommand,
6296         .eh_abort_handler = ipr_eh_abort,
6297         .eh_device_reset_handler = ipr_eh_dev_reset,
6298         .eh_host_reset_handler = ipr_eh_host_reset,
6299         .slave_alloc = ipr_slave_alloc,
6300         .slave_configure = ipr_slave_configure,
6301         .slave_destroy = ipr_slave_destroy,
6302         .target_alloc = ipr_target_alloc,
6303         .target_destroy = ipr_target_destroy,
6304         .change_queue_depth = ipr_change_queue_depth,
6305         .change_queue_type = ipr_change_queue_type,
6306         .bios_param = ipr_biosparam,
6307         .can_queue = IPR_MAX_COMMANDS,
6308         .this_id = -1,
6309         .sg_tablesize = IPR_MAX_SGLIST,
6310         .max_sectors = IPR_IOA_MAX_SECTORS,
6311         .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6312         .use_clustering = ENABLE_CLUSTERING,
6313         .shost_attrs = ipr_ioa_attrs,
6314         .sdev_attrs = ipr_dev_attrs,
6315         .proc_name = IPR_NAME,
6316         .no_write_same = 1,
6317         .use_blk_tags = 1,
6318 };
6319
6320 /**
6321  * ipr_ata_phy_reset - libata phy_reset handler
6322  * @ap:         ata port to reset
6323  *
6324  **/
6325 static void ipr_ata_phy_reset(struct ata_port *ap)
6326 {
6327         unsigned long flags;
6328         struct ipr_sata_port *sata_port = ap->private_data;
6329         struct ipr_resource_entry *res = sata_port->res;
6330         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6331         int rc;
6332
6333         ENTER;
6334         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6335         while (ioa_cfg->in_reset_reload) {
6336                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6337                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6338                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6339         }
6340
6341         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6342                 goto out_unlock;
6343
6344         rc = ipr_device_reset(ioa_cfg, res);
6345
6346         if (rc) {
6347                 ap->link.device[0].class = ATA_DEV_NONE;
6348                 goto out_unlock;
6349         }
6350
6351         ap->link.device[0].class = res->ata_class;
6352         if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6353                 ap->link.device[0].class = ATA_DEV_NONE;
6354
6355 out_unlock:
6356         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6357         LEAVE;
6358 }
6359
6360 /**
6361  * ipr_ata_post_internal - Cleanup after an internal command
6362  * @qc: ATA queued command
6363  *
6364  * Return value:
6365  *      none
6366  **/
6367 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6368 {
6369         struct ipr_sata_port *sata_port = qc->ap->private_data;
6370         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6371         struct ipr_cmnd *ipr_cmd;
6372         struct ipr_hrr_queue *hrrq;
6373         unsigned long flags;
6374
6375         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6376         while (ioa_cfg->in_reset_reload) {
6377                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6378                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6379                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6380         }
6381
6382         for_each_hrrq(hrrq, ioa_cfg) {
6383                 spin_lock(&hrrq->_lock);
6384                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6385                         if (ipr_cmd->qc == qc) {
6386                                 ipr_device_reset(ioa_cfg, sata_port->res);
6387                                 break;
6388                         }
6389                 }
6390                 spin_unlock(&hrrq->_lock);
6391         }
6392         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6393 }
6394
6395 /**
6396  * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6397  * @regs:       destination
6398  * @tf: source ATA taskfile
6399  *
6400  * Return value:
6401  *      none
6402  **/
6403 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6404                              struct ata_taskfile *tf)
6405 {
6406         regs->feature = tf->feature;
6407         regs->nsect = tf->nsect;
6408         regs->lbal = tf->lbal;
6409         regs->lbam = tf->lbam;
6410         regs->lbah = tf->lbah;
6411         regs->device = tf->device;
6412         regs->command = tf->command;
6413         regs->hob_feature = tf->hob_feature;
6414         regs->hob_nsect = tf->hob_nsect;
6415         regs->hob_lbal = tf->hob_lbal;
6416         regs->hob_lbam = tf->hob_lbam;
6417         regs->hob_lbah = tf->hob_lbah;
6418         regs->ctl = tf->ctl;
6419 }
6420
6421 /**
6422  * ipr_sata_done - done function for SATA commands
6423  * @ipr_cmd:    ipr command struct
6424  *
6425  * This function is invoked by the interrupt handler for
6426  * ops generated by the SCSI mid-layer to SATA devices
6427  *
6428  * Return value:
6429  *      none
6430  **/
6431 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6432 {
6433         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6434         struct ata_queued_cmd *qc = ipr_cmd->qc;
6435         struct ipr_sata_port *sata_port = qc->ap->private_data;
6436         struct ipr_resource_entry *res = sata_port->res;
6437         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6438
6439         spin_lock(&ipr_cmd->hrrq->_lock);
6440         if (ipr_cmd->ioa_cfg->sis64)
6441                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6442                        sizeof(struct ipr_ioasa_gata));
6443         else
6444                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6445                        sizeof(struct ipr_ioasa_gata));
6446         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6447
6448         if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6449                 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6450
6451         if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6452                 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6453         else
6454                 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6455         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6456         spin_unlock(&ipr_cmd->hrrq->_lock);
6457         ata_qc_complete(qc);
6458 }
6459
6460 /**
6461  * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6462  * @ipr_cmd:    ipr command struct
6463  * @qc:         ATA queued command
6464  *
6465  **/
6466 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6467                                   struct ata_queued_cmd *qc)
6468 {
6469         u32 ioadl_flags = 0;
6470         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6471         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6472         struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6473         int len = qc->nbytes;
6474         struct scatterlist *sg;
6475         unsigned int si;
6476         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6477
6478         if (len == 0)
6479                 return;
6480
6481         if (qc->dma_dir == DMA_TO_DEVICE) {
6482                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6483                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6484         } else if (qc->dma_dir == DMA_FROM_DEVICE)
6485                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6486
6487         ioarcb->data_transfer_length = cpu_to_be32(len);
6488         ioarcb->ioadl_len =
6489                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6490         ioarcb->u.sis64_addr_data.data_ioadl_addr =
6491                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6492
6493         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6494                 ioadl64->flags = cpu_to_be32(ioadl_flags);
6495                 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6496                 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6497
6498                 last_ioadl64 = ioadl64;
6499                 ioadl64++;
6500         }
6501
6502         if (likely(last_ioadl64))
6503                 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6504 }
6505
6506 /**
6507  * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6508  * @ipr_cmd:    ipr command struct
6509  * @qc:         ATA queued command
6510  *
6511  **/
6512 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6513                                 struct ata_queued_cmd *qc)
6514 {
6515         u32 ioadl_flags = 0;
6516         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6517         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6518         struct ipr_ioadl_desc *last_ioadl = NULL;
6519         int len = qc->nbytes;
6520         struct scatterlist *sg;
6521         unsigned int si;
6522
6523         if (len == 0)
6524                 return;
6525
6526         if (qc->dma_dir == DMA_TO_DEVICE) {
6527                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6528                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6529                 ioarcb->data_transfer_length = cpu_to_be32(len);
6530                 ioarcb->ioadl_len =
6531                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6532         } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6533                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6534                 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6535                 ioarcb->read_ioadl_len =
6536                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6537         }
6538
6539         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6540                 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6541                 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6542
6543                 last_ioadl = ioadl;
6544                 ioadl++;
6545         }
6546
6547         if (likely(last_ioadl))
6548                 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6549 }
6550
6551 /**
6552  * ipr_qc_defer - Get a free ipr_cmd
6553  * @qc: queued command
6554  *
6555  * Return value:
6556  *      0 on success / ATA_DEFER_LINK if out of command blocks
6557  **/
6558 static int ipr_qc_defer(struct ata_queued_cmd *qc)
6559 {
6560         struct ata_port *ap = qc->ap;
6561         struct ipr_sata_port *sata_port = ap->private_data;
6562         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6563         struct ipr_cmnd *ipr_cmd;
6564         struct ipr_hrr_queue *hrrq;
6565         int hrrq_id;
6566
6567         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6568         hrrq = &ioa_cfg->hrrq[hrrq_id];
6569
6570         qc->lldd_task = NULL;
6571         spin_lock(&hrrq->_lock);
6572         if (unlikely(hrrq->ioa_is_dead)) {
6573                 spin_unlock(&hrrq->_lock);
6574                 return 0;
6575         }
6576
6577         if (unlikely(!hrrq->allow_cmds)) {
6578                 spin_unlock(&hrrq->_lock);
6579                 return ATA_DEFER_LINK;
6580         }
6581
6582         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6583         if (ipr_cmd == NULL) {
6584                 spin_unlock(&hrrq->_lock);
6585                 return ATA_DEFER_LINK;
6586         }
6587
6588         qc->lldd_task = ipr_cmd;
6589         spin_unlock(&hrrq->_lock);
6590         return 0;
6591 }
6592
6593 /**
6594  * ipr_qc_issue - Issue a SATA qc to a device
6595  * @qc: queued command
6596  *
6597  * Return value:
6598  *      0 on success / AC_ERR_* on failure
6599  **/
6600 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6601 {
6602         struct ata_port *ap = qc->ap;
6603         struct ipr_sata_port *sata_port = ap->private_data;
6604         struct ipr_resource_entry *res = sata_port->res;
6605         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6606         struct ipr_cmnd *ipr_cmd;
6607         struct ipr_ioarcb *ioarcb;
6608         struct ipr_ioarcb_ata_regs *regs;
6609
6610         if (qc->lldd_task == NULL)
6611                 ipr_qc_defer(qc);
6612
6613         ipr_cmd = qc->lldd_task;
6614         if (ipr_cmd == NULL)
6615                 return AC_ERR_SYSTEM;
6616
6617         qc->lldd_task = NULL;
6618         spin_lock(&ipr_cmd->hrrq->_lock);
6619         if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6620                         ipr_cmd->hrrq->ioa_is_dead)) {
6621                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6622                 spin_unlock(&ipr_cmd->hrrq->_lock);
6623                 return AC_ERR_SYSTEM;
6624         }
6625
6626         ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
6627         ioarcb = &ipr_cmd->ioarcb;
6628
6629         if (ioa_cfg->sis64) {
6630                 regs = &ipr_cmd->i.ata_ioadl.regs;
6631                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6632         } else
6633                 regs = &ioarcb->u.add_data.u.regs;
6634
6635         memset(regs, 0, sizeof(*regs));
6636         ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6637
6638         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
6639         ipr_cmd->qc = qc;
6640         ipr_cmd->done = ipr_sata_done;
6641         ipr_cmd->ioarcb.res_handle = res->res_handle;
6642         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6643         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6644         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6645         ipr_cmd->dma_use_sg = qc->n_elem;
6646
6647         if (ioa_cfg->sis64)
6648                 ipr_build_ata_ioadl64(ipr_cmd, qc);
6649         else
6650                 ipr_build_ata_ioadl(ipr_cmd, qc);
6651
6652         regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6653         ipr_copy_sata_tf(regs, &qc->tf);
6654         memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6655         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6656
6657         switch (qc->tf.protocol) {
6658         case ATA_PROT_NODATA:
6659         case ATA_PROT_PIO:
6660                 break;
6661
6662         case ATA_PROT_DMA:
6663                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6664                 break;
6665
6666         case ATAPI_PROT_PIO:
6667         case ATAPI_PROT_NODATA:
6668                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6669                 break;
6670
6671         case ATAPI_PROT_DMA:
6672                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6673                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6674                 break;
6675
6676         default:
6677                 WARN_ON(1);
6678                 spin_unlock(&ipr_cmd->hrrq->_lock);
6679                 return AC_ERR_INVALID;
6680         }
6681
6682         ipr_send_command(ipr_cmd);
6683         spin_unlock(&ipr_cmd->hrrq->_lock);
6684
6685         return 0;
6686 }
6687
6688 /**
6689  * ipr_qc_fill_rtf - Read result TF
6690  * @qc: ATA queued command
6691  *
6692  * Return value:
6693  *      true
6694  **/
6695 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6696 {
6697         struct ipr_sata_port *sata_port = qc->ap->private_data;
6698         struct ipr_ioasa_gata *g = &sata_port->ioasa;
6699         struct ata_taskfile *tf = &qc->result_tf;
6700
6701         tf->feature = g->error;
6702         tf->nsect = g->nsect;
6703         tf->lbal = g->lbal;
6704         tf->lbam = g->lbam;
6705         tf->lbah = g->lbah;
6706         tf->device = g->device;
6707         tf->command = g->status;
6708         tf->hob_nsect = g->hob_nsect;
6709         tf->hob_lbal = g->hob_lbal;
6710         tf->hob_lbam = g->hob_lbam;
6711         tf->hob_lbah = g->hob_lbah;
6712
6713         return true;
6714 }
6715
6716 static struct ata_port_operations ipr_sata_ops = {
6717         .phy_reset = ipr_ata_phy_reset,
6718         .hardreset = ipr_sata_reset,
6719         .post_internal_cmd = ipr_ata_post_internal,
6720         .qc_prep = ata_noop_qc_prep,
6721         .qc_defer = ipr_qc_defer,
6722         .qc_issue = ipr_qc_issue,
6723         .qc_fill_rtf = ipr_qc_fill_rtf,
6724         .port_start = ata_sas_port_start,
6725         .port_stop = ata_sas_port_stop
6726 };
6727
6728 static struct ata_port_info sata_port_info = {
6729         .flags          = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
6730         .pio_mask       = ATA_PIO4_ONLY,
6731         .mwdma_mask     = ATA_MWDMA2,
6732         .udma_mask      = ATA_UDMA6,
6733         .port_ops       = &ipr_sata_ops
6734 };
6735
6736 #ifdef CONFIG_PPC_PSERIES
6737 static const u16 ipr_blocked_processors[] = {
6738         PVR_NORTHSTAR,
6739         PVR_PULSAR,
6740         PVR_POWER4,
6741         PVR_ICESTAR,
6742         PVR_SSTAR,
6743         PVR_POWER4p,
6744         PVR_630,
6745         PVR_630p
6746 };
6747
6748 /**
6749  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6750  * @ioa_cfg:    ioa cfg struct
6751  *
6752  * Adapters that use Gemstone revision < 3.1 do not work reliably on
6753  * certain pSeries hardware. This function determines if the given
6754  * adapter is in one of these configurations or not.
6755  *
6756  * Return value:
6757  *      1 if adapter is not supported / 0 if adapter is supported
6758  **/
6759 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6760 {
6761         int i;
6762
6763         if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6764                 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
6765                         if (pvr_version_is(ipr_blocked_processors[i]))
6766                                 return 1;
6767                 }
6768         }
6769         return 0;
6770 }
6771 #else
6772 #define ipr_invalid_adapter(ioa_cfg) 0
6773 #endif
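
/*
 * The #ifdef block above uses a common pattern: the real test is compiled
 * only where the platform support exists, and a constant-false macro is
 * provided elsewhere, so callers stay #ifdef-free and the compiler can
 * discard the error path on other architectures. In general form
 * (example names only):
 */
#if 0
#ifdef CONFIG_EXAMPLE_PLATFORM
static int example_blocked(struct example_cfg *cfg)
{
	return example_platform_test(cfg);
}
#else
#define example_blocked(cfg) 0
#endif
#endif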
6774
6775 /**
6776  * ipr_ioa_bringdown_done - IOA bring down completion.
6777  * @ipr_cmd:    ipr command struct
6778  *
6779  * This function processes the completion of an adapter bring down.
6780  * It wakes any reset sleepers.
6781  *
6782  * Return value:
6783  *      IPR_RC_JOB_RETURN
6784  **/
6785 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6786 {
6787         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6788         int i;
6789
6790         ENTER;
6791         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
6792                 ipr_trace;
6793                 spin_unlock_irq(ioa_cfg->host->host_lock);
6794                 scsi_unblock_requests(ioa_cfg->host);
6795                 spin_lock_irq(ioa_cfg->host->host_lock);
6796         }
6797
6798         ioa_cfg->in_reset_reload = 0;
6799         ioa_cfg->reset_retries = 0;
6800         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
6801                 spin_lock(&ioa_cfg->hrrq[i]._lock);
6802                 ioa_cfg->hrrq[i].ioa_is_dead = 1;
6803                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
6804         }
6805         wmb();
6806
6807         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6808         wake_up_all(&ioa_cfg->reset_wait_q);
6809         LEAVE;
6810
6811         return IPR_RC_JOB_RETURN;
6812 }
6813
6814 /**
6815  * ipr_ioa_reset_done - IOA reset completion.
6816  * @ipr_cmd:    ipr command struct
6817  *
6818  * This function processes the completion of an adapter reset.
6819  * It schedules any necessary mid-layer add/removes and
6820  * wakes any reset sleepers.
6821  *
6822  * Return value:
6823  *      IPR_RC_JOB_RETURN
6824  **/
6825 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6826 {
6827         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6828         struct ipr_resource_entry *res;
6829         struct ipr_hostrcb *hostrcb, *temp;
6830         int i = 0, j;
6831
6832         ENTER;
6833         ioa_cfg->in_reset_reload = 0;
6834         for (j = 0; j < ioa_cfg->hrrq_num; j++) {
6835                 spin_lock(&ioa_cfg->hrrq[j]._lock);
6836                 ioa_cfg->hrrq[j].allow_cmds = 1;
6837                 spin_unlock(&ioa_cfg->hrrq[j]._lock);
6838         }
6839         wmb();
6840         ioa_cfg->reset_cmd = NULL;
6841         ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6842
6843         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6844                 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6845                         ipr_trace;
6846                         break;
6847                 }
6848         }
6849         schedule_work(&ioa_cfg->work_q);
6850
6851         list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6852                 list_del(&hostrcb->queue);
6853                 if (i++ < IPR_NUM_LOG_HCAMS)
6854                         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6855                 else
6856                         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6857         }
6858
6859         scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6860         dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6861
6862         ioa_cfg->reset_retries = 0;
6863         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6864         wake_up_all(&ioa_cfg->reset_wait_q);
6865
6866         spin_unlock(ioa_cfg->host->host_lock);
6867         scsi_unblock_requests(ioa_cfg->host);
6868         spin_lock(ioa_cfg->host->host_lock);
6869
6870         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6871                 scsi_block_requests(ioa_cfg->host);
6872
6873         LEAVE;
6874         return IPR_RC_JOB_RETURN;
6875 }
6876
6877 /**
6878  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6879  * @supported_dev:      supported device struct
6880  * @vpids:              vendor product id struct
6881  *
6882  * Return value:
6883  *      none
6884  **/
6885 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6886                                  struct ipr_std_inq_vpids *vpids)
6887 {
6888         memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6889         memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6890         supported_dev->num_records = 1;
6891         supported_dev->data_length =
6892                 cpu_to_be16(sizeof(struct ipr_supported_device));
6893         supported_dev->reserved = 0;
6894 }
6895
6896 /**
6897  * ipr_set_supported_devs - Send Set Supported Devices for a device
6898  * @ipr_cmd:    ipr command struct
6899  *
6900  * This function sends a Set Supported Devices to the adapter
6901  *
6902  * Return value:
6903  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6904  **/
6905 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6906 {
6907         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6908         struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6909         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6910         struct ipr_resource_entry *res = ipr_cmd->u.res;
6911
6912         ipr_cmd->job_step = ipr_ioa_reset_done;
6913
6914         list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6915                 if (!ipr_is_scsi_disk(res))
6916                         continue;
6917
6918                 ipr_cmd->u.res = res;
6919                 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
6920
6921                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6922                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6923                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6924
6925                 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6926                 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
6927                 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6928                 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6929
6930                 ipr_init_ioadl(ipr_cmd,
6931                                ioa_cfg->vpd_cbs_dma +
6932                                  offsetof(struct ipr_misc_cbs, supp_dev),
6933                                sizeof(struct ipr_supported_device),
6934                                IPR_IOADL_FLAGS_WRITE_LAST);
6935
6936                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6937                            IPR_SET_SUP_DEVICE_TIMEOUT);
6938
6939                 if (!ioa_cfg->sis64)
6940                         ipr_cmd->job_step = ipr_set_supported_devs;
6941                 LEAVE;
6942                 return IPR_RC_JOB_RETURN;
6943         }
6944
6945         LEAVE;
6946         return IPR_RC_JOB_CONTINUE;
6947 }
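
/*
 * ipr_set_supported_devs shows the shape of the reset job state machine:
 * every step sets ipr_cmd->job_step to the next state, then either returns
 * IPR_RC_JOB_CONTINUE (run the next step immediately) or fires an async
 * adapter command and returns IPR_RC_JOB_RETURN (the command's done
 * routine re-enters the job runner). A step that must loop, as above,
 * simply points job_step back at itself. Skeleton with illustrative names:
 */
#if 0
static int example_job_step(struct ipr_cmnd *ipr_cmd)
{
	ipr_cmd->job_step = example_next_step;	/* default next state */

	if (example_more_async_work(ipr_cmd)) {
		ipr_cmd->job_step = example_job_step;	/* re-enter this state */
		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
			   EXAMPLE_TIMEOUT);
		return IPR_RC_JOB_RETURN;
	}
	return IPR_RC_JOB_CONTINUE;
}
#endif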
6948
6949 /**
6950  * ipr_get_mode_page - Locate specified mode page
6951  * @mode_pages: mode page buffer
6952  * @page_code:  page code to find
6953  * @len:                minimum required length for mode page
6954  *
6955  * Return value:
6956  *      pointer to mode page / NULL on failure
6957  **/
6958 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6959                                u32 page_code, u32 len)
6960 {
6961         struct ipr_mode_page_hdr *mode_hdr;
6962         u32 page_length;
6963         u32 length;
6964
6965         if (!mode_pages || (mode_pages->hdr.length == 0))
6966                 return NULL;
6967
6968         length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6969         mode_hdr = (struct ipr_mode_page_hdr *)
6970                 (mode_pages->data + mode_pages->hdr.block_desc_len);
6971
6972         while (length) {
6973                 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6974                         if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6975                                 return mode_hdr;
6976                         break;
6977                 } else {
6978                         page_length = (sizeof(struct ipr_mode_page_hdr) +
6979                                        mode_hdr->page_length);
6980                         length -= page_length;
6981                         mode_hdr = (struct ipr_mode_page_hdr *)
6982                                 ((unsigned long)mode_hdr + page_length);
6983                 }
6984         }
6985         return NULL;
6986 }
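
/*
 * Worked example of the length arithmetic above, for a MODE SENSE(6)
 * reply: hdr.length counts every byte after itself, so a header reporting
 * length = 0x23 with an 8-byte block descriptor leaves
 *
 *	(0x23 + 1) - 4 - 8 = 24 bytes
 *
 * of mode page data to walk, starting immediately after the block
 * descriptor.
 */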
6987
6988 /**
6989  * ipr_check_term_power - Check for term power errors
6990  * @ioa_cfg:    ioa config struct
6991  * @mode_pages: IOAFP mode pages buffer
6992  *
6993  * Check the IOAFP's mode page 28 for term power errors
6994  *
6995  * Return value:
6996  *      nothing
6997  **/
6998 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6999                                  struct ipr_mode_pages *mode_pages)
7000 {
7001         int i;
7002         int entry_length;
7003         struct ipr_dev_bus_entry *bus;
7004         struct ipr_mode_page28 *mode_page;
7005
7006         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7007                                       sizeof(struct ipr_mode_page28));
7008
7009         entry_length = mode_page->entry_length;
7010
7011         bus = mode_page->bus;
7012
7013         for (i = 0; i < mode_page->num_entries; i++) {
7014                 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7015                         dev_err(&ioa_cfg->pdev->dev,
7016                                 "Term power is absent on scsi bus %d\n",
7017                                 bus->res_addr.bus);
7018                 }
7019
7020                 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7021         }
7022 }
7023
7024 /**
7025  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7026  * @ioa_cfg:    ioa config struct
7027  *
7028  * Looks through the config table checking for SES devices. If
7029  * an SES device is found in the SES table with a maximum SCSI
7030  * bus speed, the bus is limited to that speed.
7031  *
7032  * Return value:
7033  *      none
7034  **/
7035 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7036 {
7037         u32 max_xfer_rate;
7038         int i;
7039
7040         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7041                 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7042                                                        ioa_cfg->bus_attr[i].bus_width);
7043
7044                 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7045                         ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7046         }
7047 }
7048
7049 /**
7050  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7051  * @ioa_cfg:    ioa config struct
7052  * @mode_pages: mode page 28 buffer
7053  *
7054  * Updates mode page 28 based on driver configuration
7055  *
7056  * Return value:
7057  *      none
7058  **/
7059 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7060                                           struct ipr_mode_pages *mode_pages)
7061 {
7062         int i, entry_length;
7063         struct ipr_dev_bus_entry *bus;
7064         struct ipr_bus_attributes *bus_attr;
7065         struct ipr_mode_page28 *mode_page;
7066
7067         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7068                                       sizeof(struct ipr_mode_page28));
7069
7070         entry_length = mode_page->entry_length;
7071
7072         /* Loop for each device bus entry */
7073         for (i = 0, bus = mode_page->bus;
7074              i < mode_page->num_entries;
7075              i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7076                 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7077                         dev_err(&ioa_cfg->pdev->dev,
7078                                 "Invalid resource address reported: 0x%08X\n",
7079                                 IPR_GET_PHYS_LOC(bus->res_addr));
7080                         continue;
7081                 }
7082
7083                 bus_attr = &ioa_cfg->bus_attr[i];
7084                 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7085                 bus->bus_width = bus_attr->bus_width;
7086                 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7087                 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7088                 if (bus_attr->qas_enabled)
7089                         bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7090                 else
7091                         bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7092         }
7093 }
7094
7095 /**
7096  * ipr_build_mode_select - Build a mode select command
7097  * @ipr_cmd:    ipr command struct
7098  * @res_handle: resource handle to send command to
7099  * @parm:               Byte 1 of Mode Select command
7100  * @dma_addr:   DMA buffer address
7101  * @xfer_len:   data transfer length
7102  *
7103  * Return value:
7104  *      none
7105  **/
7106 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7107                                   __be32 res_handle, u8 parm,
7108                                   dma_addr_t dma_addr, u8 xfer_len)
7109 {
7110         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7111
7112         ioarcb->res_handle = res_handle;
7113         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7114         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7115         ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7116         ioarcb->cmd_pkt.cdb[1] = parm;
7117         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7118
7119         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7120 }
7121
7122 /**
7123  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7124  * @ipr_cmd:    ipr command struct
7125  *
7126  * This function sets up the SCSI bus attributes and sends
7127  * a Mode Select for Page 28 to activate them.
7128  *
7129  * Return value:
7130  *      IPR_RC_JOB_RETURN
7131  **/
7132 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7133 {
7134         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7135         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7136         int length;
7137
7138         ENTER;
7139         ipr_scsi_bus_speed_limit(ioa_cfg);
7140         ipr_check_term_power(ioa_cfg, mode_pages);
7141         ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
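        /* The mode data length field is reserved for Mode Select, so
         * save the real length, then zero it in the buffer before sending. */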
7142         length = mode_pages->hdr.length + 1;
7143         mode_pages->hdr.length = 0;
7144
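        /* parm 0x11 = PF | SP: page-format data, save pages */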
7145         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7146                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7147                               length);
7148
7149         ipr_cmd->job_step = ipr_set_supported_devs;
7150         ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7151                                     struct ipr_resource_entry, queue);
7152         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7153
7154         LEAVE;
7155         return IPR_RC_JOB_RETURN;
7156 }
7157
7158 /**
7159  * ipr_build_mode_sense - Builds a mode sense command
7160  * @ipr_cmd:    ipr command struct
7161  * @res_handle: resource handle to send command to
7162  * @parm:               Byte 2 of mode sense command
7163  * @dma_addr:   DMA address of mode sense buffer
7164  * @xfer_len:   Size of DMA buffer
7165  *
7166  * Return value:
7167  *      none
7168  **/
7169 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7170                                  __be32 res_handle,
7171                                  u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7172 {
7173         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7174
7175         ioarcb->res_handle = res_handle;
7176         ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7177         ioarcb->cmd_pkt.cdb[2] = parm;
7178         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7179         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7180
7181         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7182 }
7183
7184 /**
7185  * ipr_reset_cmd_failed - Handle failure of IOA reset command
7186  * @ipr_cmd:    ipr command struct
7187  *
7188  * This function handles the failure of an IOA bringup command.
7189  *
7190  * Return value:
7191  *      IPR_RC_JOB_RETURN
7192  **/
7193 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7194 {
7195         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7196         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7197
7198         dev_err(&ioa_cfg->pdev->dev,
7199                 "0x%02X failed with IOASC: 0x%08X\n",
7200                 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7201
7202         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7203         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7204         return IPR_RC_JOB_RETURN;
7205 }
7206
7207 /**
7208  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7209  * @ipr_cmd:    ipr command struct
7210  *
7211  * This function handles the failure of a Mode Sense to the IOAFP.
7212  * Some adapters do not handle all mode pages.
7213  *
7214  * Return value:
7215  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7216  **/
7217 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7218 {
7219         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7220         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7221
7222         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7223                 ipr_cmd->job_step = ipr_set_supported_devs;
7224                 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7225                                             struct ipr_resource_entry, queue);
7226                 return IPR_RC_JOB_CONTINUE;
7227         }
7228
7229         return ipr_reset_cmd_failed(ipr_cmd);
7230 }
7231
7232 /**
7233  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7234  * @ipr_cmd:    ipr command struct
7235  *
7236  * This function sends a Page 28 mode sense to the IOA to
7237  * retrieve SCSI bus attributes.
7238  *
7239  * Return value:
7240  *      IPR_RC_JOB_RETURN
7241  **/
7242 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7243 {
7244         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7245
7246         ENTER;
7247         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7248                              0x28, ioa_cfg->vpd_cbs_dma +
7249                              offsetof(struct ipr_misc_cbs, mode_pages),
7250                              sizeof(struct ipr_mode_pages));
7251
7252         ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7253         ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7254
7255         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7256
7257         LEAVE;
7258         return IPR_RC_JOB_RETURN;
7259 }
7260
7261 /**
7262  * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7263  * @ipr_cmd:    ipr command struct
7264  *
7265  * This function enables dual IOA RAID support if possible.
7266  *
7267  * Return value:
7268  *      IPR_RC_JOB_RETURN
7269  **/
7270 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7271 {
7272         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7273         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7274         struct ipr_mode_page24 *mode_page;
7275         int length;
7276
7277         ENTER;
7278         mode_page = ipr_get_mode_page(mode_pages, 0x24,
7279                                       sizeof(struct ipr_mode_page24));
7280
7281         if (mode_page)
7282                 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7283
7284         length = mode_pages->hdr.length + 1;
7285         mode_pages->hdr.length = 0;
7286
7287         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7288                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7289                               length);
7290
7291         ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7292         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7293
7294         LEAVE;
7295         return IPR_RC_JOB_RETURN;
7296 }
7297
7298 /**
7299  * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7300  * @ipr_cmd:    ipr command struct
7301  *
7302  * This function handles the failure of a Mode Sense to the IOAFP.
7303  * Some adapters do not handle all mode pages.
7304  *
7305  * Return value:
7306  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7307  **/
7308 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7309 {
7310         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7311
7312         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7313                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7314                 return IPR_RC_JOB_CONTINUE;
7315         }
7316
7317         return ipr_reset_cmd_failed(ipr_cmd);
7318 }
7319
7320 /**
7321  * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7322  * @ipr_cmd:    ipr command struct
7323  *
7324  * This function sends a mode sense to the IOA to retrieve
7325  * the IOA Advanced Function Control mode page.
7326  *
7327  * Return value:
7328  *      IPR_RC_JOB_RETURN
7329  **/
7330 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7331 {
7332         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7333
7334         ENTER;
7335         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7336                              0x24, ioa_cfg->vpd_cbs_dma +
7337                              offsetof(struct ipr_misc_cbs, mode_pages),
7338                              sizeof(struct ipr_mode_pages));
7339
7340         ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7341         ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7342
7343         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7344
7345         LEAVE;
7346         return IPR_RC_JOB_RETURN;
7347 }
7348
7349 /**
7350  * ipr_init_res_table - Initialize the resource table
7351  * @ipr_cmd:    ipr command struct
7352  *
7353  * This function looks through the existing resource table, comparing
7354  * it with the config table. This function will take care of old/new
7355  * devices and schedule adding/removing them from the mid-layer
7356  * as appropriate.
7357  *
7358  * Return value:
7359  *      IPR_RC_JOB_CONTINUE
7360  **/
7361 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7362 {
7363         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7364         struct ipr_resource_entry *res, *temp;
7365         struct ipr_config_table_entry_wrapper cfgtew;
7366         int entries, found, flag, i;
7367         LIST_HEAD(old_res);
7368
7369         ENTER;
7370         if (ioa_cfg->sis64)
7371                 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7372         else
7373                 flag = ioa_cfg->u.cfg_table->hdr.flags;
7374
7375         if (flag & IPR_UCODE_DOWNLOAD_REQ)
7376                 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7377
7378         list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7379                 list_move_tail(&res->queue, &old_res);
7380
7381         if (ioa_cfg->sis64)
7382                 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7383         else
7384                 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7385
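        /*
         * Diff the config table against the old resource list: matches
         * move back to used_res_q, new devices take a free entry and are
         * flagged for mid-layer add; stale entries are handled below.
         */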
7386         for (i = 0; i < entries; i++) {
7387                 if (ioa_cfg->sis64)
7388                         cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7389                 else
7390                         cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7391                 found = 0;
7392
7393                 list_for_each_entry_safe(res, temp, &old_res, queue) {
7394                         if (ipr_is_same_device(res, &cfgtew)) {
7395                                 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7396                                 found = 1;
7397                                 break;
7398                         }
7399                 }
7400
7401                 if (!found) {
7402                         if (list_empty(&ioa_cfg->free_res_q)) {
7403                                 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7404                                 break;
7405                         }
7406
7407                         found = 1;
7408                         res = list_entry(ioa_cfg->free_res_q.next,
7409                                          struct ipr_resource_entry, queue);
7410                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7411                         ipr_init_res_entry(res, &cfgtew);
7412                         res->add_to_ml = 1;
7413                 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7414                         res->sdev->allow_restart = 1;
7415
7416                 if (found)
7417                         ipr_update_res_entry(res, &cfgtew);
7418         }
7419
7420         list_for_each_entry_safe(res, temp, &old_res, queue) {
7421                 if (res->sdev) {
7422                         res->del_from_ml = 1;
7423                         res->res_handle = IPR_INVALID_RES_HANDLE;
7424                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7425                 }
7426         }
7427
7428         list_for_each_entry_safe(res, temp, &old_res, queue) {
7429                 ipr_clear_res_target(res);
7430                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7431         }
7432
7433         if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7434                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7435         else
7436                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7437
7438         LEAVE;
7439         return IPR_RC_JOB_CONTINUE;
7440 }
7441
7442 /**
7443  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7444  * @ipr_cmd:    ipr command struct
7445  *
7446  * This function sends a Query IOA Configuration command
7447  * to the adapter to retrieve the IOA configuration table.
7448  *
7449  * Return value:
7450  *      IPR_RC_JOB_RETURN
7451  **/
7452 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7453 {
7454         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7455         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7456         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7457         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7458
7459         ENTER;
7460         if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7461                 ioa_cfg->dual_raid = 1;
7462         dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7463                  ucode_vpd->major_release, ucode_vpd->card_type,
7464                  ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7465         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7466         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7467
7468         ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7469         ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7470         ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7471         ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7472
7473         ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7474                        IPR_IOADL_FLAGS_READ_LAST);
7475
7476         ipr_cmd->job_step = ipr_init_res_table;
7477
7478         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7479
7480         LEAVE;
7481         return IPR_RC_JOB_RETURN;
7482 }
7483
7484 /**
7485  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7486  * @ipr_cmd:    ipr command struct
 * @flags:      inquiry flags (CDB byte 1, e.g. EVPD)
 * @page:       page code to request
 * @dma_addr:   DMA address of the inquiry response buffer
 * @xfer_len:   size of the inquiry response buffer
7487  *
7488  * This utility function sends an inquiry to the adapter.
7489  *
7490  * Return value:
7491  *      none
7492  **/
7493 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7494                               dma_addr_t dma_addr, u8 xfer_len)
7495 {
7496         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7497
7498         ENTER;
7499         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7500         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7501
7502         ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7503         ioarcb->cmd_pkt.cdb[1] = flags;
7504         ioarcb->cmd_pkt.cdb[2] = page;
7505         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7506
7507         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7508
7509         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7510         LEAVE;
7511 }
7512
7513 /**
7514  * ipr_inquiry_page_supported - Is the given inquiry page supported
7515  * @page0:              inquiry page 0 buffer
7516  * @page:               page code.
7517  *
7518  * This function determines if the specified inquiry page is supported.
7519  *
7520  * Return value:
7521  *      1 if page is supported / 0 if not
7522  **/
7523 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7524 {
7525         int i;
7526
7527         for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7528                 if (page0->page[i] == page)
7529                         return 1;
7530
7531         return 0;
7532 }
7533
7534 /**
7535  * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7536  * @ipr_cmd:    ipr command struct
7537  *
7538  * This function sends a Page 0xD0 inquiry to the adapter
7539  * to retrieve adapter capabilities.
7540  *
7541  * Return value:
7542  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7543  **/
7544 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7545 {
7546         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7547         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7548         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7549
7550         ENTER;
7551         ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7552         memset(cap, 0, sizeof(*cap));
7553
7554         if (ipr_inquiry_page_supported(page0, 0xD0)) {
7555                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7556                                   ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7557                                   sizeof(struct ipr_inquiry_cap));
7558                 return IPR_RC_JOB_RETURN;
7559         }
7560
7561         LEAVE;
7562         return IPR_RC_JOB_CONTINUE;
7563 }
7564
7565 /**
7566  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7567  * @ipr_cmd:    ipr command struct
7568  *
7569  * This function sends a Page 3 inquiry to the adapter
7570  * to retrieve software VPD information.
7571  *
7572  * Return value:
7573  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7574  **/
7575 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7576 {
7577         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7578
7579         ENTER;
7580
7581         ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7582
7583         ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7584                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7585                           sizeof(struct ipr_inquiry_page3));
7586
7587         LEAVE;
7588         return IPR_RC_JOB_RETURN;
7589 }
7590
7591 /**
7592  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7593  * @ipr_cmd:    ipr command struct
7594  *
7595  * This function sends a Page 0 inquiry to the adapter
7596  * to retrieve supported inquiry pages.
7597  *
7598  * Return value:
7599  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7600  **/
7601 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7602 {
7603         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7604         char type[5];
7605
7606         ENTER;
7607
7608         /* Grab the type out of the VPD and store it away */
7609         memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7610         type[4] = '\0';
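        /* e.g. a product ID starting "5702" yields ioa_cfg->type 0x5702 */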
7611         ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7612
7613         ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
7614
7615         ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7616                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7617                           sizeof(struct ipr_inquiry_page0));
7618
7619         LEAVE;
7620         return IPR_RC_JOB_RETURN;
7621 }
7622
7623 /**
7624  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7625  * @ipr_cmd:    ipr command struct
7626  *
7627  * This function sends a standard inquiry to the adapter.
7628  *
7629  * Return value:
7630  *      IPR_RC_JOB_RETURN
7631  **/
7632 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7633 {
7634         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7635
7636         ENTER;
7637         ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7638
7639         ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7640                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7641                           sizeof(struct ipr_ioa_vpd));
7642
7643         LEAVE;
7644         return IPR_RC_JOB_RETURN;
7645 }
7646
7647 /**
7648  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7649  * @ipr_cmd:    ipr command struct
7650  *
7651  * This function sends an Identify Host Request Response Queue
7652  * command to establish the HRRQ with the adapter.
7653  *
7654  * Return value:
7655  *      IPR_RC_JOB_RETURN
7656  **/
7657 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7658 {
7659         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7660         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7661         struct ipr_hrr_queue *hrrq;
7662
7663         ENTER;
7664         ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7665         dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7666
7667         if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7668                 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
7669
7670                 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7671                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7672
7673                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7674                 if (ioa_cfg->sis64)
7675                         ioarcb->cmd_pkt.cdb[1] = 0x1;
7676
7677                 if (ioa_cfg->nvectors == 1)
7678                         ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7679                 else
7680                         ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7681
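                /*
                 * CDB bytes 2-5 carry the low 32 bits of the host RRQ DMA
                 * address (MSB first) and bytes 7-8 its size in bytes; on
                 * SIS-64 adapters bytes 10-13 add the upper 32 bits.
                 */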
7682                 ioarcb->cmd_pkt.cdb[2] =
7683                         ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7684                 ioarcb->cmd_pkt.cdb[3] =
7685                         ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7686                 ioarcb->cmd_pkt.cdb[4] =
7687                         ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7688                 ioarcb->cmd_pkt.cdb[5] =
7689                         ((u64) hrrq->host_rrq_dma) & 0xff;
7690                 ioarcb->cmd_pkt.cdb[7] =
7691                         ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7692                 ioarcb->cmd_pkt.cdb[8] =
7693                         (sizeof(u32) * hrrq->size) & 0xff;
7694
7695                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7696                         ioarcb->cmd_pkt.cdb[9] =
7697                                         ioa_cfg->identify_hrrq_index;
7698
7699                 if (ioa_cfg->sis64) {
7700                         ioarcb->cmd_pkt.cdb[10] =
7701                                 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7702                         ioarcb->cmd_pkt.cdb[11] =
7703                                 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7704                         ioarcb->cmd_pkt.cdb[12] =
7705                                 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7706                         ioarcb->cmd_pkt.cdb[13] =
7707                                 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
7708                 }
7709
7710                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7711                         ioarcb->cmd_pkt.cdb[14] =
7712                                         ioa_cfg->identify_hrrq_index;
7713
7714                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7715                            IPR_INTERNAL_TIMEOUT);
7716
7717                 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7718                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7719
7720                 LEAVE;
7721                 return IPR_RC_JOB_RETURN;
7722         }
7723
7724         LEAVE;
7725         return IPR_RC_JOB_CONTINUE;
7726 }
7727
7728 /**
7729  * ipr_reset_timer_done - Adapter reset timer function
7730  * @ipr_cmd:    ipr command struct
7731  *
7732  * Description: This function is used in adapter reset processing
7733  * for timing events. If the reset_cmd pointer in the IOA
7734  * for timing events. If the reset_cmd pointer in the IOA
7735  * config struct does not point to this command, we are doing
7736  * nested resets and fail_all_ops will take care of freeing the
7737  *
7738  * Return value:
7739  *      none
7740  **/
7741 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7742 {
7743         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7744         unsigned long lock_flags = 0;
7745
7746         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7747
7748         if (ioa_cfg->reset_cmd == ipr_cmd) {
7749                 list_del(&ipr_cmd->queue);
7750                 ipr_cmd->done(ipr_cmd);
7751         }
7752
7753         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7754 }
7755
7756 /**
7757  * ipr_reset_start_timer - Start a timer for adapter reset job
7758  * @ipr_cmd:    ipr command struct
7759  * @timeout:    timeout value
7760  *
7761  * Description: This function is used in adapter reset processing
7762  * for timing events. If the reset_cmd pointer in the IOA
7763  * config struct does not point to this command, we are doing
7764  * nested resets and fail_all_ops will take care of freeing the
7765  * command block.
7766  *
7767  * Return value:
7768  *      none
7769  **/
7770 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7771                                   unsigned long timeout)
7772 {
7773
7774         ENTER;
7775         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7776         ipr_cmd->done = ipr_reset_ioa_job;
7777
7778         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7779         ipr_cmd->timer.expires = jiffies + timeout;
7780         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7781         add_timer(&ipr_cmd->timer);
7782 }
7783
7784 /**
7785  * ipr_init_ioa_mem - Initialize ioa_cfg control block
7786  * @ioa_cfg:    ioa cfg struct
7787  *
7788  * Return value:
7789  *      nothing
7790  **/
7791 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7792 {
7793         struct ipr_hrr_queue *hrrq;
7794
7795         for_each_hrrq(hrrq, ioa_cfg) {
7796                 spin_lock(&hrrq->_lock);
7797                 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7798
7799                 /* Initialize Host RRQ pointers */
7800                 hrrq->hrrq_start = hrrq->host_rrq;
7801                 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
7802                 hrrq->hrrq_curr = hrrq->hrrq_start;
7803                 hrrq->toggle_bit = 1;
7804                 spin_unlock(&hrrq->_lock);
7805         }
7806         wmb();
7807
7808         ioa_cfg->identify_hrrq_index = 0;
7809         if (ioa_cfg->hrrq_num == 1)
7810                 atomic_set(&ioa_cfg->hrrq_index, 0);
7811         else
7812                 atomic_set(&ioa_cfg->hrrq_index, 1);
7813
7814         /* Zero out config table */
7815         memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7816 }
7817
7818 /**
7819  * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7820  * @ipr_cmd:    ipr command struct
7821  *
7822  * Return value:
7823  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7824  **/
7825 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7826 {
7827         unsigned long stage, stage_time;
7828         u32 feedback;
7829         volatile u32 int_reg;
7830         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7831         u64 maskval = 0;
7832
7833         feedback = readl(ioa_cfg->regs.init_feedback_reg);
7834         stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7835         stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7836
7837         ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7838
7839         /* sanity check the stage_time value */
7840         if (stage_time == 0)
7841                 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7842         else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7843                 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7844         else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7845                 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7846
7847         if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7848                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7849                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7850                 stage_time = ioa_cfg->transop_timeout;
7851                 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7852         } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7853                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7854                 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7855                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
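                        /* One 64-bit write masks both the IPL stage-change
                         * (upper word) and trans-to-operational interrupts. */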
7856                         maskval = IPR_PCII_IPL_STAGE_CHANGE;
7857                         maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7858                         writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7859                         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7860                         return IPR_RC_JOB_CONTINUE;
7861                 }
7862         }
7863
7864         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7865         ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7866         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7867         ipr_cmd->done = ipr_reset_ioa_job;
7868         add_timer(&ipr_cmd->timer);
7869
7870         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7871
7872         return IPR_RC_JOB_RETURN;
7873 }
7874
7875 /**
7876  * ipr_reset_enable_ioa - Enable the IOA following a reset.
7877  * @ipr_cmd:    ipr command struct
7878  *
7879  * This function reinitializes some control blocks and
7880  * enables destructive diagnostics on the adapter.
7881  *
7882  * Return value:
7883  *      IPR_RC_JOB_RETURN
7884  **/
7885 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7886 {
7887         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7888         volatile u32 int_reg;
7889         volatile u64 maskval;
7890         int i;
7891
7892         ENTER;
7893         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7894         ipr_init_ioa_mem(ioa_cfg);
7895
7896         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7897                 spin_lock(&ioa_cfg->hrrq[i]._lock);
7898                 ioa_cfg->hrrq[i].allow_interrupts = 1;
7899                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7900         }
7901         wmb();
7902         if (ioa_cfg->sis64) {
7903                 /* Set the adapter to the correct endian mode. */
7904                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7905                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7906         }
7907
7908         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7909
7910         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7911                 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
7912                        ioa_cfg->regs.clr_interrupt_mask_reg32);
7913                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7914                 return IPR_RC_JOB_CONTINUE;
7915         }
7916
7917         /* Enable destructive diagnostics on IOA */
7918         writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7919
7920         if (ioa_cfg->sis64) {
7921                 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7922                 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7923                 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7924         } else
7925                 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7926
7927         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7928
7929         dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7930
7931         if (ioa_cfg->sis64) {
7932                 ipr_cmd->job_step = ipr_reset_next_stage;
7933                 return IPR_RC_JOB_CONTINUE;
7934         }
7935
7936         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7937         ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7938         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7939         ipr_cmd->done = ipr_reset_ioa_job;
7940         add_timer(&ipr_cmd->timer);
7941         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7942
7943         LEAVE;
7944         return IPR_RC_JOB_RETURN;
7945 }
7946
7947 /**
7948  * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7949  * @ipr_cmd:    ipr command struct
7950  *
7951  * This function is invoked when an adapter dump has run out
7952  * of processing time.
7953  *
7954  * Return value:
7955  *      IPR_RC_JOB_CONTINUE
7956  **/
7957 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7958 {
7959         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7960
7961         if (ioa_cfg->sdt_state == GET_DUMP)
7962                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7963         else if (ioa_cfg->sdt_state == READ_DUMP)
7964                 ioa_cfg->sdt_state = ABORT_DUMP;
7965
7966         ioa_cfg->dump_timeout = 1;
7967         ipr_cmd->job_step = ipr_reset_alert;
7968
7969         return IPR_RC_JOB_CONTINUE;
7970 }
7971
7972 /**
7973  * ipr_unit_check_no_data - Log a unit check/no data error log
7974  * @ioa_cfg:            ioa config struct
7975  *
7976  * Logs an error indicating the adapter unit checked, but for some
7977  * reason, we were unable to fetch the unit check buffer.
7978  *
7979  * Return value:
7980  *      nothing
7981  **/
7982 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7983 {
7984         ioa_cfg->errors_logged++;
7985         dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7986 }
7987
7988 /**
7989  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
7990  * @ioa_cfg:            ioa config struct
7991  *
7992  * Fetches the unit check buffer from the adapter by clocking the data
7993  * through the mailbox register.
7994  *
7995  * Return value:
7996  *      nothing
7997  **/
7998 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7999 {
8000         unsigned long mailbox;
8001         struct ipr_hostrcb *hostrcb;
8002         struct ipr_uc_sdt sdt;
8003         int rc, length;
8004         u32 ioasc;
8005
8006         mailbox = readl(ioa_cfg->ioa_mailbox);
8007
8008         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8009                 ipr_unit_check_no_data(ioa_cfg);
8010                 return;
8011         }
8012
8013         memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8014         rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8015                                         (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8016
8017         if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8018             ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8019             (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8020                 ipr_unit_check_no_data(ioa_cfg);
8021                 return;
8022         }
8023
8024         /* Find length of the first sdt entry (UC buffer) */
8025         if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8026                 length = be32_to_cpu(sdt.entry[0].end_token);
8027         else
8028                 length = (be32_to_cpu(sdt.entry[0].end_token) -
8029                           be32_to_cpu(sdt.entry[0].start_token)) &
8030                           IPR_FMT2_MBX_ADDR_MASK;
8031
8032         hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8033                              struct ipr_hostrcb, queue);
8034         list_del(&hostrcb->queue);
8035         memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8036
8037         rc = ipr_get_ldump_data_section(ioa_cfg,
8038                                         be32_to_cpu(sdt.entry[0].start_token),
8039                                         (__be32 *)&hostrcb->hcam,
8040                                         min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8041
8042         if (!rc) {
8043                 ipr_handle_log_data(ioa_cfg, hostrcb);
8044                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8045                 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8046                     ioa_cfg->sdt_state == GET_DUMP)
8047                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8048         } else
8049                 ipr_unit_check_no_data(ioa_cfg);
8050
8051         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8052 }
8053
8054 /**
8055  * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8056  * @ipr_cmd:    ipr command struct
8057  *
8058  * Description: This function retrieves the unit check buffer.
8059  *
8060  * Return value:
8061  *      IPR_RC_JOB_RETURN
8062  **/
8063 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8064 {
8065         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8066
8067         ENTER;
8068         ioa_cfg->ioa_unit_checked = 0;
8069         ipr_get_unit_check_buffer(ioa_cfg);
8070         ipr_cmd->job_step = ipr_reset_alert;
8071         ipr_reset_start_timer(ipr_cmd, 0);
8072
8073         LEAVE;
8074         return IPR_RC_JOB_RETURN;
8075 }
8076
8077 /**
8078  * ipr_reset_restore_cfg_space - Restore PCI config space.
8079  * @ipr_cmd:    ipr command struct
8080  *
8081  * Description: This function restores the saved PCI config space of
8082  * the adapter, fails all outstanding ops back to the callers, and
8083  * fetches the dump/unit check if applicable to this reset.
8084  *
8085  * Return value:
8086  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8087  **/
8088 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8089 {
8090         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8091         u32 int_reg;
8092
8093         ENTER;
8094         ioa_cfg->pdev->state_saved = true;
8095         pci_restore_state(ioa_cfg->pdev);
8096
8097         if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8098                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8099                 return IPR_RC_JOB_CONTINUE;
8100         }
8101
8102         ipr_fail_all_ops(ioa_cfg);
8103
8104         if (ioa_cfg->sis64) {
8105                 /* Set the adapter to the correct endian mode. */
8106                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8107                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8108         }
8109
8110         if (ioa_cfg->ioa_unit_checked) {
8111                 if (ioa_cfg->sis64) {
8112                         ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8113                         ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8114                         return IPR_RC_JOB_RETURN;
8115                 } else {
8116                         ioa_cfg->ioa_unit_checked = 0;
8117                         ipr_get_unit_check_buffer(ioa_cfg);
8118                         ipr_cmd->job_step = ipr_reset_alert;
8119                         ipr_reset_start_timer(ipr_cmd, 0);
8120                         return IPR_RC_JOB_RETURN;
8121                 }
8122         }
8123
8124         if (ioa_cfg->in_ioa_bringdown) {
8125                 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8126         } else {
8127                 ipr_cmd->job_step = ipr_reset_enable_ioa;
8128
8129                 if (GET_DUMP == ioa_cfg->sdt_state) {
8130                         ioa_cfg->sdt_state = READ_DUMP;
8131                         ioa_cfg->dump_timeout = 0;
8132                         if (ioa_cfg->sis64)
8133                                 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8134                         else
8135                                 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8136                         ipr_cmd->job_step = ipr_reset_wait_for_dump;
8137                         schedule_work(&ioa_cfg->work_q);
8138                         return IPR_RC_JOB_RETURN;
8139                 }
8140         }
8141
8142         LEAVE;
8143         return IPR_RC_JOB_CONTINUE;
8144 }
8145
8146 /**
8147  * ipr_reset_bist_done - BIST has completed on the adapter.
8148  * @ipr_cmd:    ipr command struct
8149  *
8150  * Description: Unblock config space and resume the reset process.
8151  *
8152  * Return value:
8153  *      IPR_RC_JOB_CONTINUE
8154  **/
8155 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8156 {
8157         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8158
8159         ENTER;
8160         if (ioa_cfg->cfg_locked)
8161                 pci_cfg_access_unlock(ioa_cfg->pdev);
8162         ioa_cfg->cfg_locked = 0;
8163         ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8164         LEAVE;
8165         return IPR_RC_JOB_CONTINUE;
8166 }
8167
8168 /**
8169  * ipr_reset_start_bist - Run BIST on the adapter.
8170  * @ipr_cmd:    ipr command struct
8171  *
8172  * Description: This function runs BIST on the adapter, then delays 2 seconds.
8173  *
8174  * Return value:
8175  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8176  **/
8177 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8178 {
8179         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8180         int rc = PCIBIOS_SUCCESSFUL;
8181
8182         ENTER;
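        /* SIS-64 chips start BIST via an MMIO doorbell; older chips use
         * the PCI config space BIST register. */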
8183         if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8184                 writel(IPR_UPROCI_SIS64_START_BIST,
8185                        ioa_cfg->regs.set_uproc_interrupt_reg32);
8186         else
8187                 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8188
8189         if (rc == PCIBIOS_SUCCESSFUL) {
8190                 ipr_cmd->job_step = ipr_reset_bist_done;
8191                 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8192                 rc = IPR_RC_JOB_RETURN;
8193         } else {
8194                 if (ioa_cfg->cfg_locked)
8195                         pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8196                 ioa_cfg->cfg_locked = 0;
8197                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8198                 rc = IPR_RC_JOB_CONTINUE;
8199         }
8200
8201         LEAVE;
8202         return rc;
8203 }
8204
8205 /**
8206  * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8207  * @ipr_cmd:    ipr command struct
8208  *
8209  * Description: This clears PCI reset to the adapter and delays two seconds.
8210  *
8211  * Return value:
8212  *      IPR_RC_JOB_RETURN
8213  **/
8214 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8215 {
8216         ENTER;
8217         pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
8218         ipr_cmd->job_step = ipr_reset_bist_done;
8219         ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8220         LEAVE;
8221         return IPR_RC_JOB_RETURN;
8222 }
8223
8224 /**
8225  * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8226  * @ipr_cmd:    ipr command struct
8227  *
8228  * Description: This asserts PCI reset to the adapter.
8229  *
8230  * Return value:
8231  *      IPR_RC_JOB_RETURN
8232  **/
8233 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8234 {
8235         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8236         struct pci_dev *pdev = ioa_cfg->pdev;
8237
8238         ENTER;
8239         pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8240         ipr_cmd->job_step = ipr_reset_slot_reset_done;
8241         ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
8242         LEAVE;
8243         return IPR_RC_JOB_RETURN;
8244 }
8245
8246 /**
8247  * ipr_reset_block_config_access_wait - Wait for permission to block config access
8248  * @ipr_cmd:    ipr command struct
8249  *
8250  * Description: This attempts to block config access to the IOA.
8251  *
8252  * Return value:
8253  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8254  **/
8255 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8256 {
8257         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8258         int rc = IPR_RC_JOB_CONTINUE;
8259
8260         if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8261                 ioa_cfg->cfg_locked = 1;
8262                 ipr_cmd->job_step = ioa_cfg->reset;
8263         } else {
8264                 if (ipr_cmd->u.time_left) {
8265                         rc = IPR_RC_JOB_RETURN;
8266                         ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8267                         ipr_reset_start_timer(ipr_cmd,
8268                                               IPR_CHECK_FOR_RESET_TIMEOUT);
8269                 } else {
8270                         ipr_cmd->job_step = ioa_cfg->reset;
8271                         dev_err(&ioa_cfg->pdev->dev,
8272                                 "Timed out waiting to lock config access. Resetting anyway.\n");
8273                 }
8274         }
8275
8276         return rc;
8277 }
8278
8279 /**
8280  * ipr_reset_block_config_access - Block config access to the IOA
8281  * @ipr_cmd:    ipr command struct
8282  *
8283  * Description: This attempts to block config access to the IOA
8284  *
8285  * Return value:
8286  *      IPR_RC_JOB_CONTINUE
8287  **/
8288 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8289 {
8290         ipr_cmd->ioa_cfg->cfg_locked = 0;
8291         ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8292         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8293         return IPR_RC_JOB_CONTINUE;
8294 }
8295
8296 /**
8297  * ipr_reset_allowed - Query whether or not IOA can be reset
8298  * @ioa_cfg:    ioa config struct
8299  *
8300  * Return value:
8301  *      0 if reset not allowed / non-zero if reset is allowed
8302  **/
8303 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8304 {
8305         volatile u32 temp_reg;
8306
8307         temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8308         return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8309 }
8310
8311 /**
8312  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8313  * @ipr_cmd:    ipr command struct
8314  *
8315  * Description: This function waits for adapter permission to run BIST,
8316  * then runs BIST. If the adapter does not give permission after a
8317  * reasonable time, we will reset the adapter anyway. The impact of
8318  * resetting the adapter without warning it is the risk of
8319  * losing the persistent error log on the adapter. If the adapter is
8320  * reset while it is writing to the flash on the adapter, the flash
8321  * segment will have bad ECC and be zeroed.
8322  *
8323  * Return value:
8324  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8325  **/
8326 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8327 {
8328         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8329         int rc = IPR_RC_JOB_RETURN;
8330
8331         if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8332                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8333                 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8334         } else {
8335                 ipr_cmd->job_step = ipr_reset_block_config_access;
8336                 rc = IPR_RC_JOB_CONTINUE;
8337         }
8338
8339         return rc;
8340 }
8341
8342 /**
8343  * ipr_reset_alert - Alert the adapter of a pending reset
8344  * @ipr_cmd:    ipr command struct
8345  *
8346  * Description: This function alerts the adapter that it will be reset.
8347  * If memory space is not currently enabled, proceed directly
8348  * to running BIST on the adapter. The timer must always be started
8349  * so we guarantee we do not run BIST from ipr_isr.
8350  *
8351  * Return value:
8352  *      IPR_RC_JOB_RETURN
8353  **/
8354 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8355 {
8356         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8357         u16 cmd_reg;
8358         int rc;
8359
8360         ENTER;
8361         rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8362
8363         if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8364                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8365                 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8366                 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8367         } else {
8368                 ipr_cmd->job_step = ipr_reset_block_config_access;
8369         }
8370
8371         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8372         ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8373
8374         LEAVE;
8375         return IPR_RC_JOB_RETURN;
8376 }
8377
8378 /**
8379  * ipr_reset_ucode_download_done - Microcode download completion
8380  * @ipr_cmd:    ipr command struct
8381  *
8382  * Description: This function unmaps the microcode download buffer.
8383  *
8384  * Return value:
8385  *      IPR_RC_JOB_CONTINUE
8386  **/
8387 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8388 {
8389         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8390         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8391
8392         dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
8393                      sglist->num_sg, DMA_TO_DEVICE);
8394
8395         ipr_cmd->job_step = ipr_reset_alert;
8396         return IPR_RC_JOB_CONTINUE;
8397 }
8398
8399 /**
8400  * ipr_reset_ucode_download - Download microcode to the adapter
8401  * @ipr_cmd:    ipr command struct
8402  *
8403  * Description: This function checks to see if there is microcode
8404  * to download to the adapter. If there is, a download is performed.
8405  *
8406  * Return value:
8407  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8408  **/
8409 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8410 {
8411         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8412         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8413
8414         ENTER;
8415         ipr_cmd->job_step = ipr_reset_alert;
8416
8417         if (!sglist)
8418                 return IPR_RC_JOB_CONTINUE;
8419
8420         ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8421         ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8422         ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8423         ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
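        /* WRITE BUFFER CDB bytes 6-8: 24-bit big-endian image length */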
8424         ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8425         ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8426         ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8427
8428         if (ioa_cfg->sis64)
8429                 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8430         else
8431                 ipr_build_ucode_ioadl(ipr_cmd, sglist);
8432         ipr_cmd->job_step = ipr_reset_ucode_download_done;
8433
8434         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8435                    IPR_WRITE_BUFFER_TIMEOUT);
8436
8437         LEAVE;
8438         return IPR_RC_JOB_RETURN;
8439 }
8440
8441 /**
8442  * ipr_reset_shutdown_ioa - Shutdown the adapter
8443  * @ipr_cmd:    ipr command struct
8444  *
8445  * Description: This function issues an adapter shutdown of the
8446  * specified type to the specified adapter as part of the
8447  * adapter reset job.
8448  *
8449  * Return value:
8450  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8451  **/
8452 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8453 {
8454         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8455         enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8456         unsigned long timeout;
8457         int rc = IPR_RC_JOB_CONTINUE;
8458
8459         ENTER;
8460         if (shutdown_type != IPR_SHUTDOWN_NONE &&
8461                         !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8462                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8463                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8464                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8465                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
8466
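                /*
                 * Select a timeout appropriate to the shutdown type;
                 * anything other than a normal or prepare-for-normal
                 * shutdown uses an abbreviated timeout, with a dedicated
                 * limit for dual IOA RAID configurations.
                 */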
8467                 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8468                         timeout = IPR_SHUTDOWN_TIMEOUT;
8469                 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8470                         timeout = IPR_INTERNAL_TIMEOUT;
8471                 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8472                         timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
8473                 else
8474                         timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
8475
8476                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8477
8478                 rc = IPR_RC_JOB_RETURN;
8479                 ipr_cmd->job_step = ipr_reset_ucode_download;
8480         } else
8481                 ipr_cmd->job_step = ipr_reset_alert;
8482
8483         LEAVE;
8484         return rc;
8485 }
8486
8487 /**
8488  * ipr_reset_ioa_job - Adapter reset job
8489  * @ipr_cmd:    ipr command struct
8490  *
8491  * Description: This function is the job router for the adapter reset job.
8492  *
8493  * Return value:
8494  *      none
8495  **/
8496 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8497 {
8498         u32 rc, ioasc;
8499         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8500
8501         do {
8502                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8503
8504                 if (ioa_cfg->reset_cmd != ipr_cmd) {
8505                         /*
8506                          * We are doing nested adapter resets and this is
8507                          * not the current reset job.
8508                          */
8509                         list_add_tail(&ipr_cmd->queue,
8510                                         &ipr_cmd->hrrq->hrrq_free_q);
8511                         return;
8512                 }
8513
8514                 if (IPR_IOASC_SENSE_KEY(ioasc)) {
8515                         rc = ipr_cmd->job_step_failed(ipr_cmd);
8516                         if (rc == IPR_RC_JOB_RETURN)
8517                                 return;
8518                 }
8519
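                /*
                 * Run the next step of the reset job. Steps that issue an
                 * asynchronous command return IPR_RC_JOB_RETURN and re-enter
                 * this router from their completion handler; steps that
                 * finish inline return IPR_RC_JOB_CONTINUE.
                 */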
8520                 ipr_reinit_ipr_cmnd(ipr_cmd);
8521                 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
8522                 rc = ipr_cmd->job_step(ipr_cmd);
8523         } while (rc == IPR_RC_JOB_CONTINUE);
8524 }
8525
8526 /**
8527  * _ipr_initiate_ioa_reset - Initiate an adapter reset
8528  * @ioa_cfg:            ioa config struct
8529  * @job_step:           first job step of reset job
8530  * @shutdown_type:      shutdown type
8531  *
8532  * Description: This function will initiate the reset of the given adapter
8533  * starting at the selected job step.
8534  * If the caller needs to wait on the completion of the reset,
8535  * the caller must sleep on the reset_wait_q.
8536  *
8537  * Return value:
8538  *      none
8539  **/
8540 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8541                                     int (*job_step) (struct ipr_cmnd *),
8542                                     enum ipr_shutdown_type shutdown_type)
8543 {
8544         struct ipr_cmnd *ipr_cmd;
8545         int i;
8546
8547         ioa_cfg->in_reset_reload = 1;
8548         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8549                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8550                 ioa_cfg->hrrq[i].allow_cmds = 0;
8551                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8552         }
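        /* Make the allow_cmds updates visible before continuing with the reset */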
8553         wmb();
8554         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
8555                 scsi_block_requests(ioa_cfg->host);
8556
8557         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8558         ioa_cfg->reset_cmd = ipr_cmd;
8559         ipr_cmd->job_step = job_step;
8560         ipr_cmd->u.shutdown_type = shutdown_type;
8561
8562         ipr_reset_ioa_job(ipr_cmd);
8563 }
8564
8565 /**
8566  * ipr_initiate_ioa_reset - Initiate an adapter reset
8567  * @ioa_cfg:            ioa config struct
8568  * @shutdown_type:      shutdown type
8569  *
8570  * Description: This function will initiate the reset of the given adapter.
8571  * If the caller needs to wait on the completion of the reset,
8572  * the caller must sleep on the reset_wait_q.
8573  *
8574  * Return value:
8575  *      none
8576  **/
8577 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8578                                    enum ipr_shutdown_type shutdown_type)
8579 {
8580         int i;
8581
8582         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
8583                 return;
8584
8585         if (ioa_cfg->in_reset_reload) {
8586                 if (ioa_cfg->sdt_state == GET_DUMP)
8587                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8588                 else if (ioa_cfg->sdt_state == READ_DUMP)
8589                         ioa_cfg->sdt_state = ABORT_DUMP;
8590         }
8591
8592         if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8593                 dev_err(&ioa_cfg->pdev->dev,
8594                         "IOA taken offline - error recovery failed\n");
8595
8596                 ioa_cfg->reset_retries = 0;
8597                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8598                         spin_lock(&ioa_cfg->hrrq[i]._lock);
8599                         ioa_cfg->hrrq[i].ioa_is_dead = 1;
8600                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
8601                 }
8602                 wmb();
8603
8604                 if (ioa_cfg->in_ioa_bringdown) {
8605                         ioa_cfg->reset_cmd = NULL;
8606                         ioa_cfg->in_reset_reload = 0;
8607                         ipr_fail_all_ops(ioa_cfg);
8608                         wake_up_all(&ioa_cfg->reset_wait_q);
8609
8610                         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
8611                                 spin_unlock_irq(ioa_cfg->host->host_lock);
8612                                 scsi_unblock_requests(ioa_cfg->host);
8613                                 spin_lock_irq(ioa_cfg->host->host_lock);
8614                         }
8615                         return;
8616                 } else {
8617                         ioa_cfg->in_ioa_bringdown = 1;
8618                         shutdown_type = IPR_SHUTDOWN_NONE;
8619                 }
8620         }
8621
8622         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8623                                 shutdown_type);
8624 }
8625
8626 /**
8627  * ipr_reset_freeze - Hold off all I/O activity
8628  * @ipr_cmd:    ipr command struct
8629  *
8630  * Description: If the PCI slot is frozen, hold off all I/O
8631  * activity; then, as soon as the slot is available again,
8632  * initiate an adapter reset.
8633  */
8634 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8635 {
8636         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8637         int i;
8638
8639         /* Disallow new interrupts, avoid loop */
8640         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8641                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8642                 ioa_cfg->hrrq[i].allow_interrupts = 0;
8643                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8644         }
8645         wmb();
8646         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8647         ipr_cmd->done = ipr_reset_ioa_job;
8648         return IPR_RC_JOB_RETURN;
8649 }
8650
8651 /**
8652  * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
8653  * @pdev:       PCI device struct
8654  *
8655  * Description: This routine is called to tell us that the MMIO
8656  * access to the IOA has been restored
8657  */
8658 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
8659 {
8660         unsigned long flags = 0;
8661         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8662
8663         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8664         if (!ioa_cfg->probe_done)
8665                 pci_save_state(pdev);
8666         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8667         return PCI_ERS_RESULT_NEED_RESET;
8668 }
8669
8670 /**
8671  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8672  * @pdev:       PCI device struct
8673  *
8674  * Description: This routine is called to tell us that the PCI bus
8675  * is down. Can't do anything here, except put the device driver
8676  * into a holding pattern, waiting for the PCI bus to come back.
8677  */
8678 static void ipr_pci_frozen(struct pci_dev *pdev)
8679 {
8680         unsigned long flags = 0;
8681         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8682
8683         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8684         if (ioa_cfg->probe_done)
8685                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8686         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8687 }
8688
8689 /**
8690  * ipr_pci_slot_reset - Called when PCI slot has been reset.
8691  * @pdev:       PCI device struct
8692  *
8693  * Description: This routine is called by the pci error recovery
8694  * code after the PCI slot has been reset, just before we
8695  * should resume normal operations.
8696  */
8697 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8698 {
8699         unsigned long flags = 0;
8700         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8701
8702         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8703         if (ioa_cfg->probe_done) {
8704                 if (ioa_cfg->needs_warm_reset)
8705                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8706                 else
8707                         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8708                                                 IPR_SHUTDOWN_NONE);
8709         } else
8710                 wake_up_all(&ioa_cfg->eeh_wait_q);
8711         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8712         return PCI_ERS_RESULT_RECOVERED;
8713 }
8714
8715 /**
8716  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8717  * @pdev:       PCI device struct
8718  *
8719  * Description: This routine is called when the PCI bus has
8720  * permanently failed.
8721  */
8722 static void ipr_pci_perm_failure(struct pci_dev *pdev)
8723 {
8724         unsigned long flags = 0;
8725         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8726         int i;
8727
8728         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8729         if (ioa_cfg->probe_done) {
8730                 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8731                         ioa_cfg->sdt_state = ABORT_DUMP;
8732                 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
8733                 ioa_cfg->in_ioa_bringdown = 1;
8734                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8735                         spin_lock(&ioa_cfg->hrrq[i]._lock);
8736                         ioa_cfg->hrrq[i].allow_cmds = 0;
8737                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
8738                 }
8739                 wmb();
8740                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8741         } else
8742                 wake_up_all(&ioa_cfg->eeh_wait_q);
8743         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8744 }
8745
8746 /**
8747  * ipr_pci_error_detected - Called when a PCI error is detected.
8748  * @pdev:       PCI device struct
8749  * @state:      PCI channel state
8750  *
8751  * Description: Called when a PCI error is detected.
8752  *
8753  * Return value:
8754  *      PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
8755  */
8756 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
8757                                                pci_channel_state_t state)
8758 {
8759         switch (state) {
8760         case pci_channel_io_frozen:
8761                 ipr_pci_frozen(pdev);
8762                 return PCI_ERS_RESULT_CAN_RECOVER;
8763         case pci_channel_io_perm_failure:
8764                 ipr_pci_perm_failure(pdev);
8765                 return PCI_ERS_RESULT_DISCONNECT;
8767         default:
8768                 break;
8769         }
8770         return PCI_ERS_RESULT_NEED_RESET;
8771 }
8772
8773 /**
8774  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8775  * @ioa_cfg:    ioa cfg struct
8776  *
8777  * Description: This is the second phase of adapter initialization.
8778  * This function takes care of initializing the adapter to the point
8779  * where it can accept new commands.
8780  *
8781  * Return value:
8782  *      0 on success / -EIO on failure
8783  **/
8784 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
8785 {
8786         int rc = 0;
8787         unsigned long host_lock_flags = 0;
8788
8789         ENTER;
8790         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8791         dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg addr: 0x%p\n", ioa_cfg);
8792         ioa_cfg->probe_done = 1;
8793         if (ioa_cfg->needs_hard_reset) {
8794                 ioa_cfg->needs_hard_reset = 0;
8795                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8796         } else
8797                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8798                                         IPR_SHUTDOWN_NONE);
8799         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8800         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8801         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8802
8803         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8804                 rc = -EIO;
8805         } else if (ipr_invalid_adapter(ioa_cfg)) {
8806                 if (!ipr_testmode)
8807                         rc = -EIO;
8808
8809                 dev_err(&ioa_cfg->pdev->dev,
8810                         "Adapter not supported in this hardware configuration.\n");
8811         }
8812
8813         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8814
8815         LEAVE;
8816         return rc;
8817 }
8818
8819 /**
8820  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8821  * @ioa_cfg:    ioa config struct
8822  *
8823  * Return value:
8824  *      none
8825  **/
8826 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8827 {
8828         int i;
8829
8830         for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8831                 if (ioa_cfg->ipr_cmnd_list[i])
8832                         dma_pool_free(ioa_cfg->ipr_cmd_pool,
8833                                       ioa_cfg->ipr_cmnd_list[i],
8834                                       ioa_cfg->ipr_cmnd_list_dma[i]);
8835
8836                 ioa_cfg->ipr_cmnd_list[i] = NULL;
8837         }
8838
8839         if (ioa_cfg->ipr_cmd_pool)
8840                 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
8841
8842         kfree(ioa_cfg->ipr_cmnd_list);
8843         kfree(ioa_cfg->ipr_cmnd_list_dma);
8844         ioa_cfg->ipr_cmnd_list = NULL;
8845         ioa_cfg->ipr_cmnd_list_dma = NULL;
8846         ioa_cfg->ipr_cmd_pool = NULL;
8847 }
8848
8849 /**
8850  * ipr_free_mem - Frees memory allocated for an adapter
8851  * @ioa_cfg:    ioa cfg struct
8852  *
8853  * Return value:
8854  *      nothing
8855  **/
8856 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8857 {
8858         int i;
8859
8860         kfree(ioa_cfg->res_entries);
8861         dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
8862                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8863         ipr_free_cmd_blks(ioa_cfg);
8864
8865         for (i = 0; i < ioa_cfg->hrrq_num; i++)
8866                 dma_free_coherent(&ioa_cfg->pdev->dev,
8867                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
8868                                   ioa_cfg->hrrq[i].host_rrq,
8869                                   ioa_cfg->hrrq[i].host_rrq_dma);
8870
8871         dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
8872                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
8873
8874         for (i = 0; i < IPR_NUM_HCAMS; i++) {
8875                 dma_free_coherent(&ioa_cfg->pdev->dev,
8876                                   sizeof(struct ipr_hostrcb),
8877                                   ioa_cfg->hostrcb[i],
8878                                   ioa_cfg->hostrcb_dma[i]);
8879         }
8880
8881         ipr_free_dump(ioa_cfg);
8882         kfree(ioa_cfg->trace);
8883 }
8884
8885 /**
8886  * ipr_free_all_resources - Free all allocated resources for an adapter.
8887  * @ioa_cfg:    ioa config struct
8888  *
8889  * This function frees all allocated resources for the
8890  * specified adapter.
8891  *
8892  * Return value:
8893  *      none
8894  **/
8895 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8896 {
8897         struct pci_dev *pdev = ioa_cfg->pdev;
8898
8899         ENTER;
8900         if (ioa_cfg->intr_flag == IPR_USE_MSI ||
8901             ioa_cfg->intr_flag == IPR_USE_MSIX) {
8902                 int i;
8903                 for (i = 0; i < ioa_cfg->nvectors; i++)
8904                         free_irq(ioa_cfg->vectors_info[i].vec,
8905                                 &ioa_cfg->hrrq[i]);
8906         } else
8907                 free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
8908
8909         if (ioa_cfg->intr_flag == IPR_USE_MSI) {
8910                 pci_disable_msi(pdev);
8911                 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
8912         } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
8913                 pci_disable_msix(pdev);
8914                 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
8915         }
8916
8917         iounmap(ioa_cfg->hdw_dma_regs);
8918         pci_release_regions(pdev);
8919         ipr_free_mem(ioa_cfg);
8920         scsi_host_put(ioa_cfg->host);
8921         pci_disable_device(pdev);
8922         LEAVE;
8923 }
8924
8925 /**
8926  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8927  * @ioa_cfg:    ioa config struct
8928  *
8929  * Return value:
8930  *      0 on success / -ENOMEM on allocation failure
8931  **/
8932 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8933 {
8934         struct ipr_cmnd *ipr_cmd;
8935         struct ipr_ioarcb *ioarcb;
8936         dma_addr_t dma_addr;
8937         int i, entries_each_hrrq, hrrq_id = 0;
8938
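        /* Pool of fixed-size command blocks, each aligned on a 512-byte boundary */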
8939         ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
8940                                                 sizeof(struct ipr_cmnd), 512, 0);
8941
8942         if (!ioa_cfg->ipr_cmd_pool)
8943                 return -ENOMEM;
8944
8945         ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
8946         ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
8947
8948         if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
8949                 ipr_free_cmd_blks(ioa_cfg);
8950                 return -ENOMEM;
8951         }
8952
8953         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8954                 if (ioa_cfg->hrrq_num > 1) {
8955                         if (i == 0) {
8956                                 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
8957                                 ioa_cfg->hrrq[i].min_cmd_id = 0;
8958                                 ioa_cfg->hrrq[i].max_cmd_id =
8959                                         (entries_each_hrrq - 1);
8960                         } else {
8961                                 entries_each_hrrq =
8962                                         IPR_NUM_BASE_CMD_BLKS/
8963                                         (ioa_cfg->hrrq_num - 1);
8964                                 ioa_cfg->hrrq[i].min_cmd_id =
8965                                         IPR_NUM_INTERNAL_CMD_BLKS +
8966                                         (i - 1) * entries_each_hrrq;
8967                                 ioa_cfg->hrrq[i].max_cmd_id =
8968                                         (IPR_NUM_INTERNAL_CMD_BLKS +
8969                                         i * entries_each_hrrq - 1);
8970                         }
8971                 } else {
8972                         entries_each_hrrq = IPR_NUM_CMD_BLKS;
8973                         ioa_cfg->hrrq[i].min_cmd_id = 0;
8974                         ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
8975                 }
8976                 ioa_cfg->hrrq[i].size = entries_each_hrrq;
8977         }
8978
8979         BUG_ON(ioa_cfg->hrrq_num == 0);
8980
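        /*
         * Integer division above may leave a few command blocks
         * unassigned; fold any remainder into the last HRRQ so all
         * IPR_NUM_CMD_BLKS entries are owned by some queue.
         */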
8981         i = IPR_NUM_CMD_BLKS -
8982                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
8983         if (i > 0) {
8984                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
8985                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
8986         }
8987
8988         for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8989                 ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
8990
8991                 if (!ipr_cmd) {
8992                         ipr_free_cmd_blks(ioa_cfg);
8993                         return -ENOMEM;
8994                 }
8995
8996                 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
8997                 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
8998                 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
8999
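                /*
                 * Point the IOARCB at the command block's own DMA address
                 * and at the IOADL and IOASA areas embedded in the block;
                 * SIS-64 adapters use 64-bit fields, SIS-32 use 32-bit.
                 */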
9000                 ioarcb = &ipr_cmd->ioarcb;
9001                 ipr_cmd->dma_addr = dma_addr;
9002                 if (ioa_cfg->sis64)
9003                         ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9004                 else
9005                         ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9006
9007                 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9008                 if (ioa_cfg->sis64) {
9009                         ioarcb->u.sis64_addr_data.data_ioadl_addr =
9010                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9011                         ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9012                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9013                 } else {
9014                         ioarcb->write_ioadl_addr =
9015                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9016                         ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9017                         ioarcb->ioasa_host_pci_addr =
9018                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9019                 }
9020                 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9021                 ipr_cmd->cmd_index = i;
9022                 ipr_cmd->ioa_cfg = ioa_cfg;
9023                 ipr_cmd->sense_buffer_dma = dma_addr +
9024                         offsetof(struct ipr_cmnd, sense_buffer);
9025
9026                 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9027                 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9028                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9029                 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9030                         hrrq_id++;
9031         }
9032
9033         return 0;
9034 }
9035
9036 /**
9037  * ipr_alloc_mem - Allocate memory for an adapter
9038  * @ioa_cfg:    ioa config struct
9039  *
9040  * Return value:
9041  *      0 on success / non-zero for error
9042  **/
9043 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9044 {
9045         struct pci_dev *pdev = ioa_cfg->pdev;
9046         int i, rc = -ENOMEM;
9047
9048         ENTER;
9049         ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
9050                                        ioa_cfg->max_devs_supported, GFP_KERNEL);
9051
9052         if (!ioa_cfg->res_entries)
9053                 goto out;
9054
9055         for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9056                 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9057                 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9058         }
9059
9060         ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9061                                               sizeof(struct ipr_misc_cbs),
9062                                               &ioa_cfg->vpd_cbs_dma,
9063                                               GFP_KERNEL);
9064
9065         if (!ioa_cfg->vpd_cbs)
9066                 goto out_free_res_entries;
9067
9068         if (ipr_alloc_cmd_blks(ioa_cfg))
9069                 goto out_free_vpd_cbs;
9070
9071         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9072                 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9073                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9074                                         &ioa_cfg->hrrq[i].host_rrq_dma,
9075                                         GFP_KERNEL);
9076
9077                 if (!ioa_cfg->hrrq[i].host_rrq) {
9078                         while (--i >= 0)
9079                                 dma_free_coherent(&pdev->dev,
9080                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9081                                         ioa_cfg->hrrq[i].host_rrq,
9082                                         ioa_cfg->hrrq[i].host_rrq_dma);
9083                         goto out_ipr_free_cmd_blocks;
9084                 }
9085                 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9086         }
9087
9088         ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9089                                                   ioa_cfg->cfg_table_size,
9090                                                   &ioa_cfg->cfg_table_dma,
9091                                                   GFP_KERNEL);
9092
9093         if (!ioa_cfg->u.cfg_table)
9094                 goto out_free_host_rrq;
9095
9096         for (i = 0; i < IPR_NUM_HCAMS; i++) {
9097                 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9098                                                          sizeof(struct ipr_hostrcb),
9099                                                          &ioa_cfg->hostrcb_dma[i],
9100                                                          GFP_KERNEL);
9101
9102                 if (!ioa_cfg->hostrcb[i])
9103                         goto out_free_hostrcb_dma;
9104
9105                 ioa_cfg->hostrcb[i]->hostrcb_dma =
9106                         ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9107                 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9108                 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9109         }
9110
9111         ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
9112                                  IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9113
9114         if (!ioa_cfg->trace)
9115                 goto out_free_hostrcb_dma;
9116
9117         rc = 0;
9118 out:
9119         LEAVE;
9120         return rc;
9121
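/* Error unwinding: free in reverse order of allocation, starting from the failure point */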
9122 out_free_hostrcb_dma:
9123         while (i-- > 0) {
9124                 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9125                                   ioa_cfg->hostrcb[i],
9126                                   ioa_cfg->hostrcb_dma[i]);
9127         }
9128         dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9129                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9130 out_free_host_rrq:
9131         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9132                 dma_free_coherent(&pdev->dev,
9133                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9134                                   ioa_cfg->hrrq[i].host_rrq,
9135                                   ioa_cfg->hrrq[i].host_rrq_dma);
9136         }
9137 out_ipr_free_cmd_blocks:
9138         ipr_free_cmd_blks(ioa_cfg);
9139 out_free_vpd_cbs:
9140         dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9141                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9142 out_free_res_entries:
9143         kfree(ioa_cfg->res_entries);
9144         goto out;
9145 }
9146
9147 /**
9148  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9149  * @ioa_cfg:    ioa config struct
9150  *
9151  * Return value:
9152  *      none
9153  **/
9154 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9155 {
9156         int i;
9157
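        /*
         * Clamp the ipr_max_speed module parameter to the bus speed table;
         * out-of-range values fall back to the Ultra 160 rate.
         */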
9158         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9159                 ioa_cfg->bus_attr[i].bus = i;
9160                 ioa_cfg->bus_attr[i].qas_enabled = 0;
9161                 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9162                 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9163                         ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9164                 else
9165                         ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9166         }
9167 }
9168
9169 /**
9170  * ipr_init_regs - Initialize IOA registers
9171  * @ioa_cfg:    ioa config struct
9172  *
9173  * Return value:
9174  *      none
9175  **/
9176 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9177 {
9178         const struct ipr_interrupt_offsets *p;
9179         struct ipr_interrupts *t;
9180         void __iomem *base;
9181
9182         p = &ioa_cfg->chip_cfg->regs;
9183         t = &ioa_cfg->regs;
9184         base = ioa_cfg->hdw_dma_regs;
9185
9186         t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9187         t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9188         t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9189         t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9190         t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9191         t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9192         t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9193         t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9194         t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9195         t->ioarrin_reg = base + p->ioarrin_reg;
9196         t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9197         t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9198         t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9199         t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9200         t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9201         t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9202
9203         if (ioa_cfg->sis64) {
9204                 t->init_feedback_reg = base + p->init_feedback_reg;
9205                 t->dump_addr_reg = base + p->dump_addr_reg;
9206                 t->dump_data_reg = base + p->dump_data_reg;
9207                 t->endian_swap_reg = base + p->endian_swap_reg;
9208         }
9209 }
9210
9211 /**
9212  * ipr_init_ioa_cfg - Initialize IOA config struct
9213  * @ioa_cfg:    ioa config struct
9214  * @host:               scsi host struct
9215  * @pdev:               PCI dev struct
9216  *
9217  * Return value:
9218  *      none
9219  **/
9220 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9221                              struct Scsi_Host *host, struct pci_dev *pdev)
9222 {
9223         int i;
9224
9225         ioa_cfg->host = host;
9226         ioa_cfg->pdev = pdev;
9227         ioa_cfg->log_level = ipr_log_level;
9228         ioa_cfg->doorbell = IPR_DOORBELL;
9229         sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9230         sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9231         sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9232         sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9233         sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9234         sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9235
9236         INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9237         INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9238         INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9239         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9240         INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9241         init_waitqueue_head(&ioa_cfg->reset_wait_q);
9242         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9243         init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9244         ioa_cfg->sdt_state = INACTIVE;
9245
9246         ipr_initialize_bus_attr(ioa_cfg);
9247         ioa_cfg->max_devs_supported = ipr_max_devs;
9248
9249         if (ioa_cfg->sis64) {
9250                 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9251                 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9252                 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9253                         ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9254                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9255                                            + ((sizeof(struct ipr_config_table_entry64)
9256                                                * ioa_cfg->max_devs_supported)));
9257         } else {
9258                 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9259                 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9260                 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9261                         ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9262                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9263                                            + ((sizeof(struct ipr_config_table_entry)
9264                                                * ioa_cfg->max_devs_supported)));
9265         }
9266
9267         host->max_channel = IPR_MAX_BUS_TO_SCAN;
9268         host->unique_id = host->host_no;
9269         host->max_cmd_len = IPR_MAX_CDB_LEN;
9270         host->can_queue = ioa_cfg->max_cmds;
9271         pci_set_drvdata(pdev, ioa_cfg);
9272
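        /*
         * HRRQ 0 shares the SCSI host lock, while any additional HRRQs
         * are protected by their own per-queue locks.
         */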
9273         for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9274                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9275                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9276                 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9277                 if (i == 0)
9278                         ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9279                 else
9280                         ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9281         }
9282 }
9283
9284 /**
9285  * ipr_get_chip_info - Find adapter chip information
9286  * @dev_id:             PCI device id struct
9287  *
9288  * Return value:
9289  *      ptr to chip information on success / NULL on failure
9290  **/
9291 static const struct ipr_chip_t *
9292 ipr_get_chip_info(const struct pci_device_id *dev_id)
9293 {
9294         int i;
9295
9296         for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9297                 if (ipr_chip[i].vendor == dev_id->vendor &&
9298                     ipr_chip[i].device == dev_id->device)
9299                         return &ipr_chip[i];
9300         return NULL;
9301 }
9302
9303 /**
9304  * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9305  *                                              during probe time
9306  * @ioa_cfg:    ioa config struct
9307  *
9308  * Return value:
9309  *      None
9310  **/
9311 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9312 {
9313         struct pci_dev *pdev = ioa_cfg->pdev;
9314
9315         if (pci_channel_offline(pdev)) {
9316                 wait_event_timeout(ioa_cfg->eeh_wait_q,
9317                                    !pci_channel_offline(pdev),
9318                                    IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9319                 pci_restore_state(pdev);
9320         }
9321 }
9322
9323 static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9324 {
9325         struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
9326         int i, vectors;
9327
9328         for (i = 0; i < ARRAY_SIZE(entries); ++i)
9329                 entries[i].entry = i;
9330
9331         vectors = pci_enable_msix_range(ioa_cfg->pdev,
9332                                         entries, 1, ipr_number_of_msix);
9333         if (vectors < 0) {
9334                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9335                 return vectors;
9336         }
9337
9338         for (i = 0; i < vectors; i++)
9339                 ioa_cfg->vectors_info[i].vec = entries[i].vector;
9340         ioa_cfg->nvectors = vectors;
9341
9342         return 0;
9343 }
9344
9345 static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9346 {
9347         int i, vectors;
9348
9349         vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
9350         if (vectors < 0) {
9351                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9352                 return vectors;
9353         }
9354
9355         for (i = 0; i < vectors; i++)
9356                 ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
9357         ioa_cfg->nvectors = vectors;
9358
9359         return 0;
9360 }
9361
9362 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9363 {
9364         int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9365
9366         for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9367                 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9368                          "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9369                 ioa_cfg->vectors_info[vec_idx].
9370                         desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9371         }
9372 }
9373
9374 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
9375 {
9376         int i, rc;
9377
9378         for (i = 1; i < ioa_cfg->nvectors; i++) {
9379                 rc = request_irq(ioa_cfg->vectors_info[i].vec,
9380                         ipr_isr_mhrrq,
9381                         0,
9382                         ioa_cfg->vectors_info[i].desc,
9383                         &ioa_cfg->hrrq[i]);
9384                 if (rc) {
9385                         while (--i >= 0)
9386                                 free_irq(ioa_cfg->vectors_info[i].vec,
9387                                         &ioa_cfg->hrrq[i]);
9388                         return rc;
9389                 }
9390         }
9391         return 0;
9392 }
9393
9394 /**
9395  * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9396  * @irq:                interrupt number
 * @devp:               ioa config struct pointer
9397  *
9398  * Description: Simply sets the msi_received flag to 1, indicating that
9399  * Message Signaled Interrupts are supported.
9400  *
9401  * Return value:
9402  *      IRQ_HANDLED
9403  **/
9404 static irqreturn_t ipr_test_intr(int irq, void *devp)
9405 {
9406         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9407         unsigned long lock_flags = 0;
9408         irqreturn_t rc = IRQ_HANDLED;
9409
9410         dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
9411         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9412
9413         ioa_cfg->msi_received = 1;
9414         wake_up(&ioa_cfg->msi_wait_q);
9415
9416         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9417         return rc;
9418 }
9419
9420 /**
9421  * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9422  * @ioa_cfg:            ioa config struct
 * @pdev:               PCI device struct
9423  *
9424  * Description: The return value from pci_enable_msi_range() cannot always be
9425  * trusted.  This routine sets up and initiates a test interrupt to determine
9426  * if the interrupt is received via the ipr_test_intr() service routine.
9427  * If the test fails, the driver will fall back to LSI.
9428  *
9429  * Return value:
9430  *      0 on success / non-zero on failure
9431  **/
9432 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
9433 {
9434         int rc;
9435         volatile u32 int_reg;
9436         unsigned long lock_flags = 0;
9437
9438         ENTER;
9439
9440         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9441         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9442         ioa_cfg->msi_received = 0;
9443         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9444         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
9445         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9446         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9447
9448         if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9449                 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9450         else
9451                 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9452         if (rc) {
9453                 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
9454                 return rc;
9455         } else if (ipr_debug)
9456                 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
9457
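        /*
         * Generate a test interrupt by raising the I/O debug acknowledge
         * bit, then wait up to one second for ipr_test_intr() to set
         * msi_received.
         */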
9458         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
9459         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
9460         wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
9461         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9462         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9463
9464         if (!ioa_cfg->msi_received) {
9465                 /* MSI test failed */
9466                 dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
9467                 rc = -EOPNOTSUPP;
9468         } else if (ipr_debug)
9469                 dev_info(&pdev->dev, "MSI test succeeded.\n");
9470
9471         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9472
9473         if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9474                 free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
9475         else
9476                 free_irq(pdev->irq, ioa_cfg);
9477
9478         LEAVE;
9479
9480         return rc;
9481 }
9482
9483 /**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
9484  * @pdev:               PCI device struct
9485  * @dev_id:             PCI device id struct
9486  *
9487  * Return value:
9488  *      0 on success / non-zero on failure
9489  **/
9490 static int ipr_probe_ioa(struct pci_dev *pdev,
9491                          const struct pci_device_id *dev_id)
9492 {
9493         struct ipr_ioa_cfg *ioa_cfg;
9494         struct Scsi_Host *host;
9495         unsigned long ipr_regs_pci;
9496         void __iomem *ipr_regs;
9497         int rc = PCIBIOS_SUCCESSFUL;
9498         volatile u32 mask, uproc, interrupts;
9499         unsigned long lock_flags, driver_lock_flags;
9500
9501         ENTER;
9502
9503         dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
9504         host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9505
9506         if (!host) {
9507                 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9508                 rc = -ENOMEM;
9509                 goto out;
9510         }
9511
9512         ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9513         memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
9514         ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
9515
9516         ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
9517
9518         if (!ioa_cfg->ipr_chip) {
9519                 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
9520                         dev_id->vendor, dev_id->device);
9521                 rc = -ENODEV;
                 goto out_scsi_host_put;
9522         }
9523
9524         /* set SIS 32 or SIS 64 */
9525         ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
9526         ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
9527         ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
9528         ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
9529
9530         if (ipr_transop_timeout)
9531                 ioa_cfg->transop_timeout = ipr_transop_timeout;
9532         else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
9533                 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
9534         else
9535                 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
9536
9537         ioa_cfg->revid = pdev->revision;
9538
9539         ipr_init_ioa_cfg(ioa_cfg, host, pdev);
9540
9541         ipr_regs_pci = pci_resource_start(pdev, 0);
9542
9543         rc = pci_request_regions(pdev, IPR_NAME);
9544         if (rc < 0) {
9545                 dev_err(&pdev->dev,
9546                         "Couldn't register memory range of registers\n");
9547                 goto out_scsi_host_put;
9548         }
9549
9550         rc = pci_enable_device(pdev);
9551
9552         if (rc || pci_channel_offline(pdev)) {
9553                 if (pci_channel_offline(pdev)) {
9554                         ipr_wait_for_pci_err_recovery(ioa_cfg);
9555                         rc = pci_enable_device(pdev);
9556                 }
9557
9558                 if (rc) {
9559                         dev_err(&pdev->dev, "Cannot enable adapter\n");
9560                         ipr_wait_for_pci_err_recovery(ioa_cfg);
9561                         goto out_release_regions;
9562                 }
9563         }
9564
9565         ipr_regs = pci_ioremap_bar(pdev, 0);
9566
9567         if (!ipr_regs) {
9568                 dev_err(&pdev->dev,
9569                         "Couldn't map memory range of registers\n");
9570                 rc = -ENOMEM;
9571                 goto out_disable;
9572         }
9573
9574         ioa_cfg->hdw_dma_regs = ipr_regs;
9575         ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
9576         ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
9577
9578         ipr_init_regs(ioa_cfg);
9579
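        /*
         * SIS-64 adapters attempt a 64-bit DMA mask first and fall back
         * to 32-bit on failure; SIS-32 adapters only support 32-bit DMA.
         */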
9580         if (ioa_cfg->sis64) {
9581                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9582                 if (rc < 0) {
9583                         dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
9584                         rc = dma_set_mask_and_coherent(&pdev->dev,
9585                                                        DMA_BIT_MASK(32));
9586                 }
9587         } else
9588                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9589
9590         if (rc < 0) {
9591                 dev_err(&pdev->dev, "Failed to set DMA mask\n");
9592                 goto cleanup_nomem;
9593         }
9594
9595         rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
9596                                    ioa_cfg->chip_cfg->cache_line_size);
9597
9598         if (rc != PCIBIOS_SUCCESSFUL) {
9599                 dev_err(&pdev->dev, "Write of cache line size failed\n");
9600                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9601                 rc = -EIO;
9602                 goto cleanup_nomem;
9603         }
9604
9605         /* Issue MMIO read to ensure card is not in EEH */
9606         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
9607         ipr_wait_for_pci_err_recovery(ioa_cfg);
9608
9609         if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
9610                 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
9611                         IPR_MAX_MSIX_VECTORS);
9612                 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
9613         }
9614
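        /*
         * Prefer MSI-X, then plain MSI; fall back to legacy (LSI)
         * interrupts with a single vector if neither can be enabled.
         */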
9615         if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9616                         ipr_enable_msix(ioa_cfg) == 0)
9617                 ioa_cfg->intr_flag = IPR_USE_MSIX;
9618         else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9619                         ipr_enable_msi(ioa_cfg) == 0)
9620                 ioa_cfg->intr_flag = IPR_USE_MSI;
9621         else {
9622                 ioa_cfg->intr_flag = IPR_USE_LSI;
9623                 ioa_cfg->nvectors = 1;
9624                 dev_info(&pdev->dev, "Cannot enable MSI.\n");
9625         }
9626
9627         pci_set_master(pdev);
9628
9629         if (pci_channel_offline(pdev)) {
9630                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9631                 pci_set_master(pdev);
9632                 if (pci_channel_offline(pdev)) {
9633                         rc = -EIO;
9634                         goto out_msi_disable;
9635                 }
9636         }
9637
9638         if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9639             ioa_cfg->intr_flag == IPR_USE_MSIX) {
9640                 rc = ipr_test_msi(ioa_cfg, pdev);
9641                 if (rc == -EOPNOTSUPP) {
9642                         ipr_wait_for_pci_err_recovery(ioa_cfg);
9643                         if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9644                                 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9645                                 pci_disable_msi(pdev);
9646                         } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9647                                 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9648                                 pci_disable_msix(pdev);
9649                         }
9650
9651                         ioa_cfg->intr_flag = IPR_USE_LSI;
9652                         ioa_cfg->nvectors = 1;
9653                 } else if (rc)
9655                         goto out_msi_disable;
9656                 else {
9657                         if (ioa_cfg->intr_flag == IPR_USE_MSI)
9658                                 dev_info(&pdev->dev,
9659                                         "Request for %d MSIs succeeded with starting IRQ: %d\n",
9660                                         ioa_cfg->nvectors, pdev->irq);
9661                         else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9662                                 dev_info(&pdev->dev,
9663                                         "Request for %d MSIXs succeeded.",
9664                                         ioa_cfg->nvectors);
9665                 }
9666         }
9667
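        /* Use no more HRRQs than interrupt vectors, online CPUs, or the driver maximum */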
9668         ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
9669                                 (unsigned int)num_online_cpus(),
9670                                 (unsigned int)IPR_MAX_HRRQ_NUM);
9671
9672         if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
9673                 goto out_msi_disable;
9674
9675         if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
9676                 goto out_msi_disable;
9677
9678         rc = ipr_alloc_mem(ioa_cfg);
9679         if (rc < 0) {
9680                 dev_err(&pdev->dev,
9681                         "Couldn't allocate enough memory for device driver!\n");
9682                 goto out_msi_disable;
9683         }
9684
9685         /* Save away PCI config space for use following IOA reset */
9686         rc = pci_save_state(pdev);
9687
9688         if (rc != PCIBIOS_SUCCESSFUL) {
9689                 dev_err(&pdev->dev, "Failed to save PCI config space\n");
9690                 rc = -EIO;
9691                 goto cleanup_nolog;
9692         }
9693
9694         /*
9695          * If HRRQ updated interrupt is not masked, or reset alert is set,
9696          * the card is in an unknown state and needs a hard reset
9697          */
9698         mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
9699         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
9700         uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
9701         if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
9702                 ioa_cfg->needs_hard_reset = 1;
9703         if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
9704                 ioa_cfg->needs_hard_reset = 1;
9705         if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
9706                 ioa_cfg->ioa_unit_checked = 1;
9707
9708         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9709         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9710         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9711
9712         if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9713                         ioa_cfg->intr_flag == IPR_USE_MSIX) {
9714                 name_msi_vectors(ioa_cfg);
9715                 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
9716                         0,
9717                         ioa_cfg->vectors_info[0].desc,
9718                         &ioa_cfg->hrrq[0]);
9719                 if (!rc)
9720                         rc = ipr_request_other_msi_irqs(ioa_cfg);
9721         } else {
9722                 rc = request_irq(pdev->irq, ipr_isr,
9723                          IRQF_SHARED,
9724                          IPR_NAME, &ioa_cfg->hrrq[0]);
9725         }
9726         if (rc) {
9727                 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
9728                         pdev->irq, rc);
9729                 goto cleanup_nolog;
9730         }
9731
9732         if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
9733             (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
9734                 ioa_cfg->needs_warm_reset = 1;
9735                 ioa_cfg->reset = ipr_reset_slot_reset;
9736         } else
9737                 ioa_cfg->reset = ipr_reset_start_bist;
9738
9739         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
9740         list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
9741         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
9742
9743         LEAVE;
9744 out:
9745         return rc;
9746
9747 cleanup_nolog:
9748         ipr_free_mem(ioa_cfg);
9749 out_msi_disable:
9750         ipr_wait_for_pci_err_recovery(ioa_cfg);
9751         if (ioa_cfg->intr_flag == IPR_USE_MSI)
9752                 pci_disable_msi(pdev);
9753         else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9754                 pci_disable_msix(pdev);
9755 cleanup_nomem:
9756         iounmap(ipr_regs);
9757 out_disable:
9758         pci_disable_device(pdev);
9759 out_release_regions:
9760         pci_release_regions(pdev);
9761 out_scsi_host_put:
9762         scsi_host_put(host);
9763         goto out;
9764 }
9765
9766 /**
9767  * ipr_scan_vsets - Scans for VSET devices
9768  * @ioa_cfg:    ioa config struct
9769  *
9770  * Description: Since the VSET resources do not follow SAM in that we can have
9771  * sparse LUNs with no LUN 0, we have to scan for these ourselves.
9772  *
9773  * Return value:
9774  *      none
9775  **/
9776 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
9777 {
9778         int target, lun;
9779
9780         for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
9781                 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
9782                         scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
9783 }
9784
9785 /**
9786  * ipr_initiate_ioa_bringdown - Bring down an adapter
9787  * @ioa_cfg:            ioa config struct
9788  * @shutdown_type:      shutdown type
9789  *
9790  * Description: This function will initiate bringing down the adapter.
9791  * This consists of issuing an IOA shutdown to the adapter
9792  * to flush the cache, and running BIST.
9793  * If the caller needs to wait on the completion of the reset,
9794  * the caller must sleep on the reset_wait_q.
9795  *
9796  * Return value:
9797  *      none
9798  **/
9799 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
9800                                        enum ipr_shutdown_type shutdown_type)
9801 {
9802         ENTER;
9803         if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9804                 ioa_cfg->sdt_state = ABORT_DUMP;
9805         ioa_cfg->reset_retries = 0;
9806         ioa_cfg->in_ioa_bringdown = 1;
9807         ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
9808         LEAVE;
9809 }
9810
9811 /**
9812  * __ipr_remove - Remove a single adapter
9813  * @pdev:       pci device struct
9814  *
9815  * Adapter hot plug remove entry point.
9816  *
9817  * Return value:
9818  *      none
9819  **/
9820 static void __ipr_remove(struct pci_dev *pdev)
9821 {
9822         unsigned long host_lock_flags = 0;
9823         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9824         int i;
9825         unsigned long driver_lock_flags;
9826         ENTER;
9827
9828         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9829         while (ioa_cfg->in_reset_reload) {
9830                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9831                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9832                 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9833         }
9834
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].removing_ioa = 1;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
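	/* Make the removing_ioa stores visible before initiating the bringdown. */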
	wmb();
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_work(&ioa_cfg->work_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
	list_del(&ioa_cfg->queue);
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}

/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

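	/* Remove the sysfs attributes and SCSI host before the core teardown. */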
	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}

/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	pci device struct
 * @dev_id:	pci device id struct
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc, i;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

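	/* Probe succeeded; discover devices, including sparse VSET LUNs and the IOA itself. */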
	scsi_scan_host(ioa_cfg->host);
	ipr_scan_vsets(ioa_cfg);
	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
	ioa_cfg->allow_ml_add_del = 1;
	ioa_cfg->host->max_channel = IPR_VSET_BUS;
	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;

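	/*
	 * On SIS64 adapters with multiple interrupt vectors, service the
	 * secondary HRRQs with blk_iopoll rather than from hard irq context.
	 */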
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
			blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
					ioa_cfg->iopoll_weight, ipr_iopoll);
			blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
		}
	}

	schedule_work(&ioa_cfg->work_q);
	return 0;
}

/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 * 	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;
	int i;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
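	/* Stop iopoll on the secondary HRRQs before bringing the adapter down. */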
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		ioa_cfg->iopoll_weight = 0;
		for (i = 1; i < ioa_cfg->hrrq_num; i++)
			blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
	}

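	/* If a reset/reload is already in progress, let it finish first. */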
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}

static const struct pci_device_id ipr_pci_table[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);

static const struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.mmio_enabled = ipr_pci_mmio_enabled,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};

/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		notifier block
 * @event:	reboot notifier event
 * @buf:	notifier data (unused)
 *
 * Return value:
 * 	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0, driver_lock_flags;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

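		/* Build and issue an IOA SHUTDOWN (prepare for normal) command. */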
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	return NOTIFY_OK;
}

static struct notifier_block ipr_notifier = {
	.notifier_call = ipr_halt,
};

/**
 * ipr_init - Module entry point
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	int rc;

	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	rc = pci_register_driver(&ipr_driver);
	if (rc) {
		/* Don't leave a dangling reboot notifier if registration fails. */
		unregister_reboot_notifier(&ipr_notifier);
	}
	return rc;
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);