/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *      - Ultra 320 SCSI controller
 *      - PCI-X host interface
 *      - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *      - Non-Volatile Write Cache
 *      - Supports attachment of non-RAID disks, tape, and optical devices
 *      - RAID Levels 0, 5, 10
 *      - Hot spare
 *      - Background Parity Checking
 *      - Background Data Scrubbing
 *      - Ability to increase the capacity of an existing RAID 5 disk array
 *              by adding disks
 *
 * Driver Features:
 *      - Tagged command queuing
 *      - Adapter microcode download
 *      - PCI hot plug
 *      - SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 2;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
        { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
                .mailbox = 0x0042C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x0022C,
                        .clr_interrupt_mask_reg = 0x00230,
                        .clr_interrupt_mask_reg32 = 0x00230,
                        .sense_interrupt_mask_reg = 0x0022C,
                        .sense_interrupt_mask_reg32 = 0x0022C,
                        .clr_interrupt_reg = 0x00228,
                        .clr_interrupt_reg32 = 0x00228,
                        .sense_interrupt_reg = 0x00224,
                        .sense_interrupt_reg32 = 0x00224,
                        .ioarrin_reg = 0x00404,
                        .sense_uproc_interrupt_reg = 0x00214,
                        .sense_uproc_interrupt_reg32 = 0x00214,
                        .set_uproc_interrupt_reg = 0x00214,
                        .set_uproc_interrupt_reg32 = 0x00214,
                        .clr_uproc_interrupt_reg = 0x00218,
                        .clr_uproc_interrupt_reg32 = 0x00218
                }
        },
        { /* Snipe and Scamp */
                .mailbox = 0x0052C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x00288,
                        .clr_interrupt_mask_reg = 0x0028C,
                        .clr_interrupt_mask_reg32 = 0x0028C,
                        .sense_interrupt_mask_reg = 0x00288,
                        .sense_interrupt_mask_reg32 = 0x00288,
                        .clr_interrupt_reg = 0x00284,
                        .clr_interrupt_reg32 = 0x00284,
                        .sense_interrupt_reg = 0x00280,
                        .sense_interrupt_reg32 = 0x00280,
                        .ioarrin_reg = 0x00504,
                        .sense_uproc_interrupt_reg = 0x00290,
                        .sense_uproc_interrupt_reg32 = 0x00290,
                        .set_uproc_interrupt_reg = 0x00290,
                        .set_uproc_interrupt_reg32 = 0x00290,
                        .clr_uproc_interrupt_reg = 0x00294,
                        .clr_uproc_interrupt_reg32 = 0x00294
                }
        },
        { /* CRoC */
                .mailbox = 0x00044,
                .max_cmds = 1000,
                .cache_line_size = 0x20,
                .clear_isr = 0,
                .iopoll_weight = 64,
                {
                        .set_interrupt_mask_reg = 0x00010,
                        .clr_interrupt_mask_reg = 0x00018,
                        .clr_interrupt_mask_reg32 = 0x0001C,
                        .sense_interrupt_mask_reg = 0x00010,
                        .sense_interrupt_mask_reg32 = 0x00014,
                        .clr_interrupt_reg = 0x00008,
                        .clr_interrupt_reg32 = 0x0000C,
                        .sense_interrupt_reg = 0x00000,
                        .sense_interrupt_reg32 = 0x00004,
                        .ioarrin_reg = 0x00070,
                        .sense_uproc_interrupt_reg = 0x00020,
                        .sense_uproc_interrupt_reg32 = 0x00024,
                        .set_uproc_interrupt_reg = 0x00020,
                        .set_uproc_interrupt_reg32 = 0x00024,
                        .clr_uproc_interrupt_reg = 0x00028,
                        .clr_uproc_interrupt_reg32 = 0x0002C,
                        .init_feedback_reg = 0x0005C,
                        .dump_addr_reg = 0x00064,
                        .dump_data_reg = 0x00068,
                        .endian_swap_reg = 0x00084
                }
        },
};

static const struct ipr_chip_t ipr_chip[] = {
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};
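
/*
 * At probe time the adapter's PCI vendor/device IDs are matched against
 * the table above to pick the register layout and SIS level to use
 * (see ipr_get_chip() later in this file).
 */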

static int ipr_max_bus_speeds[] = {
        IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
                 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default: 2)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
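
/*
 * Example (hypothetical values) of setting these options at module load:
 *
 *   modprobe ipr max_speed=2 log_level=2 number_of_msix=4
 */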

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
        {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8155: An unknown error was received"},
        {0x00330000, 0, 0,
        "Soft underlength error"},
        {0x005A0000, 0, 0,
        "Command to be cancelled not found"},
        {0x00808000, 0, 0,
        "Qualified success"},
        {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Soft device bus error recovered by the IOA"},
        {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4101: Soft device bus fabric error"},
        {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block guard error recovered by the device"},
        {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block reference tag error recovered by the device"},
        {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered scatter list tag / sequence number error"},
        {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
        {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered logical block sequence number error on IOA to Host transfer"},
        {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Recovered logical block reference tag error detected by the IOA"},
        {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Logical block guard error recovered by the IOA"},
        {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Device sector reassign successful"},
        {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by device rewrite procedures"},
        {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
        "7001: IOA sector reassignment successful"},
        {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Soft media error. Sector reassignment recommended"},
        {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by IOA rewrite procedures"},
        {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft PCI bus error recovered by the IOA"},
        {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the IOA"},
        {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the device"},
        {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft IOA error recovered by the IOA"},
        {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFA: Undefined device response recovered by the IOA"},
        {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device bus error, message or command phase"},
        {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Task Management Function failed"},
        {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Failure prediction threshold exceeded"},
        {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8009: Impending cache battery pack failure"},
        {0x02040100, 0, 0,
        "Logical Unit in process of becoming ready"},
        {0x02040200, 0, 0,
        "Initializing command required"},
        {0x02040400, 0, 0,
        "34FF: Disk device format in progress"},
        {0x02040C00, 0, 0,
        "Logical unit not accessible, target port in unavailable state"},
        {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9070: IOA requested reset"},
        {0x023F0000, 0, 0,
        "Synchronization required"},
        {0x02408500, 0, 0,
        "IOA microcode download required"},
        {0x02408600, 0, 0,
        "Device bus connection is prohibited by host"},
        {0x024E0000, 0, 0,
        "Not ready, IOA shutdown"},
        {0x025A0000, 0, 0,
        "Not ready, IOA has been shutdown"},
        {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: Storage subsystem configuration error"},
        {0x03110B00, 0, 0,
        "FFF5: Medium error, data unreadable, recommend reassign"},
        {0x03110C00, 0, 0,
        "7000: Medium error, data unreadable, do not reassign"},
        {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF3: Disk media format bad"},
        {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3002: Addressed device failed to respond to selection"},
        {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3100: Device bus error"},
        {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3109: IOA timed out a device command"},
        {0x04088000, 0, 0,
        "3120: SCSI bus is not operational"},
        {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4100: Hard device bus fabric error"},
        {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block guard error detected by the device"},
        {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block reference tag error detected by the device"},
        {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Scatter list tag / sequence number error"},
        {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Logical block CRC error on IOA to Host transfer"},
        {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Logical block sequence number error on IOA to Host transfer"},
        {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block reference tag error detected by the IOA"},
        {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block guard error detected by the IOA"},
        {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9000: IOA reserved area data check"},
        {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9001: IOA reserved area invalid data pattern"},
        {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9002: IOA reserved area LRC error"},
        {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
        "Hardware Error, IOA metadata access error"},
        {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
        "102E: Out of alternate sectors for disk storage"},
        {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer underlength error"},
        {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer overlength error"},
        {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3400: Logical unit failure"},
        {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Device microcode is corrupt"},
        {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: PCI bus error"},
        {0x04430000, 1, 0,
        "Unsupported device bus message received"},
        {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Disk device problem"},
        {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Permanent IOA failure"},
        {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3010: Disk device returned wrong response to IOA"},
        {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
        "8151: IOA microcode error"},
        {0x04448500, 0, 0,
        "Device bus status error"},
        {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
        "8157: IOA error requiring IOA reset to recover"},
        {0x04448700, 0, 0,
        "ATA device status error"},
        {0x04490000, 0, 0,
        "Message reject received from the device"},
        {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8008: A permanent cache battery pack failure occurred"},
        {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9090: Disk unit has been modified after the last known status"},
        {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9081: IOA detected device error"},
        {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
        "9082: IOA detected device error"},
        {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: Device bus error, message or command phase"},
        {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: SAS Command / Task Management Function failed"},
        {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9091: Incorrect hardware configuration change has been detected"},
        {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9073: Invalid multi-adapter configuration"},
        {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4010: Incorrect connection between cascaded expanders"},
        {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4020: Connections exceed IOA design limits"},
        {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4030: Incorrect multipath connection"},
        {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4110: Unsupported enclosure function"},
        {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
        "4120: SAS cable VPD cannot be read"},
        {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Command to logical unit failed"},
        {0x05240000, 1, 0,
        "Illegal request, invalid request type or request packet"},
        {0x05250000, 0, 0,
        "Illegal request, invalid resource handle"},
        {0x05258000, 0, 0,
        "Illegal request, commands not allowed to this device"},
        {0x05258100, 0, 0,
        "Illegal request, command not allowed to a secondary adapter"},
        {0x05258200, 0, 0,
        "Illegal request, command not allowed to a non-optimized resource"},
        {0x05260000, 0, 0,
        "Illegal request, invalid field in parameter list"},
        {0x05260100, 0, 0,
        "Illegal request, parameter not supported"},
        {0x05260200, 0, 0,
        "Illegal request, parameter value invalid"},
        {0x052C0000, 0, 0,
        "Illegal request, command sequence error"},
        {0x052C8000, 1, 0,
        "Illegal request, dual adapter support not enabled"},
        {0x052C8100, 1, 0,
        "Illegal request, another cable connector was physically disabled"},
        {0x054E8000, 1, 0,
        "Illegal request, inconsistent group id/group count"},
        {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9031: Array protection temporarily suspended, protection resuming"},
        {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9040: Array protection temporarily suspended, protection resuming"},
        {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4080: IOA exceeded maximum operating temperature"},
        {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4085: Service required"},
        {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3140: Device bus not ready to ready transition"},
        {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset"},
        {0x06290500, 0, 0,
        "FFFE: SCSI bus transition to single ended"},
        {0x06290600, 0, 0,
        "FFFE: SCSI bus transition to LVD"},
        {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset by another initiator"},
        {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3029: A device replacement has occurred"},
        {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4102: Device bus fabric performance degradation"},
        {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9051: IOA cache data exists for a missing or failed device"},
        {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
        {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9025: Disk unit is not supported at its physical location"},
        {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: IOA detected a SCSI bus configuration error"},
        {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3150: SCSI bus configuration error"},
        {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9074: Asymmetric advanced function disk configuration"},
        {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4040: Incomplete multipath connection between IOA and enclosure"},
        {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
        "4041: Incomplete multipath connection between enclosure and device"},
        {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9075: Incomplete multipath connection between IOA and remote IOA"},
        {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9076: Configuration error, missing remote IOA"},
        {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4050: Enclosure does not support a required multipath function"},
        {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
        "4121: Configuration error, required cable is missing"},
        {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
        "4122: Cable is not plugged into the correct location on remote IOA"},
        {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "4123: Configuration error, invalid cable vital product data"},
        {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "4124: Configuration error, both cable ends are plugged into the same IOA"},
        {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4070: Logically bad block written on device"},
        {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9041: Array protection temporarily suspended"},
        {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9042: Corrupt array parity detected on specified device"},
        {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9030: Array no longer protected due to missing or failed disk unit"},
        {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9071: Link operational transition"},
        {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9072: Link not operational transition"},
        {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9032: Array exposed but still protected"},
        {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
        "70DD: Device forced failed by disrupt device command"},
        {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4061: Multipath redundancy level got better"},
        {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4060: Multipath redundancy level got worse"},
        {0x07270000, 0, 0,
        "Failure due to other device"},
        {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9008: IOA does not support functions expected by devices"},
        {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9010: Cache data associated with attached devices cannot be found"},
        {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9011: Cache data belongs to devices other than those attached"},
        {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9020: Array missing 2 or more devices with only 1 device present"},
        {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9021: Array missing 2 or more devices with 2 or more devices present"},
        {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9022: Exposed array is missing a required device"},
        {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9023: Array member(s) not at required physical locations"},
        {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9024: Array not functional due to present hardware configuration"},
        {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9026: Array not functional due to present hardware configuration"},
        {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9027: Array is missing a device and parity is out of sync"},
        {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9028: Maximum number of arrays already exist"},
        {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9050: Required cache data cannot be located for a disk unit"},
        {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9052: Cache data exists for a device that has been modified"},
        {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9054: IOA resources not available due to previous problems"},
        {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9092: Disk unit requires initialization before use"},
        {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9029: Incorrect hardware configuration change has been detected"},
        {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9060: One or more disk pairs are missing from an array"},
        {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9061: One or more disks are missing from an array"},
        {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9062: One or more disks are missing from an array"},
        {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9063: Maximum number of functional arrays has been exceeded"},
        {0x07279A00, 0, 0,
        "Data protect, other volume set problem"},
        {0x0B260000, 0, 0,
        "Aborted command, invalid descriptor"},
        {0x0B3F9000, 0, 0,
        "Target operating conditions have changed, dual adapter takeover"},
        {0x0B530200, 0, 0,
        "Aborted command, medium removal prevented"},
        {0x0B5A0000, 0, 0,
        "Command terminated by host"},
        {0x0B5B8000, 0, 0,
        "Aborted command, command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
        { "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
        { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
        { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
        { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
        { "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
                                   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:    ipr command struct
 * @type:       trace type
 * @add_data:   additional data
 *
 * Return value:
 *      none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
                         u8 type, u32 add_data)
{
        struct ipr_trace_entry *trace_entry;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

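        /*
         * The trace is a fixed-size circular buffer: atomically bumping
         * trace_index and taking it modulo IPR_NUM_TRACE_ENTRIES gives
         * each concurrent caller a slot, overwriting the oldest entry.
         */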
        trace_entry = &ioa_cfg->trace[atomic_add_return
                        (1, &ioa_cfg->trace_index) % IPR_NUM_TRACE_ENTRIES];
        trace_entry->time = jiffies;
        trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
        trace_entry->type = type;
        if (ipr_cmd->ioa_cfg->sis64)
                trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
        else
                trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
        trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
        trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
        trace_entry->u.add_data = add_data;
        wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
        unsigned long lock_flags;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        ipr_cmd->done(ipr_cmd);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
        struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
        struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
        dma_addr_t dma_addr = ipr_cmd->dma_addr;
        int hrrq_id;

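        /* cmd_pkt is about to be cleared; preserve the HRRQ routing id. */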
        hrrq_id = ioarcb->cmd_pkt.hrrq_id;
        memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
        ioarcb->cmd_pkt.hrrq_id = hrrq_id;
        ioarcb->data_transfer_length = 0;
        ioarcb->read_data_transfer_length = 0;
        ioarcb->ioadl_len = 0;
        ioarcb->read_ioadl_len = 0;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioarcb->u.sis64_addr_data.data_ioadl_addr =
                        cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
                ioasa64->u.gata.status = 0;
        } else {
                ioarcb->write_ioadl_addr =
                        cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
                ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
                ioasa->u.gata.status = 0;
        }

        ioasa->hdr.ioasc = 0;
        ioasa->hdr.residual_data_len = 0;
        ipr_cmd->scsi_cmd = NULL;
        ipr_cmd->qc = NULL;
        ipr_cmd->sense_buffer[0] = 0;
        ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
                              void (*fast_done) (struct ipr_cmnd *))
{
        ipr_reinit_ipr_cmnd(ipr_cmd);
        ipr_cmd->u.scratch = 0;
        ipr_cmd->sibling = NULL;
        ipr_cmd->fast_done = fast_done;
        init_timer(&ipr_cmd->timer);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:       hrr queue
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
        struct ipr_cmnd *ipr_cmd = NULL;

        if (likely(!list_empty(&hrrq->hrrq_free_q))) {
                ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
                        struct ipr_cmnd, queue);
                list_del(&ipr_cmd->queue);
        }

        return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd =
                __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
        ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
        return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:    ioa config struct
 * @clr_ints:   interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *      none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
                                          u32 clr_ints)
{
        volatile u32 int_reg;
        int i;

        /* Stop new interrupts */
        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
                spin_lock(&ioa_cfg->hrrq[i]._lock);
                ioa_cfg->hrrq[i].allow_interrupts = 0;
                spin_unlock(&ioa_cfg->hrrq[i]._lock);
        }
        wmb();

        /* Set interrupt mask to stop all new interrupts */
        if (ioa_cfg->sis64)
                writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
        else
                writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

        /* Clear any pending interrupts */
        if (ioa_cfg->sis64)
                writel(~0, ioa_cfg->regs.clr_interrupt_reg);
        writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
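        /* Read back to flush the posted MMIO writes above. */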
        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

        if (pcix_cmd_reg == 0)
                return 0;

        if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
                dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
                return -EIO;
        }

        ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
        return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

        if (pcix_cmd_reg) {
                if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                          ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
                        dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
                        return -EIO;
                }
        }

        return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct ata_queued_cmd *qc = ipr_cmd->qc;
        struct ipr_sata_port *sata_port = qc->ap->private_data;

        qc->err_mask |= AC_ERR_OTHER;
        sata_port->ioasa.status |= ATA_BUSY;
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
        ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

        scsi_cmd->result |= (DID_ERROR << 16);

        scsi_dma_unmap(ipr_cmd->scsi_cmd);
        scsi_cmd->scsi_done(scsi_cmd);
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:    ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *      none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd, *temp;
        struct ipr_hrr_queue *hrrq;

        ENTER;
        for_each_hrrq(hrrq, ioa_cfg) {
                spin_lock(&hrrq->_lock);
                list_for_each_entry_safe(ipr_cmd,
                                        temp, &hrrq->hrrq_pending_q, queue) {
                        list_del(&ipr_cmd->queue);

                        ipr_cmd->s.ioasa.hdr.ioasc =
                                cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
                        ipr_cmd->s.ioasa.hdr.ilid =
                                cpu_to_be32(IPR_DRIVER_ILID);

                        if (ipr_cmd->scsi_cmd)
                                ipr_cmd->done = ipr_scsi_eh_done;
                        else if (ipr_cmd->qc)
                                ipr_cmd->done = ipr_sata_eh_done;

                        ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
                                     IPR_IOASC_IOA_WAS_RESET);
                        del_timer(&ipr_cmd->timer);
                        ipr_cmd->done(ipr_cmd);
                }
                spin_unlock(&hrrq->_lock);
        }
        LEAVE;
}

/**
 * ipr_send_command - Send driver initiated requests.
 * @ipr_cmd:    ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, it calculates the required ioarcb size, then ORs the
 * appropriate size bits into the low-order address bits.
 *
 * Return value:
 *      none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

        if (ioa_cfg->sis64) {
                /* The default size is 256 bytes */
                send_dma_addr |= 0x1;

                /* If the number of ioadls * size of ioadl > 128 bytes,
                   then use a 512 byte ioarcb */
                if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
                        send_dma_addr |= 0x4;
                writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
        } else
                writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:            ipr command struct
 * @done:               done function
 * @timeout_func:       timeout function
 * @timeout:            timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *      none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
                       void (*done) (struct ipr_cmnd *),
                       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

        ipr_cmd->done = done;

        ipr_cmd->timer.data = (unsigned long) ipr_cmd;
        ipr_cmd->timer.expires = jiffies + timeout;
        ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

        add_timer(&ipr_cmd->timer);

        ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

        ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:    ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *      none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
        if (ipr_cmd->sibling)
                ipr_cmd->sibling = NULL;
        else
                complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:    ipr command struct
 * @dma_addr:   dma address
 * @len:        transfer length
 * @flags:      ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 *      nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
                           u32 len, int flags)
{
        struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
        struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

        ipr_cmd->dma_use_sg = 1;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioadl64->flags = cpu_to_be32(flags);
                ioadl64->data_len = cpu_to_be32(len);
                ioadl64->address = cpu_to_be64(dma_addr);

                ipr_cmd->ioarcb.ioadl_len =
                        cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
                ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
        } else {
                ioadl->flags_and_data_len = cpu_to_be32(flags | len);
                ioadl->address = cpu_to_be32(dma_addr);

                if (flags == IPR_IOADL_FLAGS_READ_LAST) {
                        ipr_cmd->ioarcb.read_ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
                } else {
                        ipr_cmd->ioarcb.ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
                }
        }
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:    ipr command struct
 * @timeout_func:       function to invoke if command times out
 * @timeout:    timeout
 *
 * Return value:
 *      none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
                                  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
                                  u32 timeout)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        init_completion(&ipr_cmd->completion);
        ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

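        /*
         * Drop the host lock while sleeping; the command completes from
         * interrupt context, which takes the lock to invoke our done
         * function and wake this thread.
         */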
        spin_unlock_irq(ioa_cfg->host->host_lock);
        wait_for_completion(&ipr_cmd->completion);
        spin_lock_irq(ioa_cfg->host->host_lock);
}

static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
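        /*
         * HRRQ 0 is reserved for internal/initialization commands; normal
         * I/O is spread round-robin across queues 1..hrrq_num-1.
         */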
        if (ioa_cfg->hrrq_num == 1)
                return 0;
        else
                return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:    ioa config struct
 * @type:       HCAM type
 * @hostrcb:    hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *      none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
                          struct ipr_hostrcb *hostrcb)
{
        struct ipr_cmnd *ipr_cmd;
        struct ipr_ioarcb *ioarcb;

        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
                ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
                list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

                ipr_cmd->u.hostrcb = hostrcb;
                ioarcb = &ipr_cmd->ioarcb;

                ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
                ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
                ioarcb->cmd_pkt.cdb[1] = type;
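                /* CDB bytes 7-8 carry the HCAM buffer length, big endian. */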
                ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
                ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

                ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
                               sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

                if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
                        ipr_cmd->done = ipr_process_ccn;
                else
                        ipr_cmd->done = ipr_process_error;

                ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

                ipr_send_command(ipr_cmd);
        } else {
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
        }
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:        resource entry struct
 * @proto:      cfgte device bus protocol value
 *
 * Return value:
 *      none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
        switch (proto) {
        case IPR_PROTO_SATA:
        case IPR_PROTO_SAS_STP:
                res->ata_class = ATA_DEV_ATA;
                break;
        case IPR_PROTO_SATA_ATAPI:
        case IPR_PROTO_SAS_STP_ATAPI:
                res->ata_class = ATA_DEV_ATAPI;
                break;
        default:
                res->ata_class = ATA_DEV_UNKNOWN;
                break;
        }
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
                               struct ipr_config_table_entry_wrapper *cfgtew)
{
        int found = 0;
        unsigned int proto;
        struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
        struct ipr_resource_entry *gscsi_res = NULL;

        res->needs_sync_complete = 0;
        res->in_erp = 0;
        res->add_to_ml = 0;
        res->del_from_ml = 0;
        res->resetting_device = 0;
        res->reset_occurred = 0;
        res->sdev = NULL;
        res->sata_port = NULL;

        if (ioa_cfg->sis64) {
                proto = cfgtew->u.cfgte64->proto;
                res->res_flags = cfgtew->u.cfgte64->res_flags;
                res->qmodel = IPR_QUEUEING_MODEL64(res);
                res->type = cfgtew->u.cfgte64->res_type;

                memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
                        sizeof(res->res_path));

                res->bus = 0;
                memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                        sizeof(res->dev_lun.scsi_lun));
                res->lun = scsilun_to_int(&res->dev_lun);

                if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
                        list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
                                if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
                                        found = 1;
                                        res->target = gscsi_res->target;
                                        break;
                                }
                        }
                        if (!found) {
                                res->target = find_first_zero_bit(ioa_cfg->target_ids,
                                                                  ioa_cfg->max_devs_supported);
                                set_bit(res->target, ioa_cfg->target_ids);
                        }
                } else if (res->type == IPR_RES_TYPE_IOAFP) {
                        res->bus = IPR_IOAFP_VIRTUAL_BUS;
                        res->target = 0;
                } else if (res->type == IPR_RES_TYPE_ARRAY) {
                        res->bus = IPR_ARRAY_VIRTUAL_BUS;
                        res->target = find_first_zero_bit(ioa_cfg->array_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->array_ids);
                } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
                        res->bus = IPR_VSET_VIRTUAL_BUS;
                        res->target = find_first_zero_bit(ioa_cfg->vset_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->vset_ids);
                } else {
                        res->target = find_first_zero_bit(ioa_cfg->target_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->target_ids);
                }
        } else {
                proto = cfgtew->u.cfgte->proto;
                res->qmodel = IPR_QUEUEING_MODEL(res);
                res->flags = cfgtew->u.cfgte->flags;
                if (res->flags & IPR_IS_IOA_RESOURCE)
                        res->type = IPR_RES_TYPE_IOAFP;
                else
                        res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

                res->bus = cfgtew->u.cfgte->res_addr.bus;
                res->target = cfgtew->u.cfgte->res_addr.target;
                res->lun = cfgtew->u.cfgte->res_addr.lun;
                res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
        }

        ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
                              struct ipr_config_table_entry_wrapper *cfgtew)
{
        if (res->ioa_cfg->sis64) {
                if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
                                        sizeof(cfgtew->u.cfgte64->dev_id)) &&
                        !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                                        sizeof(cfgtew->u.cfgte64->lun))) {
                        return 1;
                }
        } else {
                if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
                    res->target == cfgtew->u.cfgte->res_addr.target &&
                    res->lun == cfgtew->u.cfgte->res_addr.lun)
                        return 1;
        }

        return 0;
}

/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:   resource path
 * @buffer:     buffer
 * @len:        length of buffer provided
 *
 * Return value:
 *      pointer to buffer
 **/
static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
        int i;
        char *p = buffer;

        *p = '\0';
        p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
        for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
                p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

        return buffer;
}

/**
 * ipr_format_res_path - Format the resource path for printing.
 * @ioa_cfg:    ioa config struct
 * @res_path:   resource path
 * @buffer:     buffer
 * @len:        length of buffer provided
 *
 * Return value:
 *      pointer to buffer
 **/
static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
                                 u8 *res_path, char *buffer, int len)
{
        char *p = buffer;

        *p = '\0';
        p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
        __ipr_format_res_path(res_path, p, len - (p - buffer));
        return buffer;
}

/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
                                 struct ipr_config_table_entry_wrapper *cfgtew)
{
        char buffer[IPR_MAX_RES_PATH_LENGTH];
        unsigned int proto;
        int new_path = 0;

        if (res->ioa_cfg->sis64) {
                res->flags = cfgtew->u.cfgte64->flags;
                res->res_flags = cfgtew->u.cfgte64->res_flags;
                res->type = cfgtew->u.cfgte64->res_type;

                memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
                        sizeof(struct ipr_std_inq_data));

                res->qmodel = IPR_QUEUEING_MODEL64(res);
                proto = cfgtew->u.cfgte64->proto;
                res->res_handle = cfgtew->u.cfgte64->res_handle;
                res->dev_id = cfgtew->u.cfgte64->dev_id;

                memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                        sizeof(res->dev_lun.scsi_lun));

                if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
                                        sizeof(res->res_path))) {
                        memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
                                sizeof(res->res_path));
                        new_path = 1;
                }

                if (res->sdev && new_path)
                        sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
                                    ipr_format_res_path(res->ioa_cfg,
                                        res->res_path, buffer, sizeof(buffer)));
        } else {
                res->flags = cfgtew->u.cfgte->flags;
                if (res->flags & IPR_IS_IOA_RESOURCE)
                        res->type = IPR_RES_TYPE_IOAFP;
                else
                        res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

                memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
                        sizeof(struct ipr_std_inq_data));

                res->qmodel = IPR_QUEUEING_MODEL(res);
                proto = cfgtew->u.cfgte->proto;
                res->res_handle = cfgtew->u.cfgte->res_handle;
        }

        ipr_update_ata_class(res, proto);
}

1344 /**
1345  * ipr_clear_res_target - Clear the bit in the bit map representing the target
1346  *                        for the resource.
1347  * @res:        resource entry struct
1349  *
1350  * Return value:
1351  *      none
1352  **/
1353 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1354 {
1355         struct ipr_resource_entry *gscsi_res = NULL;
1356         struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1357
1358         if (!ioa_cfg->sis64)
1359                 return;
1360
1361         if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1362                 clear_bit(res->target, ioa_cfg->array_ids);
1363         else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1364                 clear_bit(res->target, ioa_cfg->vset_ids);
1365         else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1366                 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1367                         if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1368                                 return;
1369                 clear_bit(res->target, ioa_cfg->target_ids);
1370
1371         } else if (res->bus == 0)
1372                 clear_bit(res->target, ioa_cfg->target_ids);
1373 }
1374
1375 /**
1376  * ipr_handle_config_change - Handle a config change from the adapter
1377  * @ioa_cfg:    ioa config struct
1378  * @hostrcb:    hostrcb
1379  *
1380  * Return value:
1381  *      none
1382  **/
1383 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1384                                      struct ipr_hostrcb *hostrcb)
1385 {
1386         struct ipr_resource_entry *res = NULL;
1387         struct ipr_config_table_entry_wrapper cfgtew;
1388         __be32 cc_res_handle;
1389
1390         u32 is_ndn = 1;
1391
1392         if (ioa_cfg->sis64) {
1393                 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1394                 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1395         } else {
1396                 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1397                 cc_res_handle = cfgtew.u.cfgte->res_handle;
1398         }
1399
1400         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1401                 if (res->res_handle == cc_res_handle) {
1402                         is_ndn = 0;
1403                         break;
1404                 }
1405         }
1406
1407         if (is_ndn) {
1408                 if (list_empty(&ioa_cfg->free_res_q)) {
1409                         ipr_send_hcam(ioa_cfg,
1410                                       IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1411                                       hostrcb);
1412                         return;
1413                 }
1414
1415                 res = list_entry(ioa_cfg->free_res_q.next,
1416                                  struct ipr_resource_entry, queue);
1417
1418                 list_del(&res->queue);
1419                 ipr_init_res_entry(res, &cfgtew);
1420                 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1421         }
1422
1423         ipr_update_res_entry(res, &cfgtew);
1424
1425         if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1426                 if (res->sdev) {
1427                         res->del_from_ml = 1;
1428                         res->res_handle = IPR_INVALID_RES_HANDLE;
1429                         if (ioa_cfg->allow_ml_add_del)
1430                                 schedule_work(&ioa_cfg->work_q);
1431                 } else {
1432                         ipr_clear_res_target(res);
1433                         list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1434                 }
1435         } else if (!res->sdev || res->del_from_ml) {
1436                 res->add_to_ml = 1;
1437                 if (ioa_cfg->allow_ml_add_del)
1438                         schedule_work(&ioa_cfg->work_q);
1439         }
1440
1441         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1442 }
1443
1444 /**
1445  * ipr_process_ccn - Op done function for a CCN.
1446  * @ipr_cmd:    ipr command struct
1447  *
1448  * This function is the op done function for a configuration change
1449  * notification HCAM (host controlled asynchronous message) from the adapter.
1450  *
1451  * Return value:
1452  *      none
1453  **/
1454 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1455 {
1456         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1457         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1458         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1459
1460         list_del(&hostrcb->queue);
1461         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1462
1463         if (ioasc) {
1464                 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1465                         dev_err(&ioa_cfg->pdev->dev,
1466                                 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1467
1468                 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1469         } else {
1470                 ipr_handle_config_change(ioa_cfg, hostrcb);
1471         }
1472 }
1473
1474 /**
1475  * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1476  * @i:          index into buffer
1477  * @buf:                string to modify
1478  *
1479  * This function will strip all trailing whitespace, pad the end
1480  * of the string with a single space, and NULL terminate the string.
1481  *
1482  * Return value:
1483  *      new length of string
1484  **/
1485 static int strip_and_pad_whitespace(int i, char *buf)
1486 {
1487         while (i && buf[i] == ' ')
1488                 i--;
1489         buf[i+1] = ' ';
1490         buf[i+2] = '\0';
1491         return i + 2;
1492 }
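/*
 * Illustrative example (editor's sketch, not driver code): with a
 * fixed-width field "IBM     " (three characters and five trailing
 * blanks), starting from its last index:
 *
 *	char buf[32] = "IBM     ";
 *	int next = strip_and_pad_whitespace(7, buf);
 *
 * leaves buf containing "IBM " and returns next == 4, the index at
 * which the caller copies in the next field.
 */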
1493
1494 /**
1495  * ipr_log_vpd_compact - Log the passed VPD compactly.
1496  * @prefix:             string to print at start of printk
1497  * @hostrcb:    hostrcb pointer
1498  * @vpd:                vendor/product id/sn struct
1499  *
1500  * Return value:
1501  *      none
1502  **/
1503 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1504                                 struct ipr_vpd *vpd)
1505 {
1506         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1507         int i = 0;
1508
1509         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1510         i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1511
1512         memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1513         i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1514
1515         memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1516         buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1517
1518         ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1519 }
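/*
 * The compact form emits one log line per device, roughly (values are
 * illustrative only):
 *
 *	Remote IOA VPID/SN: IBM 5702001 0123456789AB
 *
 * in contrast to the multi-line output of ipr_log_vpd() below.
 */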
1520
1521 /**
1522  * ipr_log_vpd - Log the passed VPD to the error log.
1523  * @vpd:                vendor/product id/sn struct
1524  *
1525  * Return value:
1526  *      none
1527  **/
1528 static void ipr_log_vpd(struct ipr_vpd *vpd)
1529 {
1530         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1531                     + IPR_SERIAL_NUM_LEN];
1532
1533         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1534         memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1535                IPR_PROD_ID_LEN);
1536         buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1537         ipr_err("Vendor/Product ID: %s\n", buffer);
1538
1539         memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1540         buffer[IPR_SERIAL_NUM_LEN] = '\0';
1541         ipr_err("    Serial Number: %s\n", buffer);
1542 }
1543
1544 /**
1545  * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1546  * @prefix:             string to print at start of printk
1547  * @hostrcb:    hostrcb pointer
1548  * @vpd:                vendor/product id/sn/wwn struct
1549  *
1550  * Return value:
1551  *      none
1552  **/
1553 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1554                                     struct ipr_ext_vpd *vpd)
1555 {
1556         ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1557         ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1558                      be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1559 }
1560
1561 /**
1562  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1563  * @vpd:                vendor/product id/sn/wwn struct
1564  *
1565  * Return value:
1566  *      none
1567  **/
1568 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1569 {
1570         ipr_log_vpd(&vpd->vpd);
1571         ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1572                 be32_to_cpu(vpd->wwid[1]));
1573 }
1574
1575 /**
1576  * ipr_log_enhanced_cache_error - Log a cache error.
1577  * @ioa_cfg:    ioa config struct
1578  * @hostrcb:    hostrcb struct
1579  *
1580  * Return value:
1581  *      none
1582  **/
1583 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1584                                          struct ipr_hostrcb *hostrcb)
1585 {
1586         struct ipr_hostrcb_type_12_error *error;
1587
1588         if (ioa_cfg->sis64)
1589                 error = &hostrcb->hcam.u.error64.u.type_12_error;
1590         else
1591                 error = &hostrcb->hcam.u.error.u.type_12_error;
1592
1593         ipr_err("-----Current Configuration-----\n");
1594         ipr_err("Cache Directory Card Information:\n");
1595         ipr_log_ext_vpd(&error->ioa_vpd);
1596         ipr_err("Adapter Card Information:\n");
1597         ipr_log_ext_vpd(&error->cfc_vpd);
1598
1599         ipr_err("-----Expected Configuration-----\n");
1600         ipr_err("Cache Directory Card Information:\n");
1601         ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1602         ipr_err("Adapter Card Information:\n");
1603         ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1604
1605         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1606                      be32_to_cpu(error->ioa_data[0]),
1607                      be32_to_cpu(error->ioa_data[1]),
1608                      be32_to_cpu(error->ioa_data[2]));
1609 }
1610
1611 /**
1612  * ipr_log_cache_error - Log a cache error.
1613  * @ioa_cfg:    ioa config struct
1614  * @hostrcb:    hostrcb struct
1615  *
1616  * Return value:
1617  *      none
1618  **/
1619 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1620                                 struct ipr_hostrcb *hostrcb)
1621 {
1622         struct ipr_hostrcb_type_02_error *error =
1623                 &hostrcb->hcam.u.error.u.type_02_error;
1624
1625         ipr_err("-----Current Configuration-----\n");
1626         ipr_err("Cache Directory Card Information:\n");
1627         ipr_log_vpd(&error->ioa_vpd);
1628         ipr_err("Adapter Card Information:\n");
1629         ipr_log_vpd(&error->cfc_vpd);
1630
1631         ipr_err("-----Expected Configuration-----\n");
1632         ipr_err("Cache Directory Card Information:\n");
1633         ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1634         ipr_err("Adapter Card Information:\n");
1635         ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1636
1637         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1638                      be32_to_cpu(error->ioa_data[0]),
1639                      be32_to_cpu(error->ioa_data[1]),
1640                      be32_to_cpu(error->ioa_data[2]));
1641 }
1642
1643 /**
1644  * ipr_log_enhanced_config_error - Log a configuration error.
1645  * @ioa_cfg:    ioa config struct
1646  * @hostrcb:    hostrcb struct
1647  *
1648  * Return value:
1649  *      none
1650  **/
1651 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1652                                           struct ipr_hostrcb *hostrcb)
1653 {
1654         int errors_logged, i;
1655         struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1656         struct ipr_hostrcb_type_13_error *error;
1657
1658         error = &hostrcb->hcam.u.error.u.type_13_error;
1659         errors_logged = be32_to_cpu(error->errors_logged);
1660
1661         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1662                 be32_to_cpu(error->errors_detected), errors_logged);
1663
1664         dev_entry = error->dev;
1665
1666         for (i = 0; i < errors_logged; i++, dev_entry++) {
1667                 ipr_err_separator;
1668
1669                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1670                 ipr_log_ext_vpd(&dev_entry->vpd);
1671
1672                 ipr_err("-----New Device Information-----\n");
1673                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1674
1675                 ipr_err("Cache Directory Card Information:\n");
1676                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1677
1678                 ipr_err("Adapter Card Information:\n");
1679                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1680         }
1681 }
1682
1683 /**
1684  * ipr_log_sis64_config_error - Log a sis64 configuration error.
1685  * @ioa_cfg:    ioa config struct
1686  * @hostrcb:    hostrcb struct
1687  *
1688  * Return value:
1689  *      none
1690  **/
1691 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1692                                        struct ipr_hostrcb *hostrcb)
1693 {
1694         int errors_logged, i;
1695         struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1696         struct ipr_hostrcb_type_23_error *error;
1697         char buffer[IPR_MAX_RES_PATH_LENGTH];
1698
1699         error = &hostrcb->hcam.u.error64.u.type_23_error;
1700         errors_logged = be32_to_cpu(error->errors_logged);
1701
1702         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1703                 be32_to_cpu(error->errors_detected), errors_logged);
1704
1705         dev_entry = error->dev;
1706
1707         for (i = 0; i < errors_logged; i++, dev_entry++) {
1708                 ipr_err_separator;
1709
1710                 ipr_err("Device %d : %s\n", i + 1,
1711                         __ipr_format_res_path(dev_entry->res_path,
1712                                               buffer, sizeof(buffer)));
1713                 ipr_log_ext_vpd(&dev_entry->vpd);
1714
1715                 ipr_err("-----New Device Information-----\n");
1716                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1717
1718                 ipr_err("Cache Directory Card Information:\n");
1719                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1720
1721                 ipr_err("Adapter Card Information:\n");
1722                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1723         }
1724 }
1725
1726 /**
1727  * ipr_log_config_error - Log a configuration error.
1728  * @ioa_cfg:    ioa config struct
1729  * @hostrcb:    hostrcb struct
1730  *
1731  * Return value:
1732  *      none
1733  **/
1734 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1735                                  struct ipr_hostrcb *hostrcb)
1736 {
1737         int errors_logged, i;
1738         struct ipr_hostrcb_device_data_entry *dev_entry;
1739         struct ipr_hostrcb_type_03_error *error;
1740
1741         error = &hostrcb->hcam.u.error.u.type_03_error;
1742         errors_logged = be32_to_cpu(error->errors_logged);
1743
1744         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1745                 be32_to_cpu(error->errors_detected), errors_logged);
1746
1747         dev_entry = error->dev;
1748
1749         for (i = 0; i < errors_logged; i++, dev_entry++) {
1750                 ipr_err_separator;
1751
1752                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1753                 ipr_log_vpd(&dev_entry->vpd);
1754
1755                 ipr_err("-----New Device Information-----\n");
1756                 ipr_log_vpd(&dev_entry->new_vpd);
1757
1758                 ipr_err("Cache Directory Card Information:\n");
1759                 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1760
1761                 ipr_err("Adapter Card Information:\n");
1762                 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1763
1764                 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1765                         be32_to_cpu(dev_entry->ioa_data[0]),
1766                         be32_to_cpu(dev_entry->ioa_data[1]),
1767                         be32_to_cpu(dev_entry->ioa_data[2]),
1768                         be32_to_cpu(dev_entry->ioa_data[3]),
1769                         be32_to_cpu(dev_entry->ioa_data[4]));
1770         }
1771 }
1772
1773 /**
1774  * ipr_log_enhanced_array_error - Log an array configuration error.
1775  * @ioa_cfg:    ioa config struct
1776  * @hostrcb:    hostrcb struct
1777  *
1778  * Return value:
1779  *      none
1780  **/
1781 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1782                                          struct ipr_hostrcb *hostrcb)
1783 {
1784         int i, num_entries;
1785         struct ipr_hostrcb_type_14_error *error;
1786         struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1787         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1788
1789         error = &hostrcb->hcam.u.error.u.type_14_error;
1790
1791         ipr_err_separator;
1792
1793         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1794                 error->protection_level,
1795                 ioa_cfg->host->host_no,
1796                 error->last_func_vset_res_addr.bus,
1797                 error->last_func_vset_res_addr.target,
1798                 error->last_func_vset_res_addr.lun);
1799
1800         ipr_err_separator;
1801
1802         array_entry = error->array_member;
1803         num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1804                             ARRAY_SIZE(error->array_member));
1805
1806         for (i = 0; i < num_entries; i++, array_entry++) {
1807                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1808                         continue;
1809
1810                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1811                         ipr_err("Exposed Array Member %d:\n", i);
1812                 else
1813                         ipr_err("Array Member %d:\n", i);
1814
1815                 ipr_log_ext_vpd(&array_entry->vpd);
1816                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1817                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1818                                  "Expected Location");
1819
1820                 ipr_err_separator;
1821         }
1822 }
1823
1824 /**
1825  * ipr_log_array_error - Log an array configuration error.
1826  * @ioa_cfg:    ioa config struct
1827  * @hostrcb:    hostrcb struct
1828  *
1829  * Return value:
1830  *      none
1831  **/
1832 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1833                                 struct ipr_hostrcb *hostrcb)
1834 {
1835         int i;
1836         struct ipr_hostrcb_type_04_error *error;
1837         struct ipr_hostrcb_array_data_entry *array_entry;
1838         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1839
1840         error = &hostrcb->hcam.u.error.u.type_04_error;
1841
1842         ipr_err_separator;
1843
1844         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1845                 error->protection_level,
1846                 ioa_cfg->host->host_no,
1847                 error->last_func_vset_res_addr.bus,
1848                 error->last_func_vset_res_addr.target,
1849                 error->last_func_vset_res_addr.lun);
1850
1851         ipr_err_separator;
1852
1853         array_entry = error->array_member;
1854
1855         for (i = 0; i < 18; i++) {
1856                 if (memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) {
1857                         if (be32_to_cpu(error->exposed_mode_adn) == i)
1858                                 ipr_err("Exposed Array Member %d:\n", i);
1859                         else
1860                                 ipr_err("Array Member %d:\n", i);
1861
1862                         ipr_log_vpd(&array_entry->vpd);
1863
1864                         ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1865                         ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1866                                          "Expected Location");
1867
1868                         ipr_err_separator;
1869                 }
1870
1871                 /* Always advance, even past unused (all '0' serial number) slots */
1872                 if (i == 9)
1873                         array_entry = error->array_member2;
1874                 else
1875                         array_entry++;
1876         }
1877 }
1878
1879 /**
1880  * ipr_log_hex_data - Log additional hex IOA error data.
1881  * @ioa_cfg:    ioa config struct
1882  * @data:               IOA error data
1883  * @len:                data length
1884  *
1885  * Return value:
1886  *      none
1887  **/
1888 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1889 {
1890         int i;
1891
1892         if (len == 0)
1893                 return;
1894
1895         if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1896                 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1897
1898         for (i = 0; i < len / 4; i += 4) {
1899                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1900                         be32_to_cpu(data[i]),
1901                         be32_to_cpu(data[i+1]),
1902                         be32_to_cpu(data[i+2]),
1903                         be32_to_cpu(data[i+3]));
1904         }
1905 }
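/*
 * Illustrative output (invented values): each line carries the byte
 * offset followed by four big-endian words, so a 32 byte dump looks like:
 *
 *	00000000: DEADBEEF 00000001 00000002 00000003
 *	00000010: 00000004 00000005 00000006 00000007
 */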
1906
1907 /**
1908  * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1909  * @ioa_cfg:    ioa config struct
1910  * @hostrcb:    hostrcb struct
1911  *
1912  * Return value:
1913  *      none
1914  **/
1915 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1916                                             struct ipr_hostrcb *hostrcb)
1917 {
1918         struct ipr_hostrcb_type_17_error *error;
1919
1920         if (ioa_cfg->sis64)
1921                 error = &hostrcb->hcam.u.error64.u.type_17_error;
1922         else
1923                 error = &hostrcb->hcam.u.error.u.type_17_error;
1924
1925         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1926         strim(error->failure_reason);
1927
1928         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1929                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1930         ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1931         ipr_log_hex_data(ioa_cfg, error->data,
1932                          be32_to_cpu(hostrcb->hcam.length) -
1933                          (offsetof(struct ipr_hostrcb_error, u) +
1934                           offsetof(struct ipr_hostrcb_type_17_error, data)));
1935 }
1936
1937 /**
1938  * ipr_log_dual_ioa_error - Log a dual adapter error.
1939  * @ioa_cfg:    ioa config struct
1940  * @hostrcb:    hostrcb struct
1941  *
1942  * Return value:
1943  *      none
1944  **/
1945 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1946                                    struct ipr_hostrcb *hostrcb)
1947 {
1948         struct ipr_hostrcb_type_07_error *error;
1949
1950         error = &hostrcb->hcam.u.error.u.type_07_error;
1951         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1952         strim(error->failure_reason);
1953
1954         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1955                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1956         ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1957         ipr_log_hex_data(ioa_cfg, error->data,
1958                          be32_to_cpu(hostrcb->hcam.length) -
1959                          (offsetof(struct ipr_hostrcb_error, u) +
1960                           offsetof(struct ipr_hostrcb_type_07_error, data)));
1961 }
1962
1963 static const struct {
1964         u8 active;
1965         char *desc;
1966 } path_active_desc[] = {
1967         { IPR_PATH_NO_INFO, "Path" },
1968         { IPR_PATH_ACTIVE, "Active path" },
1969         { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1970 };
1971
1972 static const struct {
1973         u8 state;
1974         char *desc;
1975 } path_state_desc[] = {
1976         { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1977         { IPR_PATH_HEALTHY, "is healthy" },
1978         { IPR_PATH_DEGRADED, "is degraded" },
1979         { IPR_PATH_FAILED, "is failed" }
1980 };
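/*
 * The two tables above decode the packed path_state byte. For example
 * (illustrative), a descriptor whose path_state is
 * (IPR_PATH_ACTIVE | IPR_PATH_DEGRADED) would be reported by
 * ipr_log_fabric_path() below as "Active path is degraded: IOA Port=0".
 */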
1981
1982 /**
1983  * ipr_log_fabric_path - Log a fabric path error
1984  * @hostrcb:    hostrcb struct
1985  * @fabric:             fabric descriptor
1986  *
1987  * Return value:
1988  *      none
1989  **/
1990 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1991                                 struct ipr_hostrcb_fabric_desc *fabric)
1992 {
1993         int i, j;
1994         u8 path_state = fabric->path_state;
1995         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1996         u8 state = path_state & IPR_PATH_STATE_MASK;
1997
1998         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1999                 if (path_active_desc[i].active != active)
2000                         continue;
2001
2002                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2003                         if (path_state_desc[j].state != state)
2004                                 continue;
2005
2006                         if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2007                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2008                                              path_active_desc[i].desc, path_state_desc[j].desc,
2009                                              fabric->ioa_port);
2010                         } else if (fabric->cascaded_expander == 0xff) {
2011                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2012                                              path_active_desc[i].desc, path_state_desc[j].desc,
2013                                              fabric->ioa_port, fabric->phy);
2014                         } else if (fabric->phy == 0xff) {
2015                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2016                                              path_active_desc[i].desc, path_state_desc[j].desc,
2017                                              fabric->ioa_port, fabric->cascaded_expander);
2018                         } else {
2019                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2020                                              path_active_desc[i].desc, path_state_desc[j].desc,
2021                                              fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2022                         }
2023                         return;
2024                 }
2025         }
2026
2027         ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2028                 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2029 }
2030
2031 /**
2032  * ipr_log64_fabric_path - Log a fabric path error
2033  * @hostrcb:    hostrcb struct
2034  * @fabric:             fabric descriptor
2035  *
2036  * Return value:
2037  *      none
2038  **/
2039 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2040                                   struct ipr_hostrcb64_fabric_desc *fabric)
2041 {
2042         int i, j;
2043         u8 path_state = fabric->path_state;
2044         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2045         u8 state = path_state & IPR_PATH_STATE_MASK;
2046         char buffer[IPR_MAX_RES_PATH_LENGTH];
2047
2048         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2049                 if (path_active_desc[i].active != active)
2050                         continue;
2051
2052                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2053                         if (path_state_desc[j].state != state)
2054                                 continue;
2055
2056                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2057                                      path_active_desc[i].desc, path_state_desc[j].desc,
2058                                      ipr_format_res_path(hostrcb->ioa_cfg,
2059                                                 fabric->res_path,
2060                                                 buffer, sizeof(buffer)));
2061                         return;
2062                 }
2063         }
2064
2065         ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2066                 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2067                                     buffer, sizeof(buffer)));
2068 }
2069
2070 static const struct {
2071         u8 type;
2072         char *desc;
2073 } path_type_desc[] = {
2074         { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2075         { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2076         { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2077         { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2078 };
2079
2080 static const struct {
2081         u8 status;
2082         char *desc;
2083 } path_status_desc[] = {
2084         { IPR_PATH_CFG_NO_PROB, "Functional" },
2085         { IPR_PATH_CFG_DEGRADED, "Degraded" },
2086         { IPR_PATH_CFG_FAILED, "Failed" },
2087         { IPR_PATH_CFG_SUSPECT, "Suspect" },
2088         { IPR_PATH_NOT_DETECTED, "Missing" },
2089         { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2090 };
2091
2092 static const char *link_rate[] = {
2093         "unknown",
2094         "disabled",
2095         "phy reset problem",
2096         "spinup hold",
2097         "port selector",
2098         "unknown",
2099         "unknown",
2100         "unknown",
2101         "1.5Gbps",
2102         "3.0Gbps",
2103         "unknown",
2104         "unknown",
2105         "unknown",
2106         "unknown",
2107         "unknown",
2108         "unknown"
2109 };
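/*
 * link_rate[] is indexed by the low nibble of the PHY link rate field:
 *
 *	link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK]
 *
 * so, following the SAS negotiated link rate encoding, a value of 8
 * decodes to "1.5Gbps" and 9 to "3.0Gbps".
 */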
2110
2111 /**
2112  * ipr_log_path_elem - Log a fabric path element.
2113  * @hostrcb:    hostrcb struct
2114  * @cfg:                fabric path element struct
2115  *
2116  * Return value:
2117  *      none
2118  **/
2119 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2120                               struct ipr_hostrcb_config_element *cfg)
2121 {
2122         int i, j;
2123         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2124         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2125
2126         if (type == IPR_PATH_CFG_NOT_EXIST)
2127                 return;
2128
2129         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2130                 if (path_type_desc[i].type != type)
2131                         continue;
2132
2133                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2134                         if (path_status_desc[j].status != status)
2135                                 continue;
2136
2137                         if (type == IPR_PATH_CFG_IOA_PORT) {
2138                                 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2139                                              path_status_desc[j].desc, path_type_desc[i].desc,
2140                                              cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2141                                              be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2142                         } else {
2143                                 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2144                                         ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2145                                                      path_status_desc[j].desc, path_type_desc[i].desc,
2146                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2147                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2148                                 } else if (cfg->cascaded_expander == 0xff) {
2149                                         ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2150                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2151                                                      path_type_desc[i].desc, cfg->phy,
2152                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2153                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2154                                 } else if (cfg->phy == 0xff) {
2155                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2156                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2157                                                      path_type_desc[i].desc, cfg->cascaded_expander,
2158                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2159                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2160                                 } else {
2161                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2162                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2163                                                      path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2164                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2165                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2166                                 }
2167                         }
2168                         return;
2169                 }
2170         }
2171
2172         ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2173                      "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2174                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2175                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2176 }
2177
2178 /**
2179  * ipr_log64_path_elem - Log a fabric path element.
2180  * @hostrcb:    hostrcb struct
2181  * @cfg:                fabric path element struct
2182  *
2183  * Return value:
2184  *      none
2185  **/
2186 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2187                                 struct ipr_hostrcb64_config_element *cfg)
2188 {
2189         int i, j;
2190         u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2191         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2192         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2193         char buffer[IPR_MAX_RES_PATH_LENGTH];
2194
2195         if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2196                 return;
2197
2198         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2199                 if (path_type_desc[i].type != type)
2200                         continue;
2201
2202                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2203                         if (path_status_desc[j].status != status)
2204                                 continue;
2205
2206                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2207                                      path_status_desc[j].desc, path_type_desc[i].desc,
2208                                      ipr_format_res_path(hostrcb->ioa_cfg,
2209                                         cfg->res_path, buffer, sizeof(buffer)),
2210                                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2211                                         be32_to_cpu(cfg->wwid[0]),
2212                                         be32_to_cpu(cfg->wwid[1]));
2213                         return;
2214                 }
2215         }
2216         ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2217                      "WWN=%08X%08X\n", cfg->type_status,
2218                      ipr_format_res_path(hostrcb->ioa_cfg,
2219                         cfg->res_path, buffer, sizeof(buffer)),
2220                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2221                         be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2222 }
2223
2224 /**
2225  * ipr_log_fabric_error - Log a fabric error.
2226  * @ioa_cfg:    ioa config struct
2227  * @hostrcb:    hostrcb struct
2228  *
2229  * Return value:
2230  *      none
2231  **/
2232 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2233                                  struct ipr_hostrcb *hostrcb)
2234 {
2235         struct ipr_hostrcb_type_20_error *error;
2236         struct ipr_hostrcb_fabric_desc *fabric;
2237         struct ipr_hostrcb_config_element *cfg;
2238         int i, add_len;
2239
2240         error = &hostrcb->hcam.u.error.u.type_20_error;
2241         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2242         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2243
2244         add_len = be32_to_cpu(hostrcb->hcam.length) -
2245                 (offsetof(struct ipr_hostrcb_error, u) +
2246                  offsetof(struct ipr_hostrcb_type_20_error, desc));
2247
2248         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2249                 ipr_log_fabric_path(hostrcb, fabric);
2250                 for_each_fabric_cfg(fabric, cfg)
2251                         ipr_log_path_elem(hostrcb, cfg);
2252
2253                 add_len -= be16_to_cpu(fabric->length);
2254                 fabric = (struct ipr_hostrcb_fabric_desc *)
2255                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2256         }
2257
2258         ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2259 }
2260
2261 /**
2262  * ipr_log_sis64_array_error - Log a sis64 array error.
2263  * @ioa_cfg:    ioa config struct
2264  * @hostrcb:    hostrcb struct
2265  *
2266  * Return value:
2267  *      none
2268  **/
2269 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2270                                       struct ipr_hostrcb *hostrcb)
2271 {
2272         int i, num_entries;
2273         struct ipr_hostrcb_type_24_error *error;
2274         struct ipr_hostrcb64_array_data_entry *array_entry;
2275         char buffer[IPR_MAX_RES_PATH_LENGTH];
2276         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2277
2278         error = &hostrcb->hcam.u.error64.u.type_24_error;
2279
2280         ipr_err_separator;
2281
2282         ipr_err("RAID %s Array Configuration: %s\n",
2283                 error->protection_level,
2284                 ipr_format_res_path(ioa_cfg, error->last_res_path,
2285                         buffer, sizeof(buffer)));
2286
2287         ipr_err_separator;
2288
2289         array_entry = error->array_member;
2290         num_entries = min_t(u32, error->num_entries,
2291                             ARRAY_SIZE(error->array_member));
2292
2293         for (i = 0; i < num_entries; i++, array_entry++) {
2294
2295                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2296                         continue;
2297
2298                 if (error->exposed_mode_adn == i)
2299                         ipr_err("Exposed Array Member %d:\n", i);
2300                 else
2301                         ipr_err("Array Member %d:\n", i);
2302
2304                 ipr_log_ext_vpd(&array_entry->vpd);
2305                 ipr_err("Current Location: %s\n",
2306                          ipr_format_res_path(ioa_cfg, array_entry->res_path,
2307                                 buffer, sizeof(buffer)));
2308                 ipr_err("Expected Location: %s\n",
2309                          ipr_format_res_path(ioa_cfg,
2310                                 array_entry->expected_res_path,
2311                                 buffer, sizeof(buffer)));
2312
2313                 ipr_err_separator;
2314         }
2315 }
2316
2317 /**
2318  * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2319  * @ioa_cfg:    ioa config struct
2320  * @hostrcb:    hostrcb struct
2321  *
2322  * Return value:
2323  *      none
2324  **/
2325 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2326                                        struct ipr_hostrcb *hostrcb)
2327 {
2328         struct ipr_hostrcb_type_30_error *error;
2329         struct ipr_hostrcb64_fabric_desc *fabric;
2330         struct ipr_hostrcb64_config_element *cfg;
2331         int i, add_len;
2332
2333         error = &hostrcb->hcam.u.error64.u.type_30_error;
2334
2335         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2336         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2337
2338         add_len = be32_to_cpu(hostrcb->hcam.length) -
2339                 (offsetof(struct ipr_hostrcb64_error, u) +
2340                  offsetof(struct ipr_hostrcb_type_30_error, desc));
2341
2342         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2343                 ipr_log64_fabric_path(hostrcb, fabric);
2344                 for_each_fabric_cfg(fabric, cfg)
2345                         ipr_log64_path_elem(hostrcb, cfg);
2346
2347                 add_len -= be16_to_cpu(fabric->length);
2348                 fabric = (struct ipr_hostrcb64_fabric_desc *)
2349                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2350         }
2351
2352         ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2353 }
2354
2355 /**
2356  * ipr_log_generic_error - Log an adapter error.
2357  * @ioa_cfg:    ioa config struct
2358  * @hostrcb:    hostrcb struct
2359  *
2360  * Return value:
2361  *      none
2362  **/
2363 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2364                                   struct ipr_hostrcb *hostrcb)
2365 {
2366         ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2367                          be32_to_cpu(hostrcb->hcam.length));
2368 }
2369
2370 /**
2371  * ipr_log_sis64_device_error - Log a sis64 device error.
2372  * @ioa_cfg:    ioa config struct
2373  * @hostrcb:    hostrcb struct
2374  *
2375  * Return value:
2376  *      none
2377  **/
2378 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2379                                          struct ipr_hostrcb *hostrcb)
2380 {
2381         struct ipr_hostrcb_type_21_error *error;
2382         char buffer[IPR_MAX_RES_PATH_LENGTH];
2383
2384         error = &hostrcb->hcam.u.error64.u.type_21_error;
2385
2386         ipr_err("-----Failing Device Information-----\n");
2387         ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2388                 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2389                 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2390         ipr_err("Device Resource Path: %s\n",
2391                 __ipr_format_res_path(error->res_path,
2392                                       buffer, sizeof(buffer)));
2393         error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2394         error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2395         ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2396         ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
2397         ipr_err("SCSI Sense Data:\n");
2398         ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2399         ipr_err("SCSI Command Descriptor Block:\n");
2400         ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2401
2402         ipr_err("Additional IOA Data:\n");
2403         ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2404 }
2405
2406 /**
2407  * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2408  * @ioasc:      IOASC
2409  *
2410  * This function will return the index into the ipr_error_table
2411  * for the specified IOASC. If the IOASC is not in the table,
2412  * 0 will be returned, which points to the entry used for unknown errors.
2413  *
2414  * Return value:
2415  *      index into the ipr_error_table
2416  **/
2417 static u32 ipr_get_error(u32 ioasc)
2418 {
2419         int i;
2420
2421         for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2422                 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2423                         return i;
2424
2425         return 0;
2426 }
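/*
 * Typical usage (mirrors the call in ipr_handle_log_data() below):
 *
 *	error_index = ipr_get_error(ioasc);
 *	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
 *
 * An IOASC missing from the table yields index 0, whose entry holds the
 * generic unknown-error text.
 */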
2427
2428 /**
2429  * ipr_handle_log_data - Log an adapter error.
2430  * @ioa_cfg:    ioa config struct
2431  * @hostrcb:    hostrcb struct
2432  *
2433  * This function logs an adapter error to the system.
2434  *
2435  * Return value:
2436  *      none
2437  **/
2438 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2439                                 struct ipr_hostrcb *hostrcb)
2440 {
2441         u32 ioasc;
2442         int error_index;
2443         struct ipr_hostrcb_type_21_error *error;
2444
2445         if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2446                 return;
2447
2448         if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2449                 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2450
2451         if (ioa_cfg->sis64)
2452                 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2453         else
2454                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2455
2456         if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2457             ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2458                 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2459                 scsi_report_bus_reset(ioa_cfg->host,
2460                                       hostrcb->hcam.u.error.fd_res_addr.bus);
2461         }
2462
2463         error_index = ipr_get_error(ioasc);
2464
2465         if (!ipr_error_table[error_index].log_hcam)
2466                 return;
2467
2468         if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2469             hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2470                 error = &hostrcb->hcam.u.error64.u.type_21_error;
2471
2472                 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2473                     ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2474                         return;
2475         }
2476
2477         ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2478
2479         /* Set indication we have logged an error */
2480         ioa_cfg->errors_logged++;
2481
2482         if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2483                 return;
2484         if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2485                 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2486
2487         switch (hostrcb->hcam.overlay_id) {
2488         case IPR_HOST_RCB_OVERLAY_ID_2:
2489                 ipr_log_cache_error(ioa_cfg, hostrcb);
2490                 break;
2491         case IPR_HOST_RCB_OVERLAY_ID_3:
2492                 ipr_log_config_error(ioa_cfg, hostrcb);
2493                 break;
2494         case IPR_HOST_RCB_OVERLAY_ID_4:
2495         case IPR_HOST_RCB_OVERLAY_ID_6:
2496                 ipr_log_array_error(ioa_cfg, hostrcb);
2497                 break;
2498         case IPR_HOST_RCB_OVERLAY_ID_7:
2499                 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2500                 break;
2501         case IPR_HOST_RCB_OVERLAY_ID_12:
2502                 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2503                 break;
2504         case IPR_HOST_RCB_OVERLAY_ID_13:
2505                 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2506                 break;
2507         case IPR_HOST_RCB_OVERLAY_ID_14:
2508         case IPR_HOST_RCB_OVERLAY_ID_16:
2509                 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2510                 break;
2511         case IPR_HOST_RCB_OVERLAY_ID_17:
2512                 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2513                 break;
2514         case IPR_HOST_RCB_OVERLAY_ID_20:
2515                 ipr_log_fabric_error(ioa_cfg, hostrcb);
2516                 break;
2517         case IPR_HOST_RCB_OVERLAY_ID_21:
2518                 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2519                 break;
2520         case IPR_HOST_RCB_OVERLAY_ID_23:
2521                 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2522                 break;
2523         case IPR_HOST_RCB_OVERLAY_ID_24:
2524         case IPR_HOST_RCB_OVERLAY_ID_26:
2525                 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2526                 break;
2527         case IPR_HOST_RCB_OVERLAY_ID_30:
2528                 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2529                 break;
2530         case IPR_HOST_RCB_OVERLAY_ID_1:
2531         case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2532         default:
2533                 ipr_log_generic_error(ioa_cfg, hostrcb);
2534                 break;
2535         }
2536 }
2537
2538 /**
2539  * ipr_process_error - Op done function for an adapter error log.
2540  * @ipr_cmd:    ipr command struct
2541  *
2542  * This function is the op done function for an error log HCAM
2543  * (host controlled asynchronous message) from the adapter. It will
2544  * log the error and send the HCAM back to the adapter.
2545  *
2546  * Return value:
2547  *      none
2548  **/
2549 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2550 {
2551         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2552         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2553         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2554         u32 fd_ioasc;
2555
2556         if (ioa_cfg->sis64)
2557                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2558         else
2559                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2560
2561         list_del(&hostrcb->queue);
2562         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2563
2564         if (!ioasc) {
2565                 ipr_handle_log_data(ioa_cfg, hostrcb);
2566                 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2567                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2568         } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2569                 dev_err(&ioa_cfg->pdev->dev,
2570                         "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2571         }
2572
2573         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2574 }
2575
2576 /**
2577  * ipr_timeout -  An internally generated op has timed out.
2578  * @ipr_cmd:    ipr command struct
2579  *
2580  * This function blocks host requests and initiates an
2581  * adapter reset.
2582  *
2583  * Return value:
2584  *      none
2585  **/
2586 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2587 {
2588         unsigned long lock_flags = 0;
2589         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2590
2591         ENTER;
2592         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2593
2594         ioa_cfg->errors_logged++;
2595         dev_err(&ioa_cfg->pdev->dev,
2596                 "Adapter being reset due to command timeout.\n");
2597
2598         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2599                 ioa_cfg->sdt_state = GET_DUMP;
2600
2601         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2602                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2603
2604         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2605         LEAVE;
2606 }
2607
2608 /**
2609  * ipr_oper_timeout -  Adapter timed out transitioning to operational
2610  * @ipr_cmd:    ipr command struct
2611  *
2612  * This function blocks host requests and initiates an
2613  * adapter reset.
2614  *
2615  * Return value:
2616  *      none
2617  **/
2618 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2619 {
2620         unsigned long lock_flags = 0;
2621         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2622
2623         ENTER;
2624         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2625
2626         ioa_cfg->errors_logged++;
2627         dev_err(&ioa_cfg->pdev->dev,
2628                 "Adapter timed out transitioning to operational.\n");
2629
2630         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2631                 ioa_cfg->sdt_state = GET_DUMP;
2632
2633         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2634                 if (ipr_fastfail)
2635                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2636                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2637         }
2638
2639         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2640         LEAVE;
2641 }
2642
2643 /**
2644  * ipr_find_ses_entry - Find matching SES in SES table
2645  * @res:        resource entry struct of SES
2646  *
2647  * Return value:
2648  *      pointer to SES table entry / NULL on failure
2649  **/
2650 static const struct ipr_ses_table_entry *
2651 ipr_find_ses_entry(struct ipr_resource_entry *res)
2652 {
2653         int i, j, matches;
2654         struct ipr_std_inq_vpids *vpids;
2655         const struct ipr_ses_table_entry *ste = ipr_ses_table;
2656
2657         for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2658                 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2659                         if (ste->compare_product_id_byte[j] == 'X') {
2660                                 vpids = &res->std_inq_data.vpids;
2661                                 if (vpids->product_id[j] == ste->product_id[j])
2662                                         matches++;
2663                                 else
2664                                         break;
2665                         } else
2666                                 matches++;
2667                 }
2668
2669                 if (matches == IPR_PROD_ID_LEN)
2670                         return ste;
2671         }
2672
2673         return NULL;
2674 }
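/*
 * Illustration of the wildcard matching above (hypothetical table entry, not
 * necessarily present in ipr_ses_table): with compare_product_id_byte set to
 * "XXXXXXX         ", only the first seven bytes of the inquiry product ID
 * are compared against ste->product_id; every non-'X' position is a
 * don't-care and counts as a match unconditionally.
 */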
2675
2676 /**
2677  * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2678  * @ioa_cfg:    ioa config struct
2679  * @bus:                SCSI bus
2680  * @bus_width:  bus width
2681  *
2682  * Return value:
2683  *      SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2684  *      For a 2-byte (wide) SCSI bus, the data rate in MB/sec is twice
2685  *      the bus frequency in MHz (e.g. a wide-enabled bus running at
2686  *      160 MHz transfers at most 320 MB/sec).
2687  **/
2688 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2689 {
2690         struct ipr_resource_entry *res;
2691         const struct ipr_ses_table_entry *ste;
2692         u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2693
2694         /* Loop through each config table entry in the config table buffer */
2695         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2696                 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2697                         continue;
2698
2699                 if (bus != res->bus)
2700                         continue;
2701
2702                 ste = ipr_find_ses_entry(res);
                     if (!ste)
2703                         continue;
2704
2705                 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2706         }
2707
2708         return max_xfer_rate;
2709 }
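/*
 * Worked example for the computation above, assuming max_bus_speed_limit is
 * expressed in MB/sec as the kernel-doc suggests: a SES entry limiting a
 * wide (16-bit) bus to 160 MB/sec yields (160 * 10) / (16 / 8) = 800, i.e.
 * 80 MHz in the 100KHz units this function returns.
 */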
2710
2711 /**
2712  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2713  * @ioa_cfg:            ioa config struct
2714  * @max_delay:          max delay in micro-seconds to wait
2715  *
2716  * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2717  *
2718  * Return value:
2719  *      0 on success / other on failure
2720  **/
2721 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2722 {
2723         volatile u32 pcii_reg;
2724         int delay = 1;
2725
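        /* Poll with exponential backoff: the delay doubles every pass
         * (1us, 2us, 4us, ...), bounding the total busy-wait at roughly
         * twice max_delay. */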
2726         /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2727         while (delay < max_delay) {
2728                 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2729
2730                 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2731                         return 0;
2732
2733                 /* udelay cannot be used if delay is more than a few milliseconds */
2734                 if ((delay / 1000) > MAX_UDELAY_MS)
2735                         mdelay(delay / 1000);
2736                 else
2737                         udelay(delay);
2738
2739                 delay += delay;
2740         }
2741         return -EIO;
2742 }
2743
2744 /**
2745  * ipr_get_sis64_dump_data_section - Dump IOA memory
2746  * @ioa_cfg:                    ioa config struct
2747  * @start_addr:                 adapter address to dump
2748  * @dest:                       destination kernel buffer
2749  * @length_in_words:            length to dump in 4 byte words
2750  *
2751  * Return value:
2752  *      0 on success
2753  **/
2754 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2755                                            u32 start_addr,
2756                                            __be32 *dest, u32 length_in_words)
2757 {
2758         int i;
2759
2760         for (i = 0; i < length_in_words; i++) {
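                /* Select the adapter-side address via the dump address
                 * register, then read the 32-bit word back through the
                 * indirect dump data register. */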
2761                 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2762                 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2763                 dest++;
2764         }
2765
2766         return 0;
2767 }
2768
2769 /**
2770  * ipr_get_ldump_data_section - Dump IOA memory
2771  * @ioa_cfg:                    ioa config struct
2772  * @start_addr:                 adapter address to dump
2773  * @dest:                               destination kernel buffer
2774  * @length_in_words:    length to dump in 4 byte words
2775  *
2776  * Return value:
2777  *      0 on success / -EIO on failure
2778  **/
2779 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2780                                       u32 start_addr,
2781                                       __be32 *dest, u32 length_in_words)
2782 {
2783         volatile u32 temp_pcii_reg;
2784         int i, delay = 0;
2785
2786         if (ioa_cfg->sis64)
2787                 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2788                                                        dest, length_in_words);
2789
2790         /* Write IOA interrupt reg starting LDUMP state  */
2791         writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2792                ioa_cfg->regs.set_uproc_interrupt_reg32);
2793
2794         /* Wait for IO debug acknowledge */
2795         if (ipr_wait_iodbg_ack(ioa_cfg,
2796                                IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2797                 dev_err(&ioa_cfg->pdev->dev,
2798                         "IOA dump long data transfer timeout\n");
2799                 return -EIO;
2800         }
2801
2802         /* Signal LDUMP interlocked - clear IO debug ack */
2803         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2804                ioa_cfg->regs.clr_interrupt_reg);
2805
2806         /* Write Mailbox with starting address */
2807         writel(start_addr, ioa_cfg->ioa_mailbox);
2808
2809         /* Signal address valid - clear IOA Reset alert */
2810         writel(IPR_UPROCI_RESET_ALERT,
2811                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2812
2813         for (i = 0; i < length_in_words; i++) {
2814                 /* Wait for IO debug acknowledge */
2815                 if (ipr_wait_iodbg_ack(ioa_cfg,
2816                                        IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2817                         dev_err(&ioa_cfg->pdev->dev,
2818                                 "IOA dump short data transfer timeout\n");
2819                         return -EIO;
2820                 }
2821
2822                 /* Read data from mailbox and increment destination pointer */
2823                 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2824                 dest++;
2825
2826                 /* For all but the last word of data, signal data received */
2827                 if (i < (length_in_words - 1)) {
2828                         /* Signal dump data received - Clear IO debug Ack */
2829                         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2830                                ioa_cfg->regs.clr_interrupt_reg);
2831                 }
2832         }
2833
2834         /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2835         writel(IPR_UPROCI_RESET_ALERT,
2836                ioa_cfg->regs.set_uproc_interrupt_reg32);
2837
2838         writel(IPR_UPROCI_IO_DEBUG_ALERT,
2839                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2840
2841         /* Signal dump data received - Clear IO debug Ack */
2842         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2843                ioa_cfg->regs.clr_interrupt_reg);
2844
2845         /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2846         while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2847                 temp_pcii_reg =
2848                     readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2849
2850                 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2851                         return 0;
2852
2853                 udelay(10);
2854                 delay += 10;
2855         }
2856
2857         return 0;
2858 }
2859
2860 #ifdef CONFIG_SCSI_IPR_DUMP
2861 /**
2862  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2863  * @ioa_cfg:            ioa config struct
2864  * @pci_address:        adapter address
2865  * @length:                     length of data to copy
2866  *
2867  * Copy data from PCI adapter to kernel buffer.
2868  * Note: length MUST be a 4 byte multiple
2869  * Return value:
2870  *      0 on success / other on failure
2871  **/
2872 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2873                         unsigned long pci_address, u32 length)
2874 {
2875         int bytes_copied = 0;
2876         int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2877         __be32 *page;
2878         unsigned long lock_flags = 0;
2879         struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2880
2881         if (ioa_cfg->sis64)
2882                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2883         else
2884                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2885
2886         while (bytes_copied < length &&
2887                (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2888                 if (ioa_dump->page_offset >= PAGE_SIZE ||
2889                     ioa_dump->page_offset == 0) {
2890                         page = (__be32 *)__get_free_page(GFP_ATOMIC);
2891
2892                         if (!page) {
2893                                 ipr_trace;
2894                                 return bytes_copied;
2895                         }
2896
2897                         ioa_dump->page_offset = 0;
2898                         ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2899                         ioa_dump->next_page_index++;
2900                 } else
2901                         page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2902
2903                 rem_len = length - bytes_copied;
2904                 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2905                 cur_len = min(rem_len, rem_page_len);
2906
2907                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2908                 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2909                         rc = -EIO;
2910                 } else {
2911                         rc = ipr_get_ldump_data_section(ioa_cfg,
2912                                                         pci_address + bytes_copied,
2913                                                         &page[ioa_dump->page_offset / 4],
2914                                                         (cur_len / sizeof(u32)));
2915                 }
2916                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2917
2918                 if (!rc) {
2919                         ioa_dump->page_offset += cur_len;
2920                         bytes_copied += cur_len;
2921                 } else {
2922                         ipr_trace;
2923                         break;
2924                 }
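                /* Copying a large dump section can take a while; yield the
                 * CPU between chunks so other tasks are not starved. */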
2925                 schedule();
2926         }
2927
2928         return bytes_copied;
2929 }
2930
2931 /**
2932  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2933  * @hdr:        dump entry header struct
2934  *
2935  * Return value:
2936  *      nothing
2937  **/
2938 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2939 {
2940         hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2941         hdr->num_elems = 1;
2942         hdr->offset = sizeof(*hdr);
2943         hdr->status = IPR_DUMP_STATUS_SUCCESS;
2944 }
2945
2946 /**
2947  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2948  * @ioa_cfg:    ioa config struct
2949  * @driver_dump:        driver dump struct
2950  *
2951  * Return value:
2952  *      nothing
2953  **/
2954 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2955                                    struct ipr_driver_dump *driver_dump)
2956 {
2957         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2958
2959         ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2960         driver_dump->ioa_type_entry.hdr.len =
2961                 sizeof(struct ipr_dump_ioa_type_entry) -
2962                 sizeof(struct ipr_dump_entry_header);
2963         driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2964         driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2965         driver_dump->ioa_type_entry.type = ioa_cfg->type;
2966         driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2967                 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2968                 ucode_vpd->minor_release[1];
2969         driver_dump->hdr.num_entries++;
2970 }
2971
2972 /**
2973  * ipr_dump_version_data - Fill in the driver version in the dump.
2974  * @ioa_cfg:    ioa config struct
2975  * @driver_dump:        driver dump struct
2976  *
2977  * Return value:
2978  *      nothing
2979  **/
2980 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2981                                   struct ipr_driver_dump *driver_dump)
2982 {
2983         ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2984         driver_dump->version_entry.hdr.len =
2985                 sizeof(struct ipr_dump_version_entry) -
2986                 sizeof(struct ipr_dump_entry_header);
2987         driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2988         driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2989         strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2990         driver_dump->hdr.num_entries++;
2991 }
2992
2993 /**
2994  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2995  * @ioa_cfg:    ioa config struct
2996  * @driver_dump:        driver dump struct
2997  *
2998  * Return value:
2999  *      nothing
3000  **/
3001 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3002                                    struct ipr_driver_dump *driver_dump)
3003 {
3004         ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3005         driver_dump->trace_entry.hdr.len =
3006                 sizeof(struct ipr_dump_trace_entry) -
3007                 sizeof(struct ipr_dump_entry_header);
3008         driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3009         driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3010         memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3011         driver_dump->hdr.num_entries++;
3012 }
3013
3014 /**
3015  * ipr_dump_location_data - Fill in the IOA location in the dump.
3016  * @ioa_cfg:    ioa config struct
3017  * @driver_dump:        driver dump struct
3018  *
3019  * Return value:
3020  *      nothing
3021  **/
3022 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3023                                    struct ipr_driver_dump *driver_dump)
3024 {
3025         ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3026         driver_dump->location_entry.hdr.len =
3027                 sizeof(struct ipr_dump_location_entry) -
3028                 sizeof(struct ipr_dump_entry_header);
3029         driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3030         driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3031         strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3032         driver_dump->hdr.num_entries++;
3033 }
3034
3035 /**
3036  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3037  * @ioa_cfg:    ioa config struct
3038  * @dump:               dump struct
3039  *
3040  * Return value:
3041  *      nothing
3042  **/
3043 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3044 {
3045         unsigned long start_addr, sdt_word;
3046         unsigned long lock_flags = 0;
3047         struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3048         struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3049         u32 num_entries, max_num_entries, start_off, end_off;
3050         u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3051         struct ipr_sdt *sdt;
3052         int valid = 1;
3053         int i;
3054
3055         ENTER;
3056
3057         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3058
3059         if (ioa_cfg->sdt_state != READ_DUMP) {
3060                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3061                 return;
3062         }
3063
3064         if (ioa_cfg->sis64) {
3065                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3066                 ssleep(IPR_DUMP_DELAY_SECONDS);
3067                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3068         }
3069
3070         start_addr = readl(ioa_cfg->ioa_mailbox);
3071
3072         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3073                 dev_err(&ioa_cfg->pdev->dev,
3074                         "Invalid dump table format: %lx\n", start_addr);
3075                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3076                 return;
3077         }
3078
3079         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3080
3081         driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3082
3083         /* Initialize the overall dump header */
3084         driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3085         driver_dump->hdr.num_entries = 1;
3086         driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3087         driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3088         driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3089         driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3090
3091         ipr_dump_version_data(ioa_cfg, driver_dump);
3092         ipr_dump_location_data(ioa_cfg, driver_dump);
3093         ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3094         ipr_dump_trace_data(ioa_cfg, driver_dump);
3095
3096         /* Update dump_header */
3097         driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3098
3099         /* IOA Dump entry */
3100         ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3101         ioa_dump->hdr.len = 0;
3102         ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3103         ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3104
3105         /* First entries in sdt are actually a list of dump addresses and
3106          * lengths to gather the real dump data.  sdt represents the pointer
3107          * to the ioa generated dump table.  Dump data will be extracted based
3108          * on entries in this table */
3109         sdt = &ioa_dump->sdt;
3110
3111         if (ioa_cfg->sis64) {
3112                 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3113                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3114         } else {
3115                 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3116                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3117         }
3118
3119         bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3120                         (max_num_entries * sizeof(struct ipr_sdt_entry));
3121         rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3122                                         bytes_to_copy / sizeof(__be32));
3123
3124         /* Smart Dump table is ready to use and the first entry is valid */
3125         if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3126             (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3127                 dev_err(&ioa_cfg->pdev->dev,
3128                         "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3129                         rc, be32_to_cpu(sdt->hdr.state));
3130                 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3131                 ioa_cfg->sdt_state = DUMP_OBTAINED;
3132                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3133                 return;
3134         }
3135
3136         num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3137
3138         if (num_entries > max_num_entries)
3139                 num_entries = max_num_entries;
3140
3141         /* Update dump length to the actual data to be copied */
3142         dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3143         if (ioa_cfg->sis64)
3144                 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3145         else
3146                 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3147
3148         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3149
3150         for (i = 0; i < num_entries; i++) {
3151                 if (ioa_dump->hdr.len > max_dump_size) {
3152                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3153                         break;
3154                 }
3155
3156                 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3157                         sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3158                         if (ioa_cfg->sis64)
3159                                 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3160                         else {
3161                                 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3162                                 end_off = be32_to_cpu(sdt->entry[i].end_token);
3163
3164                                 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3165                                         bytes_to_copy = end_off - start_off;
3166                                 else
3167                                         valid = 0;
3168                         }
3169                         if (valid) {
3170                                 if (bytes_to_copy > max_dump_size) {
3171                                         sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3172                                         continue;
3173                                 }
3174
3175                                 /* Copy data from adapter to driver buffers */
3176                                 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3177                                                             bytes_to_copy);
3178
3179                                 ioa_dump->hdr.len += bytes_copied;
3180
3181                                 if (bytes_copied != bytes_to_copy) {
3182                                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3183                                         break;
3184                                 }
3185                         }
3186                 }
3187         }
3188
3189         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3190
3191         /* Update dump_header */
3192         driver_dump->hdr.len += ioa_dump->hdr.len;
3193         wmb();
3194         ioa_cfg->sdt_state = DUMP_OBTAINED;
3195         LEAVE;
3196 }
3197
3198 #else
3199 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3200 #endif
3201
3202 /**
3203  * ipr_release_dump - Free adapter dump memory
3204  * @kref:       kref struct
3205  *
3206  * Return value:
3207  *      nothing
3208  **/
3209 static void ipr_release_dump(struct kref *kref)
3210 {
3211         struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3212         struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3213         unsigned long lock_flags = 0;
3214         int i;
3215
3216         ENTER;
3217         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3218         ioa_cfg->dump = NULL;
3219         ioa_cfg->sdt_state = INACTIVE;
3220         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3221
3222         for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3223                 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3224
3225         vfree(dump->ioa_dump.ioa_data);
3226         kfree(dump);
3227         LEAVE;
3228 }
3229
3230 /**
3231  * ipr_worker_thread - Worker thread
3232  * @work:               ioa config struct
3233  *
3234  * Called at task level from a work thread. This function takes care
3235  * of adding and removing devices from the mid-layer as configuration
3236  * changes are detected by the adapter.
3237  *
3238  * Return value:
3239  *      nothing
3240  **/
3241 static void ipr_worker_thread(struct work_struct *work)
3242 {
3243         unsigned long lock_flags;
3244         struct ipr_resource_entry *res;
3245         struct scsi_device *sdev;
3246         struct ipr_dump *dump;
3247         struct ipr_ioa_cfg *ioa_cfg =
3248                 container_of(work, struct ipr_ioa_cfg, work_q);
3249         u8 bus, target, lun;
3250         int did_work;
3251
3252         ENTER;
3253         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3254
3255         if (ioa_cfg->sdt_state == READ_DUMP) {
3256                 dump = ioa_cfg->dump;
3257                 if (!dump) {
3258                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3259                         return;
3260                 }
3261                 kref_get(&dump->kref);
3262                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3263                 ipr_get_ioa_dump(ioa_cfg, dump);
3264                 kref_put(&dump->kref, ipr_release_dump);
3265
3266                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3267                 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3268                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3269                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3270                 return;
3271         }
3272
3273 restart:
3274         do {
3275                 did_work = 0;
3276                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
3277                     !ioa_cfg->allow_ml_add_del) {
3278                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3279                         return;
3280                 }
3281
3282                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3283                         if (res->del_from_ml && res->sdev) {
3284                                 did_work = 1;
3285                                 sdev = res->sdev;
3286                                 if (!scsi_device_get(sdev)) {
3287                                         if (!res->add_to_ml)
3288                                                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3289                                         else
3290                                                 res->del_from_ml = 0;
3291                                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3292                                         scsi_remove_device(sdev);
3293                                         scsi_device_put(sdev);
3294                                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3295                                 }
3296                                 break;
3297                         }
3298                 }
3299         } while (did_work);
3300
3301         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3302                 if (res->add_to_ml) {
3303                         bus = res->bus;
3304                         target = res->target;
3305                         lun = res->lun;
3306                         res->add_to_ml = 0;
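                        /* scsi_add_device() may sleep, so drop the host lock
                         * around the call; the resource list can change while
                         * unlocked, hence the restart from the list head. */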
3307                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3308                         scsi_add_device(ioa_cfg->host, bus, target, lun);
3309                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3310                         goto restart;
3311                 }
3312         }
3313
3314         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3315         kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3316         LEAVE;
3317 }
3318
3319 #ifdef CONFIG_SCSI_IPR_TRACE
3320 /**
3321  * ipr_read_trace - Dump the adapter trace
3322  * @filp:               open sysfs file
3323  * @kobj:               kobject struct
3324  * @bin_attr:           bin_attribute struct
3325  * @buf:                buffer
3326  * @off:                offset
3327  * @count:              buffer size
3328  *
3329  * Return value:
3330  *      number of bytes printed to buffer
3331  **/
3332 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3333                               struct bin_attribute *bin_attr,
3334                               char *buf, loff_t off, size_t count)
3335 {
3336         struct device *dev = container_of(kobj, struct device, kobj);
3337         struct Scsi_Host *shost = class_to_shost(dev);
3338         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3339         unsigned long lock_flags = 0;
3340         ssize_t ret;
3341
3342         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3343         ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3344                                 IPR_TRACE_SIZE);
3345         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3346
3347         return ret;
3348 }
3349
3350 static struct bin_attribute ipr_trace_attr = {
3351         .attr = {
3352                 .name = "trace",
3353                 .mode = S_IRUGO,
3354         },
3355         .size = 0,
3356         .read = ipr_read_trace,
3357 };
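/*
 * The trace buffer is exposed read-only through sysfs. On a typical system
 * it could be captured with something like the following (host number and
 * exact sysfs path are illustrative and may vary by kernel and config):
 *
 *   dd if=/sys/class/scsi_host/host0/trace of=/tmp/ipr_trace bs=4096
 */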
3358 #endif
3359
3360 /**
3361  * ipr_show_fw_version - Show the firmware version
3362  * @dev:        class device struct
3363  * @buf:        buffer
3364  *
3365  * Return value:
3366  *      number of bytes printed to buffer
3367  **/
3368 static ssize_t ipr_show_fw_version(struct device *dev,
3369                                    struct device_attribute *attr, char *buf)
3370 {
3371         struct Scsi_Host *shost = class_to_shost(dev);
3372         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3373         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3374         unsigned long lock_flags = 0;
3375         int len;
3376
3377         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3378         len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3379                        ucode_vpd->major_release, ucode_vpd->card_type,
3380                        ucode_vpd->minor_release[0],
3381                        ucode_vpd->minor_release[1]);
3382         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3383         return len;
3384 }
3385
3386 static struct device_attribute ipr_fw_version_attr = {
3387         .attr = {
3388                 .name =         "fw_version",
3389                 .mode =         S_IRUGO,
3390         },
3391         .show = ipr_show_fw_version,
3392 };
3393
3394 /**
3395  * ipr_show_log_level - Show the adapter's error logging level
3396  * @dev:        class device struct
3397  * @buf:        buffer
3398  *
3399  * Return value:
3400  *      number of bytes printed to buffer
3401  **/
3402 static ssize_t ipr_show_log_level(struct device *dev,
3403                                    struct device_attribute *attr, char *buf)
3404 {
3405         struct Scsi_Host *shost = class_to_shost(dev);
3406         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3407         unsigned long lock_flags = 0;
3408         int len;
3409
3410         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3411         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3412         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3413         return len;
3414 }
3415
3416 /**
3417  * ipr_store_log_level - Change the adapter's error logging level
3418  * @dev:        class device struct
3419  * @buf:        buffer
3420  *
3421  * Return value:
3422  *      number of bytes consumed on success
3423  **/
3424 static ssize_t ipr_store_log_level(struct device *dev,
3425                                    struct device_attribute *attr,
3426                                    const char *buf, size_t count)
3427 {
3428         struct Scsi_Host *shost = class_to_shost(dev);
3429         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3430         unsigned long lock_flags = 0;
3431
3432         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3433         ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3434         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3435         return strlen(buf);
3436 }
3437
3438 static struct device_attribute ipr_log_level_attr = {
3439         .attr = {
3440                 .name =         "log_level",
3441                 .mode =         S_IRUGO | S_IWUSR,
3442         },
3443         .show = ipr_show_log_level,
3444         .store = ipr_store_log_level
3445 };
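/*
 * Both this attribute and fw_version above are reachable through the
 * scsi_host class directory, e.g. (hypothetical host number):
 *
 *   cat /sys/class/scsi_host/host0/fw_version
 *   echo 4 > /sys/class/scsi_host/host0/log_level
 */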
3446
3447 /**
3448  * ipr_store_diagnostics - IOA Diagnostics interface
3449  * @dev:        device struct
3450  * @buf:        buffer
3451  * @count:      buffer size
3452  *
3453  * This function will reset the adapter and wait a reasonable
3454  * amount of time for any errors that the adapter might log.
3455  *
3456  * Return value:
3457  *      count on success / other on failure
3458  **/
3459 static ssize_t ipr_store_diagnostics(struct device *dev,
3460                                      struct device_attribute *attr,
3461                                      const char *buf, size_t count)
3462 {
3463         struct Scsi_Host *shost = class_to_shost(dev);
3464         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3465         unsigned long lock_flags = 0;
3466         int rc = count;
3467
3468         if (!capable(CAP_SYS_ADMIN))
3469                 return -EACCES;
3470
3471         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3472         while (ioa_cfg->in_reset_reload) {
3473                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3474                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3475                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3476         }
3477
3478         ioa_cfg->errors_logged = 0;
3479         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3480
3481         if (ioa_cfg->in_reset_reload) {
3482                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3483                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3484
3485                 /* Wait for a second for any errors to be logged */
3486                 msleep(1000);
3487         } else {
3488                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3489                 return -EIO;
3490         }
3491
3492         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3493         if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3494                 rc = -EIO;
3495         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3496
3497         return rc;
3498 }
3499
3500 static struct device_attribute ipr_diagnostics_attr = {
3501         .attr = {
3502                 .name =         "run_diagnostics",
3503                 .mode =         S_IWUSR,
3504         },
3505         .store = ipr_store_diagnostics
3506 };
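/*
 * Any write by a sufficiently privileged user triggers the diagnostic reset;
 * the data written is ignored, e.g. (hypothetical host number):
 *
 *   echo 1 > /sys/class/scsi_host/host0/run_diagnostics
 */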
3507
3508 /**
3509  * ipr_show_adapter_state - Show the adapter's state
3510  * @dev:        device struct
3511  * @buf:        buffer
3512  *
3513  * Return value:
3514  *      number of bytes printed to buffer
3515  **/
3516 static ssize_t ipr_show_adapter_state(struct device *dev,
3517                                       struct device_attribute *attr, char *buf)
3518 {
3519         struct Scsi_Host *shost = class_to_shost(dev);
3520         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3521         unsigned long lock_flags = 0;
3522         int len;
3523
3524         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3525         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3526                 len = snprintf(buf, PAGE_SIZE, "offline\n");
3527         else
3528                 len = snprintf(buf, PAGE_SIZE, "online\n");
3529         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3530         return len;
3531 }
3532
3533 /**
3534  * ipr_store_adapter_state - Change adapter state
3535  * @dev:        device struct
3536  * @buf:        buffer
3537  * @count:      buffer size
3538  *
3539  * This function will change the adapter's state.
3540  *
3541  * Return value:
3542  *      count on success / other on failure
3543  **/
3544 static ssize_t ipr_store_adapter_state(struct device *dev,
3545                                        struct device_attribute *attr,
3546                                        const char *buf, size_t count)
3547 {
3548         struct Scsi_Host *shost = class_to_shost(dev);
3549         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3550         unsigned long lock_flags;
3551         int result = count, i;
3552
3553         if (!capable(CAP_SYS_ADMIN))
3554                 return -EACCES;
3555
3556         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3557         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3558             !strncmp(buf, "online", 6)) {
3559                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3560                         spin_lock(&ioa_cfg->hrrq[i]._lock);
3561                         ioa_cfg->hrrq[i].ioa_is_dead = 0;
3562                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
3563                 }
3564                 wmb();
3565                 ioa_cfg->reset_retries = 0;
3566                 ioa_cfg->in_ioa_bringdown = 0;
3567                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3568         }
3569         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3570         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3571
3572         return result;
3573 }
3574
3575 static struct device_attribute ipr_ioa_state_attr = {
3576         .attr = {
3577                 .name =         "online_state",
3578                 .mode =         S_IRUGO | S_IWUSR,
3579         },
3580         .show = ipr_show_adapter_state,
3581         .store = ipr_store_adapter_state
3582 };
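/*
 * Only the transition back to "online" is acted upon above; writing the
 * literal string brings a dead adapter through a full reset, e.g.
 * (hypothetical host number):
 *
 *   echo online > /sys/class/scsi_host/host0/online_state
 */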
3583
3584 /**
3585  * ipr_store_reset_adapter - Reset the adapter
3586  * @dev:        device struct
3587  * @buf:        buffer
3588  * @count:      buffer size
3589  *
3590  * This function will reset the adapter.
3591  *
3592  * Return value:
3593  *      count on success / other on failure
3594  **/
3595 static ssize_t ipr_store_reset_adapter(struct device *dev,
3596                                        struct device_attribute *attr,
3597                                        const char *buf, size_t count)
3598 {
3599         struct Scsi_Host *shost = class_to_shost(dev);
3600         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3601         unsigned long lock_flags;
3602         int result = count;
3603
3604         if (!capable(CAP_SYS_ADMIN))
3605                 return -EACCES;
3606
3607         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3608         if (!ioa_cfg->in_reset_reload)
3609                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3610         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3611         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3612
3613         return result;
3614 }
3615
3616 static struct device_attribute ipr_ioa_reset_attr = {
3617         .attr = {
3618                 .name =         "reset_host",
3619                 .mode =         S_IWUSR,
3620         },
3621         .store = ipr_store_reset_adapter
3622 };
3623
3624 static int ipr_iopoll(struct blk_iopoll *iop, int budget);
3625 /**
3626  * ipr_show_iopoll_weight - Show ipr polling mode
3627  * @dev:        class device struct
3628  * @buf:        buffer
3629  *
3630  * Return value:
3631  *      number of bytes printed to buffer
3632  **/
3633 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3634                                    struct device_attribute *attr, char *buf)
3635 {
3636         struct Scsi_Host *shost = class_to_shost(dev);
3637         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3638         unsigned long lock_flags = 0;
3639         int len;
3640
3641         spin_lock_irqsave(shost->host_lock, lock_flags);
3642         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3643         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3644
3645         return len;
3646 }
3647
3648 /**
3649  * ipr_store_iopoll_weight - Change the adapter's polling mode
3650  * @dev:        class device struct
3651  * @buf:        buffer
3652  *
3653  * Return value:
3654  *      number of bytes consumed on success / -EINVAL on failure
3655  **/
3656 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3657                                         struct device_attribute *attr,
3658                                         const char *buf, size_t count)
3659 {
3660         struct Scsi_Host *shost = class_to_shost(dev);
3661         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3662         unsigned long user_iopoll_weight;
3663         unsigned long lock_flags = 0;
3664         int i;
3665
3666         if (!ioa_cfg->sis64) {
3667                 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
3668                 return -EINVAL;
3669         }
3670         if (kstrtoul(buf, 10, &user_iopoll_weight))
3671                 return -EINVAL;
3672
3673         if (user_iopoll_weight > 256) {
3674                 dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must not exceed 256\n");
3675                 return -EINVAL;
3676         }
3677
3678         if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3679                 dev_info(&ioa_cfg->pdev->dev, "Requested blk-iopoll weight matches the current setting\n");
3680                 return strlen(buf);
3681         }
3682
3683         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3684                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3685                         blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
3686         }
3687
3688         spin_lock_irqsave(shost->host_lock, lock_flags);
3689         ioa_cfg->iopoll_weight = user_iopoll_weight;
3690         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3691                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3692                         blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
3693                                         ioa_cfg->iopoll_weight, ipr_iopoll);
3694                         blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
3695                 }
3696         }
3697         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3698
3699         return strlen(buf);
3700 }
3701
3702 static struct device_attribute ipr_iopoll_weight_attr = {
3703         .attr = {
3704                 .name =         "iopoll_weight",
3705                 .mode =         S_IRUGO | S_IWUSR,
3706         },
3707         .show = ipr_show_iopoll_weight,
3708         .store = ipr_store_iopoll_weight
3709 };
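/*
 * Example of switching a sis64 adapter with multiple HRRQs into polled
 * completion mode (hypothetical host number; values above 256 are rejected):
 *
 *   echo 64 > /sys/class/scsi_host/host0/iopoll_weight
 *   echo 0  > /sys/class/scsi_host/host0/iopoll_weight   # back to interrupts
 */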
3710
3711 /**
3712  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3713  * @buf_len:            buffer length
3714  *
3715  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3716  * list to use for microcode download
3717  *
3718  * Return value:
3719  *      pointer to sglist / NULL on failure
3720  **/
3721 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3722 {
3723         int sg_size, order, bsize_elem, num_elem, i, j;
3724         struct ipr_sglist *sglist;
3725         struct scatterlist *scatterlist;
3726         struct page *page;
3727
3728         /* Get the minimum size per scatter/gather element */
3729         sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3730
3731         /* Get the actual size per element */
3732         order = get_order(sg_size);
3733
3734         /* Determine the actual number of bytes per element */
3735         bsize_elem = PAGE_SIZE * (1 << order);
3736
3737         /* Determine the actual number of sg entries needed */
3738         if (buf_len % bsize_elem)
3739                 num_elem = (buf_len / bsize_elem) + 1;
3740         else
3741                 num_elem = buf_len / bsize_elem;
3742
3743         /* Allocate a scatter/gather list for the DMA */
3744         sglist = kzalloc(sizeof(struct ipr_sglist) +
3745                          (sizeof(struct scatterlist) * (num_elem - 1)),
3746                          GFP_KERNEL);
3747
3748         if (sglist == NULL) {
3749                 ipr_trace;
3750                 return NULL;
3751         }
3752
3753         scatterlist = sglist->scatterlist;
3754         sg_init_table(scatterlist, num_elem);
3755
3756         sglist->order = order;
3757         sglist->num_sg = num_elem;
3758
3759         /* Allocate a bunch of sg elements */
3760         for (i = 0; i < num_elem; i++) {
3761                 page = alloc_pages(GFP_KERNEL, order);
3762                 if (!page) {
3763                         ipr_trace;
3764
3765                         /* Free up what we already allocated */
3766                         for (j = i - 1; j >= 0; j--)
3767                                 __free_pages(sg_page(&scatterlist[j]), order);
3768                         kfree(sglist);
3769                         return NULL;
3770                 }
3771
3772                 sg_set_page(&scatterlist[i], page, 0, 0);
3773         }
3774
3775         return sglist;
3776 }
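/*
 * Sizing sketch for the math above, assuming IPR_MAX_SGLIST is 64 and 4K
 * pages: a 1MB image gives sg_size = 1048576 / 63 = 16643 bytes, get_order()
 * rounds that up to order 3 (32KB per element), so num_elem =
 * 1048576 / 32768 = 32 scatter/gather entries are allocated.
 */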
3777
3778 /**
3779  * ipr_free_ucode_buffer - Frees a microcode download buffer
3780  * @sglist:             scatter/gather list pointer
3781  *
3782  * Free a DMA'able ucode download buffer previously allocated with
3783  * ipr_alloc_ucode_buffer
3784  *
3785  * Return value:
3786  *      nothing
3787  **/
3788 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3789 {
3790         int i;
3791
3792         for (i = 0; i < sglist->num_sg; i++)
3793                 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3794
3795         kfree(sglist);
3796 }
3797
3798 /**
3799  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3800  * @sglist:             scatter/gather list pointer
3801  * @buffer:             buffer pointer
3802  * @len:                buffer length
3803  *
3804  * Copy a microcode image from a user buffer into a buffer allocated by
3805  * ipr_alloc_ucode_buffer
3806  *
3807  * Return value:
3808  *      0 on success
3809  **/
3810 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3811                                  u8 *buffer, u32 len)
3812 {
3813         int bsize_elem, i;
3814         struct scatterlist *scatterlist;
3815         void *kaddr;
3816
3817         /* Determine the actual number of bytes per element */
3818         bsize_elem = PAGE_SIZE * (1 << sglist->order);
3819
3820         scatterlist = sglist->scatterlist;
3821
3822         for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3823                 struct page *page = sg_page(&scatterlist[i]);
3824
3825                 kaddr = kmap(page);
3826                 memcpy(kaddr, buffer, bsize_elem);
3827                 kunmap(page);
3828
3829                 scatterlist[i].length = bsize_elem;
3835         }
3836
3837         if (len % bsize_elem) {
3838                 struct page *page = sg_page(&scatterlist[i]);
3839
3840                 kaddr = kmap(page);
3841                 memcpy(kaddr, buffer, len % bsize_elem);
3842                 kunmap(page);
3843
3844                 scatterlist[i].length = len % bsize_elem;
3845         }
3846
3847         sglist->buffer_len = len;
3848         return 0;
3849 }
3850
3851 /**
3852  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3853  * @ipr_cmd:            ipr command struct
3854  * @sglist:             scatter/gather list
3855  *
3856  * Builds a microcode download IOA data list (IOADL).
3857  *
3858  **/
3859 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3860                                     struct ipr_sglist *sglist)
3861 {
3862         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3863         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3864         struct scatterlist *scatterlist = sglist->scatterlist;
3865         int i;
3866
3867         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3868         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3869         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3870
3871         ioarcb->ioadl_len =
3872                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3873         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3874                 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3875                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3876                 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3877         }
3878
3879         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3880 }
3881
3882 /**
3883  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3884  * @ipr_cmd:    ipr command struct
3885  * @sglist:             scatter/gather list
3886  *
3887  * Builds a microcode download IOA data list (IOADL).
3888  *
3889  **/
3890 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3891                                   struct ipr_sglist *sglist)
3892 {
3893         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3894         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3895         struct scatterlist *scatterlist = sglist->scatterlist;
3896         int i;
3897
3898         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3899         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3900         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3901
3902         ioarcb->ioadl_len =
3903                 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3904
3905         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3906                 ioadl[i].flags_and_data_len =
3907                         cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3908                 ioadl[i].address =
3909                         cpu_to_be32(sg_dma_address(&scatterlist[i]));
3910         }
3911
3912         ioadl[i-1].flags_and_data_len |=
3913                 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3914 }
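/*
 * Note the two descriptor layouts: the sis64 form above carries a 64-bit DMA
 * address with separate 32-bit flag and length words, while this legacy form
 * packs the flags and data length into a single big-endian word alongside a
 * 32-bit address. Both mark the final element with IPR_IOADL_FLAGS_LAST so
 * the adapter knows where the list ends.
 */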
3915
3916 /**
3917  * ipr_update_ioa_ucode - Update IOA's microcode
3918  * @ioa_cfg:    ioa config struct
3919  * @sglist:             scatter/gather list
3920  *
3921  * Initiate an adapter reset to update the IOA's microcode
3922  *
3923  * Return value:
3924  *      0 on success / -EIO on failure
3925  **/
3926 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3927                                 struct ipr_sglist *sglist)
3928 {
3929         unsigned long lock_flags;
3930
3931         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3932         while (ioa_cfg->in_reset_reload) {
3933                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3934                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3935                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3936         }
3937
3938         if (ioa_cfg->ucode_sglist) {
3939                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3940                 dev_err(&ioa_cfg->pdev->dev,
3941                         "Microcode download already in progress\n");
3942                 return -EIO;
3943         }
3944
3945         sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3946                                         sglist->num_sg, DMA_TO_DEVICE);
3947
3948         if (!sglist->num_dma_sg) {
3949                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3950                 dev_err(&ioa_cfg->pdev->dev,
3951                         "Failed to map microcode download buffer!\n");
3952                 return -EIO;
3953         }
3954
3955         ioa_cfg->ucode_sglist = sglist;
3956         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3957         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3958         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3959
3960         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3961         ioa_cfg->ucode_sglist = NULL;
3962         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3963         return 0;
3964 }
3965
3966 /**
3967  * ipr_store_update_fw - Update the firmware on the adapter
3968  * @dev:        device struct
3969  * @buf:        buffer
3970  * @count:      buffer size
3971  *
3972  * This function will update the firmware on the adapter.
3973  *
3974  * Return value:
3975  *      count on success / other on failure
3976  **/
3977 static ssize_t ipr_store_update_fw(struct device *dev,
3978                                    struct device_attribute *attr,
3979                                    const char *buf, size_t count)
3980 {
3981         struct Scsi_Host *shost = class_to_shost(dev);
3982         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3983         struct ipr_ucode_image_header *image_hdr;
3984         const struct firmware *fw_entry;
3985         struct ipr_sglist *sglist;
3986         char fname[100];
3987         char *src, *endline;
3988         int result, dnld_size;
3989
3990         if (!capable(CAP_SYS_ADMIN))
3991                 return -EACCES;
3992
3993         snprintf(fname, sizeof(fname), "%s", buf);
3994         /* strip the trailing newline sysfs writes typically include */
             endline = strchr(fname, '\n');
             if (endline)
                     *endline = '\0';
3995
3996         if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3997                 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3998                 return -EIO;
3999         }
4000
4001         image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4002
4003         src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4004         dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4005         sglist = ipr_alloc_ucode_buffer(dnld_size);
4006
4007         if (!sglist) {
4008                 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4009                 release_firmware(fw_entry);
4010                 return -ENOMEM;
4011         }
4012
4013         result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4014
4015         if (result) {
4016                 dev_err(&ioa_cfg->pdev->dev,
4017                         "Microcode buffer copy to DMA buffer failed\n");
4018                 goto out;
4019         }
4020
4021         ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
4022
4023         result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4024
4025         if (!result)
4026                 result = count;
4027 out:
4028         ipr_free_ucode_buffer(sglist);
4029         release_firmware(fw_entry);
4030         return result;
4031 }
4032
4033 static struct device_attribute ipr_update_fw_attr = {
4034         .attr = {
4035                 .name =         "update_fw",
4036                 .mode =         S_IWUSR,
4037         },
4038         .store = ipr_store_update_fw
4039 };
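/*
 * Firmware updates are driven entirely from userspace: the written string is
 * treated as a file name that request_firmware() resolves (typically under
 * /lib/firmware), e.g. with a hypothetical image name and host number:
 *
 *   echo ibm-adapter-ucode.bin > /sys/class/scsi_host/host0/update_fw
 */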
4040
4041 /**
4042  * ipr_show_fw_type - Show the adapter's firmware type.
4043  * @dev:        class device struct
4044  * @buf:        buffer
4045  *
4046  * Return value:
4047  *      number of bytes printed to buffer
4048  **/
4049 static ssize_t ipr_show_fw_type(struct device *dev,
4050                                 struct device_attribute *attr, char *buf)
4051 {
4052         struct Scsi_Host *shost = class_to_shost(dev);
4053         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4054         unsigned long lock_flags = 0;
4055         int len;
4056
4057         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4058         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4059         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4060         return len;
4061 }
4062
4063 static struct device_attribute ipr_ioa_fw_type_attr = {
4064         .attr = {
4065                 .name =         "fw_type",
4066                 .mode =         S_IRUGO,
4067         },
4068         .show = ipr_show_fw_type
4069 };
4070
4071 static struct device_attribute *ipr_ioa_attrs[] = {
4072         &ipr_fw_version_attr,
4073         &ipr_log_level_attr,
4074         &ipr_diagnostics_attr,
4075         &ipr_ioa_state_attr,
4076         &ipr_ioa_reset_attr,
4077         &ipr_update_fw_attr,
4078         &ipr_ioa_fw_type_attr,
4079         &ipr_iopoll_weight_attr,
4080         NULL,
4081 };
4082
4083 #ifdef CONFIG_SCSI_IPR_DUMP
4084 /**
4085  * ipr_read_dump - Dump the adapter
4086  * @filp:               open sysfs file
4087  * @kobj:               kobject struct
4088  * @bin_attr:           bin_attribute struct
4089  * @buf:                buffer
4090  * @off:                offset
4091  * @count:              buffer size
4092  *
4093  * Return value:
 *      number of bytes read / negative errno on failure
4095  **/
4096 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4097                              struct bin_attribute *bin_attr,
4098                              char *buf, loff_t off, size_t count)
4099 {
4100         struct device *cdev = container_of(kobj, struct device, kobj);
4101         struct Scsi_Host *shost = class_to_shost(cdev);
4102         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4103         struct ipr_dump *dump;
4104         unsigned long lock_flags = 0;
4105         char *src;
4106         int len, sdt_end;
4107         size_t rc = count;
4108
4109         if (!capable(CAP_SYS_ADMIN))
4110                 return -EACCES;
4111
4112         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4113         dump = ioa_cfg->dump;
4114
4115         if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4116                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4117                 return 0;
4118         }
4119         kref_get(&dump->kref);
4120         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4121
4122         if (off > dump->driver_dump.hdr.len) {
4123                 kref_put(&dump->kref, ipr_release_dump);
4124                 return 0;
4125         }
4126
4127         if (off + count > dump->driver_dump.hdr.len) {
4128                 count = dump->driver_dump.hdr.len - off;
4129                 rc = count;
4130         }
4131
4132         if (count && off < sizeof(dump->driver_dump)) {
4133                 if (off + count > sizeof(dump->driver_dump))
4134                         len = sizeof(dump->driver_dump) - off;
4135                 else
4136                         len = count;
                src = (char *)&dump->driver_dump + off;
4138                 memcpy(buf, src, len);
4139                 buf += len;
4140                 off += len;
4141                 count -= len;
4142         }
4143
4144         off -= sizeof(dump->driver_dump);
4145
4146         if (ioa_cfg->sis64)
4147                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4148                           (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4149                            sizeof(struct ipr_sdt_entry));
4150         else
4151                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4152                           (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4153
4154         if (count && off < sdt_end) {
4155                 if (off + count > sdt_end)
4156                         len = sdt_end - off;
4157                 else
4158                         len = count;
                src = (char *)&dump->ioa_dump + off;
4160                 memcpy(buf, src, len);
4161                 buf += len;
4162                 off += len;
4163                 count -= len;
4164         }
4165
4166         off -= sdt_end;
4167
4168         while (count) {
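        /*
         * The remainder of the dump lives in an array of separately
         * allocated pages (ioa_dump.ioa_data).  Copy it out one page
         * fragment at a time so that no single memcpy crosses a page
         * boundary.
         */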
4169                 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4170                         len = PAGE_ALIGN(off) - off;
4171                 else
4172                         len = count;
                src = (char *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4174                 src += off & ~PAGE_MASK;
4175                 memcpy(buf, src, len);
4176                 buf += len;
4177                 off += len;
4178                 count -= len;
4179         }
4180
4181         kref_put(&dump->kref, ipr_release_dump);
4182         return rc;
4183 }
4184
4185 /**
4186  * ipr_alloc_dump - Prepare for adapter dump
4187  * @ioa_cfg:    ioa config struct
4188  *
4189  * Return value:
4190  *      0 on success / other on failure
4191  **/
4192 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4193 {
4194         struct ipr_dump *dump;
4195         __be32 **ioa_data;
4196         unsigned long lock_flags = 0;
4197
4198         dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4199
4200         if (!dump) {
4201                 ipr_err("Dump memory allocation failed\n");
4202                 return -ENOMEM;
4203         }
4204
4205         if (ioa_cfg->sis64)
4206                 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4207         else
4208                 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4209
4210         if (!ioa_data) {
4211                 ipr_err("Dump memory allocation failed\n");
4212                 kfree(dump);
4213                 return -ENOMEM;
4214         }
4215
4216         dump->ioa_dump.ioa_data = ioa_data;
4217
4218         kref_init(&dump->kref);
4219         dump->ioa_cfg = ioa_cfg;
4220
4221         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4222
4223         if (INACTIVE != ioa_cfg->sdt_state) {
4224                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4225                 vfree(dump->ioa_dump.ioa_data);
4226                 kfree(dump);
4227                 return 0;
4228         }
4229
4230         ioa_cfg->dump = dump;
4231         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4232         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4233                 ioa_cfg->dump_taken = 1;
4234                 schedule_work(&ioa_cfg->work_q);
4235         }
4236         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4237
4238         return 0;
4239 }
4240
4241 /**
4242  * ipr_free_dump - Free adapter dump memory
4243  * @ioa_cfg:    ioa config struct
4244  *
4245  * Return value:
4246  *      0 on success / other on failure
4247  **/
4248 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4249 {
4250         struct ipr_dump *dump;
4251         unsigned long lock_flags = 0;
4252
4253         ENTER;
4254
4255         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4256         dump = ioa_cfg->dump;
4257         if (!dump) {
4258                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4259                 return 0;
4260         }
4261
4262         ioa_cfg->dump = NULL;
4263         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4264
4265         kref_put(&dump->kref, ipr_release_dump);
4266
4267         LEAVE;
4268         return 0;
4269 }
4270
4271 /**
4272  * ipr_write_dump - Setup dump state of adapter
4273  * @filp:               open sysfs file
4274  * @kobj:               kobject struct
4275  * @bin_attr:           bin_attribute struct
4276  * @buf:                buffer
4277  * @off:                offset
4278  * @count:              buffer size
4279  *
4280  * Return value:
 *      number of bytes written / negative errno on failure
4282  **/
4283 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4284                               struct bin_attribute *bin_attr,
4285                               char *buf, loff_t off, size_t count)
4286 {
4287         struct device *cdev = container_of(kobj, struct device, kobj);
4288         struct Scsi_Host *shost = class_to_shost(cdev);
4289         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4290         int rc;
4291
4292         if (!capable(CAP_SYS_ADMIN))
4293                 return -EACCES;
4294
4295         if (buf[0] == '1')
4296                 rc = ipr_alloc_dump(ioa_cfg);
4297         else if (buf[0] == '0')
4298                 rc = ipr_free_dump(ioa_cfg);
4299         else
4300                 return -EINVAL;
4301
4302         if (rc)
4303                 return rc;
4304         else
4305                 return count;
4306 }
4307
4308 static struct bin_attribute ipr_dump_attr = {
4309         .attr = {
4310                 .name = "dump",
4311                 .mode = S_IRUSR | S_IWUSR,
4312         },
4313         .size = 0,
4314         .read = ipr_read_dump,
4315         .write = ipr_write_dump
4316 };
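
/*
 * Example usage of the dump attribute from user space (the host number
 * is illustrative only):
 *
 *   echo 1 > /sys/class/scsi_host/host0/dump    # prepare dump buffers
 *   cat /sys/class/scsi_host/host0/dump > ioa.dump
 *   echo 0 > /sys/class/scsi_host/host0/dump    # free dump memory
 */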
4317 #else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4319 #endif
4320
4321 /**
4322  * ipr_change_queue_depth - Change the device's queue depth
4323  * @sdev:       scsi device struct
4324  * @qdepth:     depth to set
4325  * @reason:     calling context
4326  *
4327  * Return value:
4328  *      actual depth set
4329  **/
4330 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
4331                                   int reason)
4332 {
4333         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4334         struct ipr_resource_entry *res;
4335         unsigned long lock_flags = 0;
4336
4337         if (reason != SCSI_QDEPTH_DEFAULT)
4338                 return -EOPNOTSUPP;
4339
4340         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4341         res = (struct ipr_resource_entry *)sdev->hostdata;
4342
4343         if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4344                 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4345         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4346
4347         scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4348         return sdev->queue_depth;
4349 }
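
/*
 * The depth set above is normally driven through the generic SCSI sysfs
 * queue_depth attribute; an illustrative invocation (the device name is
 * an example only):
 *
 *   echo 16 > /sys/block/sdb/device/queue_depth
 */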
4350
4351 /**
4352  * ipr_change_queue_type - Change the device's queue type
 * @sdev:       scsi device struct
4354  * @tag_type:   type of tags to use
4355  *
4356  * Return value:
4357  *      actual queue type set
4358  **/
4359 static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4360 {
4361         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4362         struct ipr_resource_entry *res;
4363         unsigned long lock_flags = 0;
4364
4365         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4366         res = (struct ipr_resource_entry *)sdev->hostdata;
4367
4368         if (res) {
4369                 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
4370                         /*
4371                          * We don't bother quiescing the device here since the
4372                          * adapter firmware does it for us.
4373                          */
4374                         scsi_set_tag_type(sdev, tag_type);
4375
4376                         if (tag_type)
4377                                 scsi_activate_tcq(sdev, sdev->queue_depth);
4378                         else
4379                                 scsi_deactivate_tcq(sdev, sdev->queue_depth);
4380                 } else
4381                         tag_type = 0;
4382         } else
4383                 tag_type = 0;
4384
4385         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4386         return tag_type;
4387 }
4388
4389 /**
4390  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4391  * @dev:        device struct
4392  * @attr:       device attribute structure
4393  * @buf:        buffer
4394  *
4395  * Return value:
4396  *      number of bytes printed to buffer
4397  **/
4398 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4399 {
4400         struct scsi_device *sdev = to_scsi_device(dev);
4401         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4402         struct ipr_resource_entry *res;
4403         unsigned long lock_flags = 0;
4404         ssize_t len = -ENXIO;
4405
4406         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4407         res = (struct ipr_resource_entry *)sdev->hostdata;
4408         if (res)
4409                 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4410         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4411         return len;
4412 }
4413
4414 static struct device_attribute ipr_adapter_handle_attr = {
4415         .attr = {
4416                 .name =         "adapter_handle",
4417                 .mode =         S_IRUSR,
4418         },
4419         .show = ipr_show_adapter_handle
4420 };
4421
4422 /**
4423  * ipr_show_resource_path - Show the resource path or the resource address for
4424  *                          this device.
4425  * @dev:        device struct
4426  * @attr:       device attribute structure
4427  * @buf:        buffer
4428  *
4429  * Return value:
4430  *      number of bytes printed to buffer
4431  **/
4432 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4433 {
4434         struct scsi_device *sdev = to_scsi_device(dev);
4435         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4436         struct ipr_resource_entry *res;
4437         unsigned long lock_flags = 0;
4438         ssize_t len = -ENXIO;
4439         char buffer[IPR_MAX_RES_PATH_LENGTH];
4440
4441         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4442         res = (struct ipr_resource_entry *)sdev->hostdata;
4443         if (res && ioa_cfg->sis64)
4444                 len = snprintf(buf, PAGE_SIZE, "%s\n",
4445                                __ipr_format_res_path(res->res_path, buffer,
4446                                                      sizeof(buffer)));
4447         else if (res)
4448                 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4449                                res->bus, res->target, res->lun);
4450
4451         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4452         return len;
4453 }
4454
4455 static struct device_attribute ipr_resource_path_attr = {
4456         .attr = {
4457                 .name =         "resource_path",
4458                 .mode =         S_IRUGO,
4459         },
4460         .show = ipr_show_resource_path
4461 };
4462
4463 /**
4464  * ipr_show_device_id - Show the device_id for this device.
4465  * @dev:        device struct
4466  * @attr:       device attribute structure
4467  * @buf:        buffer
4468  *
4469  * Return value:
4470  *      number of bytes printed to buffer
4471  **/
4472 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4473 {
4474         struct scsi_device *sdev = to_scsi_device(dev);
4475         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4476         struct ipr_resource_entry *res;
4477         unsigned long lock_flags = 0;
4478         ssize_t len = -ENXIO;
4479
4480         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4481         res = (struct ipr_resource_entry *)sdev->hostdata;
4482         if (res && ioa_cfg->sis64)
4483                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4484         else if (res)
4485                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4486
4487         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4488         return len;
4489 }
4490
4491 static struct device_attribute ipr_device_id_attr = {
4492         .attr = {
4493                 .name =         "device_id",
4494                 .mode =         S_IRUGO,
4495         },
4496         .show = ipr_show_device_id
4497 };
4498
4499 /**
4500  * ipr_show_resource_type - Show the resource type for this device.
4501  * @dev:        device struct
4502  * @attr:       device attribute structure
4503  * @buf:        buffer
4504  *
4505  * Return value:
4506  *      number of bytes printed to buffer
4507  **/
4508 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4509 {
4510         struct scsi_device *sdev = to_scsi_device(dev);
4511         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4512         struct ipr_resource_entry *res;
4513         unsigned long lock_flags = 0;
4514         ssize_t len = -ENXIO;
4515
4516         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4517         res = (struct ipr_resource_entry *)sdev->hostdata;
4518
4519         if (res)
4520                 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4521
4522         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4523         return len;
4524 }
4525
4526 static struct device_attribute ipr_resource_type_attr = {
4527         .attr = {
4528                 .name =         "resource_type",
4529                 .mode =         S_IRUGO,
4530         },
4531         .show = ipr_show_resource_type
4532 };
4533
4534 static struct device_attribute *ipr_dev_attrs[] = {
4535         &ipr_adapter_handle_attr,
4536         &ipr_resource_path_attr,
4537         &ipr_device_id_attr,
4538         &ipr_resource_type_attr,
4539         NULL,
4540 };
4541
4542 /**
4543  * ipr_biosparam - Return the HSC mapping
4544  * @sdev:                       scsi device struct
4545  * @block_device:       block device pointer
4546  * @capacity:           capacity of the device
4547  * @parm:                       Array containing returned HSC values.
4548  *
4549  * This function generates the HSC parms that fdisk uses.
4550  * We want to make sure we return something that places partitions
4551  * on 4k boundaries for best performance with the IOA.
4552  *
4553  * Return value:
4554  *      0 on success
4555  **/
4556 static int ipr_biosparam(struct scsi_device *sdev,
4557                          struct block_device *block_device,
4558                          sector_t capacity, int *parm)
4559 {
4560         int heads, sectors;
4561         sector_t cylinders;
4562
4563         heads = 128;
4564         sectors = 32;
4565
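        /*
         * 128 heads * 32 sectors = 4096 sectors per cylinder (2MB with
         * 512 byte blocks), so partitions placed on cylinder boundaries
         * are automatically 4k aligned as well.
         */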
4566         cylinders = capacity;
4567         sector_div(cylinders, (128 * 32));
4568
4569         /* return result */
4570         parm[0] = heads;
4571         parm[1] = sectors;
4572         parm[2] = cylinders;
4573
4574         return 0;
4575 }
4576
4577 /**
4578  * ipr_find_starget - Find target based on bus/target.
4579  * @starget:    scsi target struct
4580  *
4581  * Return value:
4582  *      resource entry pointer if found / NULL if not found
4583  **/
4584 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4585 {
4586         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4587         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4588         struct ipr_resource_entry *res;
4589
4590         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4591                 if ((res->bus == starget->channel) &&
4592                     (res->target == starget->id)) {
4593                         return res;
4594                 }
4595         }
4596
4597         return NULL;
4598 }
4599
4600 static struct ata_port_info sata_port_info;
4601
4602 /**
4603  * ipr_target_alloc - Prepare for commands to a SCSI target
4604  * @starget:    scsi target struct
4605  *
4606  * If the device is a SATA device, this function allocates an
4607  * ATA port with libata, else it does nothing.
4608  *
4609  * Return value:
4610  *      0 on success / non-0 on failure
4611  **/
4612 static int ipr_target_alloc(struct scsi_target *starget)
4613 {
4614         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4615         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4616         struct ipr_sata_port *sata_port;
4617         struct ata_port *ap;
4618         struct ipr_resource_entry *res;
4619         unsigned long lock_flags;
4620
4621         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4622         res = ipr_find_starget(starget);
4623         starget->hostdata = NULL;
4624
4625         if (res && ipr_is_gata(res)) {
4626                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4627                 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4628                 if (!sata_port)
4629                         return -ENOMEM;
4630
4631                 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4632                 if (ap) {
4633                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4634                         sata_port->ioa_cfg = ioa_cfg;
4635                         sata_port->ap = ap;
4636                         sata_port->res = res;
4637
4638                         res->sata_port = sata_port;
4639                         ap->private_data = sata_port;
4640                         starget->hostdata = sata_port;
4641                 } else {
4642                         kfree(sata_port);
4643                         return -ENOMEM;
4644                 }
4645         }
4646         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4647
4648         return 0;
4649 }
4650
4651 /**
4652  * ipr_target_destroy - Destroy a SCSI target
4653  * @starget:    scsi target struct
4654  *
4655  * If the device was a SATA device, this function frees the libata
4656  * ATA port, else it does nothing.
4657  *
4658  **/
4659 static void ipr_target_destroy(struct scsi_target *starget)
4660 {
4661         struct ipr_sata_port *sata_port = starget->hostdata;
4662         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4663         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4664
4665         if (ioa_cfg->sis64) {
4666                 if (!ipr_find_starget(starget)) {
4667                         if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4668                                 clear_bit(starget->id, ioa_cfg->array_ids);
4669                         else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4670                                 clear_bit(starget->id, ioa_cfg->vset_ids);
4671                         else if (starget->channel == 0)
4672                                 clear_bit(starget->id, ioa_cfg->target_ids);
4673                 }
4674         }
4675
4676         if (sata_port) {
4677                 starget->hostdata = NULL;
4678                 ata_sas_port_destroy(sata_port->ap);
4679                 kfree(sata_port);
4680         }
4681 }
4682
4683 /**
4684  * ipr_find_sdev - Find device based on bus/target/lun.
4685  * @sdev:       scsi device struct
4686  *
4687  * Return value:
4688  *      resource entry pointer if found / NULL if not found
4689  **/
4690 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4691 {
4692         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4693         struct ipr_resource_entry *res;
4694
4695         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4696                 if ((res->bus == sdev->channel) &&
4697                     (res->target == sdev->id) &&
4698                     (res->lun == sdev->lun))
4699                         return res;
4700         }
4701
4702         return NULL;
4703 }
4704
4705 /**
4706  * ipr_slave_destroy - Unconfigure a SCSI device
4707  * @sdev:       scsi device struct
4708  *
4709  * Return value:
4710  *      nothing
4711  **/
4712 static void ipr_slave_destroy(struct scsi_device *sdev)
4713 {
4714         struct ipr_resource_entry *res;
4715         struct ipr_ioa_cfg *ioa_cfg;
4716         unsigned long lock_flags = 0;
4717
4718         ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4719
4720         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4721         res = (struct ipr_resource_entry *) sdev->hostdata;
4722         if (res) {
4723                 if (res->sata_port)
4724                         res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4725                 sdev->hostdata = NULL;
4726                 res->sdev = NULL;
4727                 res->sata_port = NULL;
4728         }
4729         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4730 }
4731
4732 /**
4733  * ipr_slave_configure - Configure a SCSI device
4734  * @sdev:       scsi device struct
4735  *
4736  * This function configures the specified scsi device.
4737  *
4738  * Return value:
4739  *      0 on success
4740  **/
4741 static int ipr_slave_configure(struct scsi_device *sdev)
4742 {
4743         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4744         struct ipr_resource_entry *res;
4745         struct ata_port *ap = NULL;
4746         unsigned long lock_flags = 0;
4747         char buffer[IPR_MAX_RES_PATH_LENGTH];
4748
4749         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4750         res = sdev->hostdata;
4751         if (res) {
4752                 if (ipr_is_af_dasd_device(res))
4753                         sdev->type = TYPE_RAID;
4754                 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4755                         sdev->scsi_level = 4;
4756                         sdev->no_uld_attach = 1;
4757                 }
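                /* Volume sets get a VSET-specific request timeout and
                 * maximum transfer size.
                 */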
4758                 if (ipr_is_vset_device(res)) {
4759                         blk_queue_rq_timeout(sdev->request_queue,
4760                                              IPR_VSET_RW_TIMEOUT);
4761                         blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4762                 }
4763                 if (ipr_is_gata(res) && res->sata_port)
4764                         ap = res->sata_port->ap;
4765                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4766
4767                 if (ap) {
4768                         scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
4769                         ata_sas_slave_configure(sdev, ap);
4770                 } else
4771                         scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
4772                 if (ioa_cfg->sis64)
4773                         sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4774                                     ipr_format_res_path(ioa_cfg,
4775                                 res->res_path, buffer, sizeof(buffer)));
4776                 return 0;
4777         }
4778         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4779         return 0;
4780 }
4781
4782 /**
4783  * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4784  * @sdev:       scsi device struct
4785  *
4786  * This function initializes an ATA port so that future commands
4787  * sent through queuecommand will work.
4788  *
4789  * Return value:
4790  *      0 on success
4791  **/
4792 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4793 {
4794         struct ipr_sata_port *sata_port = NULL;
4795         int rc = -ENXIO;
4796
4797         ENTER;
4798         if (sdev->sdev_target)
4799                 sata_port = sdev->sdev_target->hostdata;
4800         if (sata_port) {
4801                 rc = ata_sas_port_init(sata_port->ap);
4802                 if (rc == 0)
4803                         rc = ata_sas_sync_probe(sata_port->ap);
4804         }
4805
4806         if (rc)
4807                 ipr_slave_destroy(sdev);
4808
4809         LEAVE;
4810         return rc;
4811 }
4812
4813 /**
4814  * ipr_slave_alloc - Prepare for commands to a device.
4815  * @sdev:       scsi device struct
4816  *
4817  * This function saves a pointer to the resource entry
4818  * in the scsi device struct if the device exists. We
4819  * can then use this pointer in ipr_queuecommand when
4820  * handling new commands.
4821  *
4822  * Return value:
4823  *      0 on success / -ENXIO if device does not exist
4824  **/
4825 static int ipr_slave_alloc(struct scsi_device *sdev)
4826 {
4827         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4828         struct ipr_resource_entry *res;
4829         unsigned long lock_flags;
4830         int rc = -ENXIO;
4831
4832         sdev->hostdata = NULL;
4833
4834         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4835
4836         res = ipr_find_sdev(sdev);
4837         if (res) {
4838                 res->sdev = sdev;
4839                 res->add_to_ml = 0;
4840                 res->in_erp = 0;
4841                 sdev->hostdata = res;
4842                 if (!ipr_is_naca_model(res))
4843                         res->needs_sync_complete = 1;
4844                 rc = 0;
4845                 if (ipr_is_gata(res)) {
4846                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4847                         return ipr_ata_slave_alloc(sdev);
4848                 }
4849         }
4850
4851         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4852
4853         return rc;
4854 }
4855
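/**
 * ipr_eh_host_reset - Reset the host adapter
 * @cmd:        scsi command struct
 *
 * Return value:
 *      SUCCESS / FAILED
 **/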
4856 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4857 {
4858         struct ipr_ioa_cfg *ioa_cfg;
4859         unsigned long lock_flags = 0;
4860         int rc = SUCCESS;
4861
4862         ENTER;
4863         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4864         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4865
4866         if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4867                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4868                 dev_err(&ioa_cfg->pdev->dev,
4869                         "Adapter being reset as a result of error recovery.\n");
4870
4871                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4872                         ioa_cfg->sdt_state = GET_DUMP;
4873         }
4874
4875         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4876         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4877         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4878
        /* If we got hit with a host reset while we were already resetting
         * the adapter for some reason, and that reset failed, the adapter
         * is now dead and the host reset must fail as well.
         */
4881         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4882                 ipr_trace;
4883                 rc = FAILED;
4884         }
4885
4886         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4887         LEAVE;
4888         return rc;
4889 }
4890
4891 /**
4892  * ipr_device_reset - Reset the device
4893  * @ioa_cfg:    ioa config struct
4894  * @res:                resource entry struct
4895  *
4896  * This function issues a device reset to the affected device.
4897  * If the device is a SCSI device, a LUN reset will be sent
4898  * to the device first. If that does not work, a target reset
4899  * will be sent. If the device is a SATA device, a PHY reset will
4900  * be sent.
4901  *
4902  * Return value:
4903  *      0 on success / non-zero on failure
4904  **/
4905 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4906                             struct ipr_resource_entry *res)
4907 {
4908         struct ipr_cmnd *ipr_cmd;
4909         struct ipr_ioarcb *ioarcb;
4910         struct ipr_cmd_pkt *cmd_pkt;
4911         struct ipr_ioarcb_ata_regs *regs;
4912         u32 ioasc;
4913
4914         ENTER;
4915         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4916         ioarcb = &ipr_cmd->ioarcb;
4917         cmd_pkt = &ioarcb->cmd_pkt;
4918
4919         if (ipr_cmd->ioa_cfg->sis64) {
4920                 regs = &ipr_cmd->i.ata_ioadl.regs;
4921                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4922         } else
4923                 regs = &ioarcb->u.add_data.u.regs;
4924
4925         ioarcb->res_handle = res->res_handle;
4926         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4927         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4928         if (ipr_is_gata(res)) {
4929                 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
4930                 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
4931                 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4932         }
4933
4934         ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4935         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4936         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
4937         if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4938                 if (ipr_cmd->ioa_cfg->sis64)
4939                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4940                                sizeof(struct ipr_ioasa_gata));
4941                 else
4942                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4943                                sizeof(struct ipr_ioasa_gata));
4944         }
4945
4946         LEAVE;
4947         return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
4948 }
4949
4950 /**
4951  * ipr_sata_reset - Reset the SATA port
4952  * @link:       SATA link to reset
4953  * @classes:    class of the attached device
4954  *
4955  * This function issues a SATA phy reset to the affected ATA link.
4956  *
4957  * Return value:
4958  *      0 on success / non-zero on failure
4959  **/
4960 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
4961                                 unsigned long deadline)
4962 {
4963         struct ipr_sata_port *sata_port = link->ap->private_data;
4964         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4965         struct ipr_resource_entry *res;
4966         unsigned long lock_flags = 0;
4967         int rc = -ENXIO;
4968
4969         ENTER;
4970         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
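        /* Wait for any in-progress adapter reset/reload to finish,
         * dropping the host lock while we sleep.
         */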
4971         while (ioa_cfg->in_reset_reload) {
4972                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4973                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4974                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4975         }
4976
4977         res = sata_port->res;
4978         if (res) {
4979                 rc = ipr_device_reset(ioa_cfg, res);
4980                 *classes = res->ata_class;
4981         }
4982
4983         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4984         LEAVE;
4985         return rc;
4986 }
4987
4988 /**
4989  * ipr_eh_dev_reset - Reset the device
4990  * @scsi_cmd:   scsi command struct
4991  *
4992  * This function issues a device reset to the affected device.
4993  * A LUN reset will be sent to the device first. If that does
4994  * not work, a target reset will be sent.
4995  *
4996  * Return value:
4997  *      SUCCESS / FAILED
4998  **/
4999 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5000 {
5001         struct ipr_cmnd *ipr_cmd;
5002         struct ipr_ioa_cfg *ioa_cfg;
5003         struct ipr_resource_entry *res;
5004         struct ata_port *ap;
5005         int rc = 0;
5006         struct ipr_hrr_queue *hrrq;
5007
5008         ENTER;
5009         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5010         res = scsi_cmd->device->hostdata;
5011
5012         if (!res)
5013                 return FAILED;
5014
        /*
         * If we are currently going through reset/reload, return failed.
         * This will force the mid-layer to call ipr_eh_host_reset, which
         * will then go to sleep and wait for the reset to complete.
         */
5020         if (ioa_cfg->in_reset_reload)
5021                 return FAILED;
5022         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5023                 return FAILED;
5024
5025         for_each_hrrq(hrrq, ioa_cfg) {
5026                 spin_lock(&hrrq->_lock);
5027                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5028                         if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5029                                 if (ipr_cmd->scsi_cmd)
5030                                         ipr_cmd->done = ipr_scsi_eh_done;
5031                                 if (ipr_cmd->qc)
5032                                         ipr_cmd->done = ipr_sata_eh_done;
5033                                 if (ipr_cmd->qc &&
5034                                     !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5035                                         ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5036                                         ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5037                                 }
5038                         }
5039                 }
5040                 spin_unlock(&hrrq->_lock);
5041         }
5042         res->resetting_device = 1;
5043         scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5044
5045         if (ipr_is_gata(res) && res->sata_port) {
5046                 ap = res->sata_port->ap;
5047                 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5048                 ata_std_error_handler(ap);
5049                 spin_lock_irq(scsi_cmd->device->host->host_lock);
5050
5051                 for_each_hrrq(hrrq, ioa_cfg) {
5052                         spin_lock(&hrrq->_lock);
5053                         list_for_each_entry(ipr_cmd,
5054                                             &hrrq->hrrq_pending_q, queue) {
5055                                 if (ipr_cmd->ioarcb.res_handle ==
5056                                     res->res_handle) {
5057                                         rc = -EIO;
5058                                         break;
5059                                 }
5060                         }
5061                         spin_unlock(&hrrq->_lock);
5062                 }
5063         } else
5064                 rc = ipr_device_reset(ioa_cfg, res);
5065         res->resetting_device = 0;
5066         res->reset_occurred = 1;
5067
5068         LEAVE;
5069         return rc ? FAILED : SUCCESS;
5070 }
5071
5072 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5073 {
5074         int rc;
5075
5076         spin_lock_irq(cmd->device->host->host_lock);
5077         rc = __ipr_eh_dev_reset(cmd);
5078         spin_unlock_irq(cmd->device->host->host_lock);
5079
5080         return rc;
5081 }
5082
5083 /**
5084  * ipr_bus_reset_done - Op done function for bus reset.
5085  * @ipr_cmd:    ipr command struct
5086  *
5087  * This function is the op done function for a bus reset
5088  *
5089  * Return value:
5090  *      none
5091  **/
5092 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5093 {
5094         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5095         struct ipr_resource_entry *res;
5096
5097         ENTER;
5098         if (!ioa_cfg->sis64)
5099                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5100                         if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5101                                 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5102                                 break;
5103                         }
5104                 }
5105
5106         /*
5107          * If abort has not completed, indicate the reset has, else call the
5108          * abort's done function to wake the sleeping eh thread
5109          */
5110         if (ipr_cmd->sibling->sibling)
5111                 ipr_cmd->sibling->sibling = NULL;
5112         else
5113                 ipr_cmd->sibling->done(ipr_cmd->sibling);
5114
5115         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5116         LEAVE;
5117 }
5118
5119 /**
5120  * ipr_abort_timeout - An abort task has timed out
5121  * @ipr_cmd:    ipr command struct
5122  *
 * This function handles the case where an abort task times out. If this
 * happens we issue a bus reset, since we have resources tied
5125  * up that must be freed before returning to the midlayer.
5126  *
5127  * Return value:
5128  *      none
5129  **/
5130 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5131 {
5132         struct ipr_cmnd *reset_cmd;
5133         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5134         struct ipr_cmd_pkt *cmd_pkt;
5135         unsigned long lock_flags = 0;
5136
5137         ENTER;
5138         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5139         if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5140                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5141                 return;
5142         }
5143
5144         sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5145         reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5146         ipr_cmd->sibling = reset_cmd;
5147         reset_cmd->sibling = ipr_cmd;
5148         reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5149         cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5150         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5151         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5152         cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5153
5154         ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5155         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5156         LEAVE;
5157 }
5158
5159 /**
5160  * ipr_cancel_op - Cancel specified op
5161  * @scsi_cmd:   scsi command struct
5162  *
5163  * This function cancels specified op.
5164  *
5165  * Return value:
5166  *      SUCCESS / FAILED
5167  **/
5168 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5169 {
5170         struct ipr_cmnd *ipr_cmd;
5171         struct ipr_ioa_cfg *ioa_cfg;
5172         struct ipr_resource_entry *res;
5173         struct ipr_cmd_pkt *cmd_pkt;
5174         u32 ioasc, int_reg;
5175         int op_found = 0;
5176         struct ipr_hrr_queue *hrrq;
5177
5178         ENTER;
5179         ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5180         res = scsi_cmd->device->hostdata;
5181
5182         /* If we are currently going through reset/reload, return failed.
5183          * This will force the mid-layer to call ipr_eh_host_reset,
5184          * which will then go to sleep and wait for the reset to complete
5185          */
5186         if (ioa_cfg->in_reset_reload ||
5187             ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5188                 return FAILED;
5189         if (!res)
5190                 return FAILED;
5191
5192         /*
         * If we are aborting a timed-out op, chances are that the timeout was
         * caused by an as-yet undetected EEH error. In such cases, reading a
         * register will trigger the EEH recovery infrastructure.
5196          */
5197         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5198
5199         if (!ipr_is_gscsi(res))
5200                 return FAILED;
5201
5202         for_each_hrrq(hrrq, ioa_cfg) {
5203                 spin_lock(&hrrq->_lock);
5204                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5205                         if (ipr_cmd->scsi_cmd == scsi_cmd) {
5206                                 ipr_cmd->done = ipr_scsi_eh_done;
5207                                 op_found = 1;
5208                                 break;
5209                         }
5210                 }
5211                 spin_unlock(&hrrq->_lock);
5212         }
5213
5214         if (!op_found)
5215                 return SUCCESS;
5216
5217         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5218         ipr_cmd->ioarcb.res_handle = res->res_handle;
5219         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5220         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5221         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5222         ipr_cmd->u.sdev = scsi_cmd->device;
5223
5224         scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5225                     scsi_cmd->cmnd[0]);
5226         ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5227         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5228
5229         /*
5230          * If the abort task timed out and we sent a bus reset, we will get
5231          * one the following responses to the abort
5232          */
5233         if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5234                 ioasc = 0;
5235                 ipr_trace;
5236         }
5237
5238         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5239         if (!ipr_is_naca_model(res))
5240                 res->needs_sync_complete = 1;
5241
5242         LEAVE;
5243         return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5244 }
5245
5246 /**
5247  * ipr_eh_abort - Abort a single op
5248  * @scsi_cmd:   scsi command struct
5249  *
5250  * Return value:
5251  *      SUCCESS / FAILED
5252  **/
5253 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5254 {
5255         unsigned long flags;
5256         int rc;
5257
5258         ENTER;
5259
5260         spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5261         rc = ipr_cancel_op(scsi_cmd);
5262         spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5263
5264         LEAVE;
5265         return rc;
5266 }
5267
5268 /**
5269  * ipr_handle_other_interrupt - Handle "other" interrupts
5270  * @ioa_cfg:    ioa config struct
5271  * @int_reg:    interrupt register
5272  *
5273  * Return value:
5274  *      IRQ_NONE / IRQ_HANDLED
5275  **/
5276 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5277                                               u32 int_reg)
5278 {
5279         irqreturn_t rc = IRQ_HANDLED;
5280         u32 int_mask_reg;
5281
5282         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5283         int_reg &= ~int_mask_reg;
5284
5285         /* If an interrupt on the adapter did not occur, ignore it.
5286          * Or in the case of SIS 64, check for a stage change interrupt.
5287          */
5288         if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5289                 if (ioa_cfg->sis64) {
5290                         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5291                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5292                         if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5293
5294                                 /* clear stage change */
5295                                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5296                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5297                                 list_del(&ioa_cfg->reset_cmd->queue);
5298                                 del_timer(&ioa_cfg->reset_cmd->timer);
5299                                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5300                                 return IRQ_HANDLED;
5301                         }
5302                 }
5303
5304                 return IRQ_NONE;
5305         }
5306
5307         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5308                 /* Mask the interrupt */
5309                 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5310
5311                 /* Clear the interrupt */
5312                 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
5313                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5314
5315                 list_del(&ioa_cfg->reset_cmd->queue);
5316                 del_timer(&ioa_cfg->reset_cmd->timer);
5317                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5318         } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5319                 if (ioa_cfg->clear_isr) {
5320                         if (ipr_debug && printk_ratelimit())
5321                                 dev_err(&ioa_cfg->pdev->dev,
5322                                         "Spurious interrupt detected. 0x%08X\n", int_reg);
5323                         writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5324                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5325                         return IRQ_NONE;
5326                 }
5327         } else {
5328                 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5329                         ioa_cfg->ioa_unit_checked = 1;
5330                 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5331                         dev_err(&ioa_cfg->pdev->dev,
5332                                 "No Host RRQ. 0x%08X\n", int_reg);
5333                 else
5334                         dev_err(&ioa_cfg->pdev->dev,
5335                                 "Permanent IOA failure. 0x%08X\n", int_reg);
5336
5337                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5338                         ioa_cfg->sdt_state = GET_DUMP;
5339
5340                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5341                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5342         }
5343
5344         return rc;
5345 }
5346
5347 /**
5348  * ipr_isr_eh - Interrupt service routine error handler
5349  * @ioa_cfg:    ioa config struct
 * @msg:        message to log
 * @number:     number to log with the message
5351  *
5352  * Return value:
5353  *      none
5354  **/
5355 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5356 {
5357         ioa_cfg->errors_logged++;
5358         dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5359
5360         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5361                 ioa_cfg->sdt_state = GET_DUMP;
5362
5363         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5364 }
5365
5366 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5367                                                 struct list_head *doneq)
5368 {
5369         u32 ioasc;
5370         u16 cmd_index;
5371         struct ipr_cmnd *ipr_cmd;
5372         struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5373         int num_hrrq = 0;
5374
5375         /* If interrupts are disabled, ignore the interrupt */
5376         if (!hrr_queue->allow_interrupts)
5377                 return 0;
5378
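        /*
         * The adapter flips the HRRQ toggle bit each time it wraps the
         * circular response queue.  An entry is new only while its toggle
         * bit matches the value the driver expects; the driver flips its
         * expected value when it wraps the queue below.
         */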
5379         while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5380                hrr_queue->toggle_bit) {
5381
5382                 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5383                              IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5384                              IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5385
5386                 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5387                              cmd_index < hrr_queue->min_cmd_id)) {
5388                         ipr_isr_eh(ioa_cfg,
                                "Invalid response handle from IOA:",
5390                                 cmd_index);
5391                         break;
5392                 }
5393
5394                 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5395                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5396
5397                 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5398
5399                 list_move_tail(&ipr_cmd->queue, doneq);
5400
5401                 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5402                         hrr_queue->hrrq_curr++;
5403                 } else {
5404                         hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5405                         hrr_queue->toggle_bit ^= 1u;
5406                 }
5407                 num_hrrq++;
5408                 if (budget > 0 && num_hrrq >= budget)
5409                         break;
5410         }
5411
5412         return num_hrrq;
5413 }
5414
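/**
 * ipr_iopoll - blk_iopoll callback for polled completion processing
 * @iop:        blk_iopoll struct embedded in the HRR queue
 * @budget:     maximum number of completions to process
 *
 * Return value:
 *      number of completed operations
 **/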
5415 static int ipr_iopoll(struct blk_iopoll *iop, int budget)
5416 {
5417         struct ipr_ioa_cfg *ioa_cfg;
5418         struct ipr_hrr_queue *hrrq;
5419         struct ipr_cmnd *ipr_cmd, *temp;
5420         unsigned long hrrq_flags;
5421         int completed_ops;
5422         LIST_HEAD(doneq);
5423
5424         hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5425         ioa_cfg = hrrq->ioa_cfg;
5426
5427         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5428         completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5429
5430         if (completed_ops < budget)
5431                 blk_iopoll_complete(iop);
5432         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5433
5434         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5435                 list_del(&ipr_cmd->queue);
5436                 del_timer(&ipr_cmd->timer);
5437                 ipr_cmd->fast_done(ipr_cmd);
5438         }
5439
5440         return completed_ops;
5441 }
5442
5443 /**
5444  * ipr_isr - Interrupt service routine
5445  * @irq:        irq number
5446  * @devp:       pointer to ioa config struct
5447  *
5448  * Return value:
5449  *      IRQ_NONE / IRQ_HANDLED
5450  **/
5451 static irqreturn_t ipr_isr(int irq, void *devp)
5452 {
5453         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5454         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5455         unsigned long hrrq_flags = 0;
5456         u32 int_reg = 0;
5457         int num_hrrq = 0;
5458         int irq_none = 0;
5459         struct ipr_cmnd *ipr_cmd, *temp;
5460         irqreturn_t rc = IRQ_NONE;
5461         LIST_HEAD(doneq);
5462
5463         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5464         /* If interrupts are disabled, ignore the interrupt */
5465         if (!hrrq->allow_interrupts) {
5466                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5467                 return IRQ_NONE;
5468         }
5469
5470         while (1) {
5471                 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
                        rc = IRQ_HANDLED;
5473
5474                         if (!ioa_cfg->clear_isr)
5475                                 break;
5476
5477                         /* Clear the PCI interrupt */
5478                         num_hrrq = 0;
5479                         do {
5480                                 writel(IPR_PCII_HRRQ_UPDATED,
5481                                      ioa_cfg->regs.clr_interrupt_reg32);
5482                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5483                         } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5484                                 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5485
5486                 } else if (rc == IRQ_NONE && irq_none == 0) {
5487                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5488                         irq_none++;
5489                 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5490                            int_reg & IPR_PCII_HRRQ_UPDATED) {
5491                         ipr_isr_eh(ioa_cfg,
                                "Error clearing HRRQ:", num_hrrq);
5493                         rc = IRQ_HANDLED;
5494                         break;
5495                 } else
5496                         break;
5497         }
5498
5499         if (unlikely(rc == IRQ_NONE))
5500                 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5501
5502         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5503         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5504                 list_del(&ipr_cmd->queue);
5505                 del_timer(&ipr_cmd->timer);
5506                 ipr_cmd->fast_done(ipr_cmd);
5507         }
5508         return rc;
5509 }
5510
5511 /**
5512  * ipr_isr_mhrrq - Interrupt service routine
5513  * @irq:        irq number
5514  * @devp:       pointer to ioa config struct
5515  *
5516  * Return value:
5517  *      IRQ_NONE / IRQ_HANDLED
5518  **/
5519 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5520 {
5521         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5522         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5523         unsigned long hrrq_flags = 0;
5524         struct ipr_cmnd *ipr_cmd, *temp;
5525         irqreturn_t rc = IRQ_NONE;
5526         LIST_HEAD(doneq);
5527
5528         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5529
5530         /* If interrupts are disabled, ignore the interrupt */
5531         if (!hrrq->allow_interrupts) {
5532                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5533                 return IRQ_NONE;
5534         }
5535
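        /*
         * With iopoll enabled on a multi-vector SIS-64 adapter, defer
         * completion processing to blk_iopoll; otherwise process the
         * HRRQ directly from interrupt context.
         */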
5536         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5537                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5538                        hrrq->toggle_bit) {
5539                         if (!blk_iopoll_sched_prep(&hrrq->iopoll))
5540                                 blk_iopoll_sched(&hrrq->iopoll);
5541                         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5542                         return IRQ_HANDLED;
5543                 }
5544         } else {
                if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
                        hrrq->toggle_bit)
                        if (ipr_process_hrrq(hrrq, -1, &doneq))
                                rc = IRQ_HANDLED;
5550         }
5551
5552         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5553
5554         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5555                 list_del(&ipr_cmd->queue);
5556                 del_timer(&ipr_cmd->timer);
5557                 ipr_cmd->fast_done(ipr_cmd);
5558         }
5559         return rc;
5560 }
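
/*
 * Both branches above use the same ownership test: the adapter inverts
 * the toggle bit it writes each time it wraps the host RRQ, so an entry
 * is fresh only while its toggle bit matches the value the host
 * expects, and the host flips its expected value on every wrap. A
 * minimal sketch of the idea with hypothetical types (example_hrrq is
 * not the driver's real structure):
 */
struct example_hrrq {
	unsigned int *curr;	/* entry being examined */
	unsigned int *start;	/* first entry of the ring */
	unsigned int *end;	/* one past the last entry */
	unsigned int toggle;	/* value that marks a fresh entry */
};

static int example_entry_is_new(struct example_hrrq *q, unsigned int toggle_bit)
{
	return (*q->curr & toggle_bit) == q->toggle;
}

static void example_advance(struct example_hrrq *q, unsigned int toggle_bit)
{
	if (++q->curr == q->end) {
		q->curr = q->start;		/* wrap around ... */
		q->toggle ^= toggle_bit;	/* ... and flip expectation */
	}
}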
5561
5562 /**
5563  * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5564  * @ioa_cfg:    ioa config struct
5565  * @ipr_cmd:    ipr command struct
5566  *
5567  * Return value:
5568  *      0 on success / -1 on failure
5569  **/
5570 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5571                              struct ipr_cmnd *ipr_cmd)
5572 {
5573         int i, nseg;
5574         struct scatterlist *sg;
5575         u32 length;
5576         u32 ioadl_flags = 0;
5577         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5578         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5579         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5580
5581         length = scsi_bufflen(scsi_cmd);
5582         if (!length)
5583                 return 0;
5584
5585         nseg = scsi_dma_map(scsi_cmd);
5586         if (nseg < 0) {
5587                 if (printk_ratelimit())
5588                         dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5589                 return -1;
5590         }
5591
5592         ipr_cmd->dma_use_sg = nseg;
5593
5594         ioarcb->data_transfer_length = cpu_to_be32(length);
5595         ioarcb->ioadl_len =
5596                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5597
5598         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5599                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5600                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5601         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5602                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5603
5604         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5605                 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5606                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5607                 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5608         }
5609
5610         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5611         return 0;
5612 }
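
/*
 * ipr_build_ioadl64() above follows the standard scatter/gather build:
 * map the buffer, emit one descriptor per DMA segment with the
 * direction flags, then OR a LAST flag into the final descriptor so
 * the adapter knows where the list ends. A minimal sketch with
 * hypothetical types (not the driver's real descriptor layout):
 */
struct example_sg_desc {
	unsigned long long addr;	/* DMA address of the segment */
	unsigned int len;		/* segment length in bytes */
	unsigned int flags;
};

#define EXAMPLE_SG_FLAG_LAST 0x1

static void example_build_sg(struct example_sg_desc *desc,
			     const unsigned long long *seg_addr,
			     const unsigned int *seg_len, int nseg)
{
	int i;

	for (i = 0; i < nseg; i++) {
		desc[i].addr = seg_addr[i];
		desc[i].len = seg_len[i];
		desc[i].flags = 0;	/* direction flags would go here */
	}
	if (nseg)
		desc[nseg - 1].flags |= EXAMPLE_SG_FLAG_LAST;
}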
5613
5614 /**
5615  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5616  * @ioa_cfg:    ioa config struct
5617  * @ipr_cmd:    ipr command struct
5618  *
5619  * Return value:
5620  *      0 on success / -1 on failure
5621  **/
5622 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5623                            struct ipr_cmnd *ipr_cmd)
5624 {
5625         int i, nseg;
5626         struct scatterlist *sg;
5627         u32 length;
5628         u32 ioadl_flags = 0;
5629         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5630         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5631         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5632
5633         length = scsi_bufflen(scsi_cmd);
5634         if (!length)
5635                 return 0;
5636
5637         nseg = scsi_dma_map(scsi_cmd);
5638         if (nseg < 0) {
5639                 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5640                 return -1;
5641         }
5642
5643         ipr_cmd->dma_use_sg = nseg;
5644
5645         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5646                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5647                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5648                 ioarcb->data_transfer_length = cpu_to_be32(length);
5649                 ioarcb->ioadl_len =
5650                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5651         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5652                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5653                 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5654                 ioarcb->read_ioadl_len =
5655                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5656         }
5657
5658         if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5659                 ioadl = ioarcb->u.add_data.u.ioadl;
5660                 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5661                                     offsetof(struct ipr_ioarcb, u.add_data));
5662                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5663         }
5664
5665         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5666                 ioadl[i].flags_and_data_len =
5667                         cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5668                 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5669         }
5670
5671         ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5672         return 0;
5673 }
5674
5675 /**
5676  * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5677  * @scsi_cmd:   scsi command struct
5678  *
5679  * Return value:
5680  *      task attributes
5681  **/
5682 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5683 {
5684         u8 tag[2];
5685         u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5686
5687         if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5688                 switch (tag[0]) {
5689                 case MSG_SIMPLE_TAG:
5690                         rc = IPR_FLAGS_LO_SIMPLE_TASK;
5691                         break;
5692                 case MSG_HEAD_TAG:
5693                         rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5694                         break;
5695                 case MSG_ORDERED_TAG:
5696                         rc = IPR_FLAGS_LO_ORDERED_TASK;
5697                         break;
5698                 }
5699         }
5700
5701         return rc;
5702 }
5703
5704 /**
5705  * ipr_erp_done - Process completion of ERP for a device
5706  * @ipr_cmd:            ipr command struct
5707  *
5708  * This function copies the sense buffer into the scsi_cmd
5709  * struct and calls the scsi_done function.
5710  *
5711  * Return value:
5712  *      nothing
5713  **/
5714 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5715 {
5716         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5717         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5718         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5719
5720         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5721                 scsi_cmd->result |= (DID_ERROR << 16);
5722                 scmd_printk(KERN_ERR, scsi_cmd,
5723                             "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5724         } else {
5725                 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5726                        SCSI_SENSE_BUFFERSIZE);
5727         }
5728
5729         if (res) {
5730                 if (!ipr_is_naca_model(res))
5731                         res->needs_sync_complete = 1;
5732                 res->in_erp = 0;
5733         }
5734         scsi_dma_unmap(ipr_cmd->scsi_cmd);
5735         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5736         scsi_cmd->scsi_done(scsi_cmd);
5737 }
5738
5739 /**
5740  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5741  * @ipr_cmd:    ipr command struct
5742  *
5743  * Return value:
5744  *      none
5745  **/
5746 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5747 {
5748         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5749         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5750         dma_addr_t dma_addr = ipr_cmd->dma_addr;
5751
5752         memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5753         ioarcb->data_transfer_length = 0;
5754         ioarcb->read_data_transfer_length = 0;
5755         ioarcb->ioadl_len = 0;
5756         ioarcb->read_ioadl_len = 0;
5757         ioasa->hdr.ioasc = 0;
5758         ioasa->hdr.residual_data_len = 0;
5759
5760         if (ipr_cmd->ioa_cfg->sis64)
5761                 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5762                         cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5763         else {
5764                 ioarcb->write_ioadl_addr =
5765                         cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5766                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5767         }
5768 }
5769
5770 /**
5771  * ipr_erp_request_sense - Send request sense to a device
5772  * @ipr_cmd:    ipr command struct
5773  *
5774  * This function sends a request sense to a device as a result
5775  * of a check condition.
5776  *
5777  * Return value:
5778  *      nothing
5779  **/
5780 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5781 {
5782         struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5783         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5784
5785         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5786                 ipr_erp_done(ipr_cmd);
5787                 return;
5788         }
5789
5790         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5791
5792         cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5793         cmd_pkt->cdb[0] = REQUEST_SENSE;
5794         cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5795         cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5796         cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5797         cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5798
5799         ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5800                        SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5801
5802         ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5803                    IPR_REQUEST_SENSE_TIMEOUT * 2);
5804 }
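
/*
 * REQUEST SENSE is a six-byte CDB: opcode 0x03 with the allocation
 * length in byte 4, which is exactly what the function above builds
 * before pointing the adapter's DMA at the command's own sense buffer.
 * A minimal sketch of the CDB layout (the field positions are defined
 * by SPC; the helper name is illustrative):
 */
static void example_build_request_sense_cdb(unsigned char cdb[6],
					    unsigned char alloc_len)
{
	cdb[0] = 0x03;		/* REQUEST SENSE opcode */
	cdb[1] = 0;		/* fixed-format sense (DESC bit clear) */
	cdb[2] = 0;
	cdb[3] = 0;
	cdb[4] = alloc_len;	/* max sense bytes the device may return */
	cdb[5] = 0;		/* CONTROL */
}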
5805
5806 /**
5807  * ipr_erp_cancel_all - Send cancel all to a device
5808  * @ipr_cmd:    ipr command struct
5809  *
5810  * This function sends a cancel all to a device to clear the
5811  * queue. If we are running TCQ on the device, QERR is set to 1,
5812  * which means all outstanding ops have been dropped on the floor.
5813  * Cancel all will return them to us.
5814  *
5815  * Return value:
5816  *      nothing
5817  **/
5818 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5819 {
5820         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5821         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5822         struct ipr_cmd_pkt *cmd_pkt;
5823
5824         res->in_erp = 1;
5825
5826         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5827
5828         if (!scsi_get_tag_type(scsi_cmd->device)) {
5829                 ipr_erp_request_sense(ipr_cmd);
5830                 return;
5831         }
5832
5833         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5834         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5835         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5836
5837         ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5838                    IPR_CANCEL_ALL_TIMEOUT);
5839 }
5840
5841 /**
5842  * ipr_dump_ioasa - Dump contents of IOASA
5843  * @ioa_cfg:    ioa config struct
5844  * @ipr_cmd:    ipr command struct
5845  * @res:                resource entry struct
5846  *
5847  * This function is invoked by the interrupt handler when ops
5848  * fail. It will log the IOASA if appropriate. Only called
5849  * for GPDD ops.
5850  *
5851  * Return value:
5852  *      none
5853  **/
5854 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5855                            struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5856 {
5857         int i;
5858         u16 data_len;
5859         u32 ioasc, fd_ioasc;
5860         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5861         __be32 *ioasa_data = (__be32 *)ioasa;
5862         int error_index;
5863
5864         ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5865         fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5866
5867         if (ioasc == 0)
5868                 return;
5869
5870         if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5871                 return;
5872
5873         if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5874                 error_index = ipr_get_error(fd_ioasc);
5875         else
5876                 error_index = ipr_get_error(ioasc);
5877
5878         if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5879                 /* Don't log an error if the IOA already logged one */
5880                 if (ioasa->hdr.ilid != 0)
5881                         return;
5882
5883                 if (!ipr_is_gscsi(res))
5884                         return;
5885
5886                 if (ipr_error_table[error_index].log_ioasa == 0)
5887                         return;
5888         }
5889
5890         ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5891
5892         data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5893         if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5894                 data_len = sizeof(struct ipr_ioasa64);
5895         else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
5896                 data_len = sizeof(struct ipr_ioasa);
5897
5898         ipr_err("IOASA Dump:\n");
5899
5900         for (i = 0; i < data_len / 4; i += 4) {
5901                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5902                         be32_to_cpu(ioasa_data[i]),
5903                         be32_to_cpu(ioasa_data[i+1]),
5904                         be32_to_cpu(ioasa_data[i+2]),
5905                         be32_to_cpu(ioasa_data[i+3]));
5906         }
5907 }
5908
5909 /**
5910  * ipr_gen_sense - Generate SCSI sense data from an IOASA
5911  * @ioasa:              IOASA
5912  * @sense_buf:  sense data buffer
5913  *
5914  * Return value:
5915  *      none
5916  **/
5917 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5918 {
5919         u32 failing_lba;
5920         u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5921         struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5922         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5923         u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
5924
5925         memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5926
5927         if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5928                 return;
5929
5930         ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5931
5932         if (ipr_is_vset_device(res) &&
5933             ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5934             ioasa->u.vset.failing_lba_hi != 0) {
5935                 sense_buf[0] = 0x72;
5936                 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5937                 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5938                 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5939
5940                 sense_buf[7] = 12;
5941                 sense_buf[8] = 0;
5942                 sense_buf[9] = 0x0A;
5943                 sense_buf[10] = 0x80;
5944
5945                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5946
5947                 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5948                 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5949                 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5950                 sense_buf[15] = failing_lba & 0x000000ff;
5951
5952                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5953
5954                 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5955                 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5956                 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5957                 sense_buf[19] = failing_lba & 0x000000ff;
5958         } else {
5959                 sense_buf[0] = 0x70;
5960                 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5961                 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5962                 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5963
5964                 /* Illegal request */
5965                 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
5966                     (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
5967                         sense_buf[7] = 10;      /* additional length */
5968
5969                         /* IOARCB was in error */
5970                         if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5971                                 sense_buf[15] = 0xC0;
5972                         else    /* Parameter data was invalid */
5973                                 sense_buf[15] = 0x80;
5974
5975                         sense_buf[16] =
5976                             ((IPR_FIELD_POINTER_MASK &
5977                               be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
5978                         sense_buf[17] =
5979                             (IPR_FIELD_POINTER_MASK &
5980                              be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
5981                 } else {
5982                         if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5983                                 if (ipr_is_vset_device(res))
5984                                         failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5985                                 else
5986                                         failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5987
5988                                 sense_buf[0] |= 0x80;   /* Or in the Valid bit */
5989                                 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5990                                 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5991                                 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5992                                 sense_buf[6] = failing_lba & 0x000000ff;
5993                         }
5994
5995                         sense_buf[7] = 6;       /* additional length */
5996                 }
5997         }
5998 }
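
/*
 * Both branches above hand-pack big-endian fields into the sense
 * buffer: fixed format (response code 0x70) can carry a 32-bit failing
 * LBA in bytes 3-6 when the Valid bit is set, while descriptor format
 * (0x72) is used when the LBA needs more than 32 bits. A minimal
 * sketch of the fixed-format packing (the helper name is illustrative):
 */
static void example_pack_fixed_sense_lba(unsigned char *sense_buf,
					 unsigned int lba)
{
	sense_buf[0] |= 0x80;			/* Valid: information field present */
	sense_buf[3] = (lba >> 24) & 0xff;	/* big endian, MSB first */
	sense_buf[4] = (lba >> 16) & 0xff;
	sense_buf[5] = (lba >> 8) & 0xff;
	sense_buf[6] = lba & 0xff;
}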
5999
6000 /**
6001  * ipr_get_autosense - Copy autosense data to sense buffer
6002  * @ipr_cmd:    ipr command struct
6003  *
6004  * This function copies the autosense buffer to the buffer
6005  * in the scsi_cmd, if there is autosense available.
6006  *
6007  * Return value:
6008  *      1 if autosense was available / 0 if not
6009  **/
6010 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6011 {
6012         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6013         struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6014
6015         if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6016                 return 0;
6017
6018         if (ipr_cmd->ioa_cfg->sis64)
6019                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6020                        min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6021                            SCSI_SENSE_BUFFERSIZE));
6022         else
6023                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6024                        min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6025                            SCSI_SENSE_BUFFERSIZE));
6026         return 1;
6027 }
6028
6029 /**
6030  * ipr_erp_start - Process an error response for a SCSI op
6031  * @ioa_cfg:    ioa config struct
6032  * @ipr_cmd:    ipr command struct
6033  *
6034  * This function determines whether or not to initiate ERP
6035  * on the affected device.
6036  *
6037  * Return value:
6038  *      nothing
6039  **/
6040 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6041                               struct ipr_cmnd *ipr_cmd)
6042 {
6043         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6044         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6045         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6046         u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6047
6048         if (!res) {
6049                 ipr_scsi_eh_done(ipr_cmd);
6050                 return;
6051         }
6052
6053         if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6054                 ipr_gen_sense(ipr_cmd);
6055
6056         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6057
6058         switch (masked_ioasc) {
6059         case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6060                 if (ipr_is_naca_model(res))
6061                         scsi_cmd->result |= (DID_ABORT << 16);
6062                 else
6063                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6064                 break;
6065         case IPR_IOASC_IR_RESOURCE_HANDLE:
6066         case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6067                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6068                 break;
6069         case IPR_IOASC_HW_SEL_TIMEOUT:
6070                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6071                 if (!ipr_is_naca_model(res))
6072                         res->needs_sync_complete = 1;
6073                 break;
6074         case IPR_IOASC_SYNC_REQUIRED:
6075                 if (!res->in_erp)
6076                         res->needs_sync_complete = 1;
6077                 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6078                 break;
6079         case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6080         case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6081                 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6082                 break;
6083         case IPR_IOASC_BUS_WAS_RESET:
6084         case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6085                 /*
6086                  * Report the bus reset and ask for a retry. The device
6087                  * will return CC/UA on the next command.
6088                  */
6089                 if (!res->resetting_device)
6090                         scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6091                 scsi_cmd->result |= (DID_ERROR << 16);
6092                 if (!ipr_is_naca_model(res))
6093                         res->needs_sync_complete = 1;
6094                 break;
6095         case IPR_IOASC_HW_DEV_BUS_STATUS:
6096                 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6097                 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6098                         if (!ipr_get_autosense(ipr_cmd)) {
6099                                 if (!ipr_is_naca_model(res)) {
6100                                         ipr_erp_cancel_all(ipr_cmd);
6101                                         return;
6102                                 }
6103                         }
6104                 }
6105                 if (!ipr_is_naca_model(res))
6106                         res->needs_sync_complete = 1;
6107                 break;
6108         case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6109                 break;
6110         default:
6111                 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6112                         scsi_cmd->result |= (DID_ERROR << 16);
6113                 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6114                         res->needs_sync_complete = 1;
6115                 break;
6116         }
6117
6118         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6119         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6120         scsi_cmd->scsi_done(scsi_cmd);
6121 }
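
/*
 * The switch above relies on how the Linux SCSI result word is packed:
 * the host (DID_*) byte lives in bits 16-23, which is why every branch
 * writes "DID_xxx << 16", while the low byte carries the SAM status
 * from the device. A minimal sketch of that packing:
 */
static unsigned int example_pack_scsi_result(unsigned char host_byte,
					     unsigned char sam_status)
{
	return ((unsigned int)host_byte << 16) | sam_status;
}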
6122
6123 /**
6124  * ipr_scsi_done - mid-layer done function
6125  * @ipr_cmd:    ipr command struct
6126  *
6127  * This function is invoked by the interrupt handler for
6128  * ops generated by the SCSI mid-layer
6129  *
6130  * Return value:
6131  *      none
6132  **/
6133 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6134 {
6135         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6136         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6137         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6138         unsigned long hrrq_flags;
6139
6140         scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6141
6142         if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6143                 scsi_dma_unmap(scsi_cmd);
6144
6145                 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
6146                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6147                 scsi_cmd->scsi_done(scsi_cmd);
6148                 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
6149         } else {
6150                 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
6151                 ipr_erp_start(ioa_cfg, ipr_cmd);
6152                 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
6153         }
6154 }
6155
6156 /**
6157  * ipr_queuecommand - Queue a mid-layer request
6158  * @shost:              scsi host struct
6159  * @scsi_cmd:   scsi command struct
6160  *
6161  * This function queues a request generated by the mid-layer.
6162  *
6163  * Return value:
6164  *      0 on success
6165  *      SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6166  *      SCSI_MLQUEUE_HOST_BUSY if host is busy
6167  **/
6168 static int ipr_queuecommand(struct Scsi_Host *shost,
6169                             struct scsi_cmnd *scsi_cmd)
6170 {
6171         struct ipr_ioa_cfg *ioa_cfg;
6172         struct ipr_resource_entry *res;
6173         struct ipr_ioarcb *ioarcb;
6174         struct ipr_cmnd *ipr_cmd;
6175         unsigned long hrrq_flags, lock_flags;
6176         int rc;
6177         struct ipr_hrr_queue *hrrq;
6178         int hrrq_id;
6179
6180         ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6181
6182         scsi_cmd->result = (DID_OK << 16);
6183         res = scsi_cmd->device->hostdata;
6184
6185         if (ipr_is_gata(res) && res->sata_port) {
6186                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6187                 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6188                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6189                 return rc;
6190         }
6191
6192         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6193         hrrq = &ioa_cfg->hrrq[hrrq_id];
6194
6195         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6196         /*
6197          * We are currently blocking all devices due to a host reset.
6198          * We have told the midlayer to stop giving us new requests,
6199          * but ERP ops don't count. FIXME
6200          */
6201         if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6202                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6203                 return SCSI_MLQUEUE_HOST_BUSY;
6204         }
6205
6206         /*
6207          * FIXME - Create scsi_set_host_offline interface
6208          *  and the ioa_is_dead check can be removed
6209          */
6210         if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6211                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6212                 goto err_nodev;
6213         }
6214
6215         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6216         if (ipr_cmd == NULL) {
6217                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6218                 return SCSI_MLQUEUE_HOST_BUSY;
6219         }
6220         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6221
6222         ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6223         ioarcb = &ipr_cmd->ioarcb;
6224
6225         memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6226         ipr_cmd->scsi_cmd = scsi_cmd;
6227         ipr_cmd->done = ipr_scsi_eh_done;
6228
6229         if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6230                 if (scsi_cmd->underflow == 0)
6231                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6232
6233                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6234                 if (ipr_is_gscsi(res) && res->reset_occurred) {
6235                         res->reset_occurred = 0;
6236                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6237                 }
6238                 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6239                 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
6240         }
6241
6242         if (scsi_cmd->cmnd[0] >= 0xC0 &&
6243             (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6244                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6245         }
6246
6247         if (ioa_cfg->sis64)
6248                 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6249         else
6250                 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6251
6252         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6253         if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6254                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6255                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6256                 if (!rc)
6257                         scsi_dma_unmap(scsi_cmd);
6258                 return SCSI_MLQUEUE_HOST_BUSY;
6259         }
6260
6261         if (unlikely(hrrq->ioa_is_dead)) {
6262                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6263                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6264                 scsi_dma_unmap(scsi_cmd);
6265                 goto err_nodev;
6266         }
6267
6268         ioarcb->res_handle = res->res_handle;
6269         if (res->needs_sync_complete) {
6270                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6271                 res->needs_sync_complete = 0;
6272         }
6273         list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6274         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6275         ipr_send_command(ipr_cmd);
6276         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6277         return 0;
6278
6279 err_nodev:
6280         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6281         memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6282         scsi_cmd->result = (DID_NO_CONNECT << 16);
6283         scsi_cmd->scsi_done(scsi_cmd);
6284         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6285         return 0;
6286 }
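
/*
 * The function above follows the ->queuecommand() contract: accept the
 * command and complete it asynchronously later, return a
 * SCSI_MLQUEUE_* code to make the midlayer retry, or complete a
 * command for a missing device immediately with DID_NO_CONNECT. A
 * minimal sketch of that shape; the example_* helpers are hypothetical
 * stand-ins for the driver's internals:
 */
static int example_queuecommand(struct Scsi_Host *shost,
				struct scsi_cmnd *cmd)
{
	if (!example_device_exists(cmd)) {
		cmd->result = DID_NO_CONNECT << 16;	/* done, don't retry */
		cmd->scsi_done(cmd);
		return 0;
	}

	if (!example_can_accept(shost))
		return SCSI_MLQUEUE_HOST_BUSY;		/* midlayer retries */

	example_send_to_adapter(cmd);	/* completion arrives via interrupt */
	return 0;
}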
6287
6288 /**
6289  * ipr_ioctl - IOCTL handler
6290  * @sdev:       scsi device struct
6291  * @cmd:        IOCTL cmd
6292  * @arg:        IOCTL arg
6293  *
6294  * Return value:
6295  *      0 on success / other on failure
6296  **/
6297 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6298 {
6299         struct ipr_resource_entry *res;
6300
6301         res = (struct ipr_resource_entry *)sdev->hostdata;
6302         if (res && ipr_is_gata(res)) {
6303                 if (cmd == HDIO_GET_IDENTITY)
6304                         return -ENOTTY;
6305                 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6306         }
6307
6308         return -EINVAL;
6309 }
6310
6311 /**
6312  * ipr_ioa_info - Get information about the card/driver
6313  * @host:       scsi host struct
6314  *
6315  * Return value:
6316  *      pointer to buffer with description string
6317  **/
6318 static const char *ipr_ioa_info(struct Scsi_Host *host)
6319 {
6320         static char buffer[512];
6321         struct ipr_ioa_cfg *ioa_cfg;
6322         unsigned long lock_flags = 0;
6323
6324         ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6325
6326         spin_lock_irqsave(host->host_lock, lock_flags);
6327         sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6328         spin_unlock_irqrestore(host->host_lock, lock_flags);
6329
6330         return buffer;
6331 }
6332
6333 static struct scsi_host_template driver_template = {
6334         .module = THIS_MODULE,
6335         .name = "IPR",
6336         .info = ipr_ioa_info,
6337         .ioctl = ipr_ioctl,
6338         .queuecommand = ipr_queuecommand,
6339         .eh_abort_handler = ipr_eh_abort,
6340         .eh_device_reset_handler = ipr_eh_dev_reset,
6341         .eh_host_reset_handler = ipr_eh_host_reset,
6342         .slave_alloc = ipr_slave_alloc,
6343         .slave_configure = ipr_slave_configure,
6344         .slave_destroy = ipr_slave_destroy,
6345         .target_alloc = ipr_target_alloc,
6346         .target_destroy = ipr_target_destroy,
6347         .change_queue_depth = ipr_change_queue_depth,
6348         .change_queue_type = ipr_change_queue_type,
6349         .bios_param = ipr_biosparam,
6350         .can_queue = IPR_MAX_COMMANDS,
6351         .this_id = -1,
6352         .sg_tablesize = IPR_MAX_SGLIST,
6353         .max_sectors = IPR_IOA_MAX_SECTORS,
6354         .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6355         .use_clustering = ENABLE_CLUSTERING,
6356         .shost_attrs = ipr_ioa_attrs,
6357         .sdev_attrs = ipr_dev_attrs,
6358         .proc_name = IPR_NAME,
6359         .no_write_same = 1,
6360 };
6361
6362 /**
6363  * ipr_ata_phy_reset - libata phy_reset handler
6364  * @ap:         ata port to reset
6365  *
6366  **/
6367 static void ipr_ata_phy_reset(struct ata_port *ap)
6368 {
6369         unsigned long flags;
6370         struct ipr_sata_port *sata_port = ap->private_data;
6371         struct ipr_resource_entry *res = sata_port->res;
6372         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6373         int rc;
6374
6375         ENTER;
6376         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6377         while (ioa_cfg->in_reset_reload) {
6378                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6379                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6380                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6381         }
6382
6383         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6384                 goto out_unlock;
6385
6386         rc = ipr_device_reset(ioa_cfg, res);
6387
6388         if (rc) {
6389                 ap->link.device[0].class = ATA_DEV_NONE;
6390                 goto out_unlock;
6391         }
6392
6393         ap->link.device[0].class = res->ata_class;
6394         if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6395                 ap->link.device[0].class = ATA_DEV_NONE;
6396
6397 out_unlock:
6398         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6399         LEAVE;
6400 }
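
/*
 * The while loop above is the usual "wait out a reset" shape: sleeping
 * with the host lock held would deadlock the reset path, so the lock
 * is dropped around wait_event() and the condition is re-checked with
 * the lock re-taken, since a new reset may have started meanwhile. A
 * minimal sketch with hypothetical names:
 */
struct example_ctx {
	spinlock_t lock;
	wait_queue_head_t wait_q;
	int in_reset;
};

static void example_wait_reset_done(struct example_ctx *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	while (ctx->in_reset) {
		spin_unlock_irqrestore(&ctx->lock, flags);
		wait_event(ctx->wait_q, !ctx->in_reset);
		spin_lock_irqsave(&ctx->lock, flags);
	}
	spin_unlock_irqrestore(&ctx->lock, flags);
}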
6401
6402 /**
6403  * ipr_ata_post_internal - Cleanup after an internal command
6404  * @qc: ATA queued command
6405  *
6406  * Return value:
6407  *      none
6408  **/
6409 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6410 {
6411         struct ipr_sata_port *sata_port = qc->ap->private_data;
6412         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6413         struct ipr_cmnd *ipr_cmd;
6414         struct ipr_hrr_queue *hrrq;
6415         unsigned long flags;
6416
6417         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6418         while (ioa_cfg->in_reset_reload) {
6419                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6420                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6421                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6422         }
6423
6424         for_each_hrrq(hrrq, ioa_cfg) {
6425                 spin_lock(&hrrq->_lock);
6426                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6427                         if (ipr_cmd->qc == qc) {
6428                                 ipr_device_reset(ioa_cfg, sata_port->res);
6429                                 break;
6430                         }
6431                 }
6432                 spin_unlock(&hrrq->_lock);
6433         }
6434         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6435 }
6436
6437 /**
6438  * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6439  * @regs:       destination
6440  * @tf: source ATA taskfile
6441  *
6442  * Return value:
6443  *      none
6444  **/
6445 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6446                              struct ata_taskfile *tf)
6447 {
6448         regs->feature = tf->feature;
6449         regs->nsect = tf->nsect;
6450         regs->lbal = tf->lbal;
6451         regs->lbam = tf->lbam;
6452         regs->lbah = tf->lbah;
6453         regs->device = tf->device;
6454         regs->command = tf->command;
6455         regs->hob_feature = tf->hob_feature;
6456         regs->hob_nsect = tf->hob_nsect;
6457         regs->hob_lbal = tf->hob_lbal;
6458         regs->hob_lbam = tf->hob_lbam;
6459         regs->hob_lbah = tf->hob_lbah;
6460         regs->ctl = tf->ctl;
6461 }
6462
6463 /**
6464  * ipr_sata_done - done function for SATA commands
6465  * @ipr_cmd:    ipr command struct
6466  *
6467  * This function is invoked by the interrupt handler for
6468  * ops generated by the SCSI mid-layer to SATA devices
6469  *
6470  * Return value:
6471  *      none
6472  **/
6473 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6474 {
6475         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6476         struct ata_queued_cmd *qc = ipr_cmd->qc;
6477         struct ipr_sata_port *sata_port = qc->ap->private_data;
6478         struct ipr_resource_entry *res = sata_port->res;
6479         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6480
6481         spin_lock(&ipr_cmd->hrrq->_lock);
6482         if (ipr_cmd->ioa_cfg->sis64)
6483                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6484                        sizeof(struct ipr_ioasa_gata));
6485         else
6486                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6487                        sizeof(struct ipr_ioasa_gata));
6488         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6489
6490         if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6491                 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6492
6493         if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6494                 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6495         else
6496                 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6497         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6498         spin_unlock(&ipr_cmd->hrrq->_lock);
6499         ata_qc_complete(qc);
6500 }
6501
6502 /**
6503  * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6504  * @ipr_cmd:    ipr command struct
6505  * @qc:         ATA queued command
6506  *
6507  **/
6508 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6509                                   struct ata_queued_cmd *qc)
6510 {
6511         u32 ioadl_flags = 0;
6512         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6513         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6514         struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6515         int len = qc->nbytes;
6516         struct scatterlist *sg;
6517         unsigned int si;
6518         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6519
6520         if (len == 0)
6521                 return;
6522
6523         if (qc->dma_dir == DMA_TO_DEVICE) {
6524                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6525                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6526         } else if (qc->dma_dir == DMA_FROM_DEVICE)
6527                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6528
6529         ioarcb->data_transfer_length = cpu_to_be32(len);
6530         ioarcb->ioadl_len =
6531                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6532         ioarcb->u.sis64_addr_data.data_ioadl_addr =
6533                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6534
6535         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6536                 ioadl64->flags = cpu_to_be32(ioadl_flags);
6537                 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6538                 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6539
6540                 last_ioadl64 = ioadl64;
6541                 ioadl64++;
6542         }
6543
6544         if (likely(last_ioadl64))
6545                 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6546 }
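
/*
 * Unlike ipr_build_ioadl64(), which indexes its array and flags entry
 * [nseg - 1] after the loop, this builder walks an iterator, so it
 * remembers the most recent descriptor in order to flag it afterwards.
 * A minimal sketch of that "track the last element" idiom (names are
 * illustrative):
 */
static void example_flag_last(unsigned int *flags_array, int n)
{
	unsigned int *last = NULL;	/* stays NULL if the list is empty */
	int i;

	for (i = 0; i < n; i++) {
		flags_array[i] = 0;
		last = &flags_array[i];	/* remember most recent element */
	}
	if (last)
		*last |= 0x1;		/* mark the end of the list */
}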
6547
6548 /**
6549  * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6550  * @ipr_cmd:    ipr command struct
6551  * @qc:         ATA queued command
6552  *
6553  **/
6554 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6555                                 struct ata_queued_cmd *qc)
6556 {
6557         u32 ioadl_flags = 0;
6558         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6559         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6560         struct ipr_ioadl_desc *last_ioadl = NULL;
6561         int len = qc->nbytes;
6562         struct scatterlist *sg;
6563         unsigned int si;
6564
6565         if (len == 0)
6566                 return;
6567
6568         if (qc->dma_dir == DMA_TO_DEVICE) {
6569                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6570                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6571                 ioarcb->data_transfer_length = cpu_to_be32(len);
6572                 ioarcb->ioadl_len =
6573                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6574         } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6575                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6576                 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6577                 ioarcb->read_ioadl_len =
6578                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6579         }
6580
6581         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6582                 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6583                 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6584
6585                 last_ioadl = ioadl;
6586                 ioadl++;
6587         }
6588
6589         if (likely(last_ioadl))
6590                 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6591 }
6592
6593 /**
6594  * ipr_qc_defer - Get a free ipr_cmd
6595  * @qc: queued command
6596  *
6597  * Return value:
6598  *      0 on success / ATA_DEFER_LINK if the command must be deferred
6599  **/
6600 static int ipr_qc_defer(struct ata_queued_cmd *qc)
6601 {
6602         struct ata_port *ap = qc->ap;
6603         struct ipr_sata_port *sata_port = ap->private_data;
6604         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6605         struct ipr_cmnd *ipr_cmd;
6606         struct ipr_hrr_queue *hrrq;
6607         int hrrq_id;
6608
6609         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6610         hrrq = &ioa_cfg->hrrq[hrrq_id];
6611
6612         qc->lldd_task = NULL;
6613         spin_lock(&hrrq->_lock);
6614         if (unlikely(hrrq->ioa_is_dead)) {
6615                 spin_unlock(&hrrq->_lock);
6616                 return 0;
6617         }
6618
6619         if (unlikely(!hrrq->allow_cmds)) {
6620                 spin_unlock(&hrrq->_lock);
6621                 return ATA_DEFER_LINK;
6622         }
6623
6624         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6625         if (ipr_cmd == NULL) {
6626                 spin_unlock(&hrrq->_lock);
6627                 return ATA_DEFER_LINK;
6628         }
6629
6630         qc->lldd_task = ipr_cmd;
6631         spin_unlock(&hrrq->_lock);
6632         return 0;
6633 }
6634
6635 /**
6636  * ipr_qc_issue - Issue a SATA qc to a device
6637  * @qc: queued command
6638  *
6639  * Return value:
6640  *      0 on success / AC_ERR_* on failure
6641  **/
6642 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6643 {
6644         struct ata_port *ap = qc->ap;
6645         struct ipr_sata_port *sata_port = ap->private_data;
6646         struct ipr_resource_entry *res = sata_port->res;
6647         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6648         struct ipr_cmnd *ipr_cmd;
6649         struct ipr_ioarcb *ioarcb;
6650         struct ipr_ioarcb_ata_regs *regs;
6651
6652         if (qc->lldd_task == NULL)
6653                 ipr_qc_defer(qc);
6654
6655         ipr_cmd = qc->lldd_task;
6656         if (ipr_cmd == NULL)
6657                 return AC_ERR_SYSTEM;
6658
6659         qc->lldd_task = NULL;
6660         spin_lock(&ipr_cmd->hrrq->_lock);
6661         if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6662                         ipr_cmd->hrrq->ioa_is_dead)) {
6663                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6664                 spin_unlock(&ipr_cmd->hrrq->_lock);
6665                 return AC_ERR_SYSTEM;
6666         }
6667
6668         ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
6669         ioarcb = &ipr_cmd->ioarcb;
6670
6671         if (ioa_cfg->sis64) {
6672                 regs = &ipr_cmd->i.ata_ioadl.regs;
6673                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6674         } else
6675                 regs = &ioarcb->u.add_data.u.regs;
6676
6677         memset(regs, 0, sizeof(*regs));
6678         ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6679
6680         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
6681         ipr_cmd->qc = qc;
6682         ipr_cmd->done = ipr_sata_done;
6683         ipr_cmd->ioarcb.res_handle = res->res_handle;
6684         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6685         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6686         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6687         ipr_cmd->dma_use_sg = qc->n_elem;
6688
6689         if (ioa_cfg->sis64)
6690                 ipr_build_ata_ioadl64(ipr_cmd, qc);
6691         else
6692                 ipr_build_ata_ioadl(ipr_cmd, qc);
6693
6694         regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6695         ipr_copy_sata_tf(regs, &qc->tf);
6696         memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6697         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6698
6699         switch (qc->tf.protocol) {
6700         case ATA_PROT_NODATA:
6701         case ATA_PROT_PIO:
6702                 break;
6703
6704         case ATA_PROT_DMA:
6705                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6706                 break;
6707
6708         case ATAPI_PROT_PIO:
6709         case ATAPI_PROT_NODATA:
6710                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6711                 break;
6712
6713         case ATAPI_PROT_DMA:
6714                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6715                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6716                 break;
6717
6718         default:
6719                 WARN_ON(1);
6720                 spin_unlock(&ipr_cmd->hrrq->_lock);
6721                 return AC_ERR_INVALID;
6722         }
6723
6724         ipr_send_command(ipr_cmd);
6725         spin_unlock(&ipr_cmd->hrrq->_lock);
6726
6727         return 0;
6728 }
6729
6730 /**
6731  * ipr_qc_fill_rtf - Read result TF
6732  * @qc: ATA queued command
6733  *
6734  * Return value:
6735  *      true
6736  **/
6737 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6738 {
6739         struct ipr_sata_port *sata_port = qc->ap->private_data;
6740         struct ipr_ioasa_gata *g = &sata_port->ioasa;
6741         struct ata_taskfile *tf = &qc->result_tf;
6742
6743         tf->feature = g->error;
6744         tf->nsect = g->nsect;
6745         tf->lbal = g->lbal;
6746         tf->lbam = g->lbam;
6747         tf->lbah = g->lbah;
6748         tf->device = g->device;
6749         tf->command = g->status;
6750         tf->hob_nsect = g->hob_nsect;
6751         tf->hob_lbal = g->hob_lbal;
6752         tf->hob_lbam = g->hob_lbam;
6753         tf->hob_lbah = g->hob_lbah;
6754
6755         return true;
6756 }
6757
6758 static struct ata_port_operations ipr_sata_ops = {
6759         .phy_reset = ipr_ata_phy_reset,
6760         .hardreset = ipr_sata_reset,
6761         .post_internal_cmd = ipr_ata_post_internal,
6762         .qc_prep = ata_noop_qc_prep,
6763         .qc_defer = ipr_qc_defer,
6764         .qc_issue = ipr_qc_issue,
6765         .qc_fill_rtf = ipr_qc_fill_rtf,
6766         .port_start = ata_sas_port_start,
6767         .port_stop = ata_sas_port_stop
6768 };
6769
6770 static struct ata_port_info sata_port_info = {
6771         .flags          = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
6772         .pio_mask       = ATA_PIO4_ONLY,
6773         .mwdma_mask     = ATA_MWDMA2,
6774         .udma_mask      = ATA_UDMA6,
6775         .port_ops       = &ipr_sata_ops
6776 };
6777
6778 #ifdef CONFIG_PPC_PSERIES
6779 static const u16 ipr_blocked_processors[] = {
6780         PVR_NORTHSTAR,
6781         PVR_PULSAR,
6782         PVR_POWER4,
6783         PVR_ICESTAR,
6784         PVR_SSTAR,
6785         PVR_POWER4p,
6786         PVR_630,
6787         PVR_630p
6788 };
6789
6790 /**
6791  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6792  * @ioa_cfg:    ioa cfg struct
6793  *
6794  * Adapters that use Gemstone revision < 3.1 do not work reliably on
6795  * certain pSeries hardware. This function determines if the given
6796  * adapter is in one of these configurations or not.
6797  *
6798  * Return value:
6799  *      1 if adapter is not supported / 0 if adapter is supported
6800  **/
6801 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6802 {
6803         int i;
6804
6805         if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6806                 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
6807                         if (pvr_version_is(ipr_blocked_processors[i]))
6808                                 return 1;
6809                 }
6810         }
6811         return 0;
6812 }
6813 #else
6814 #define ipr_invalid_adapter(ioa_cfg) 0
6815 #endif
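
/*
 * The #ifdef/#else pair above is the usual stub pattern for optional
 * platform checks: when the option is not set, the function collapses
 * to a macro evaluating to 0, so callers need no #ifdefs of their own.
 * A minimal sketch with a hypothetical config option and helper:
 */
#ifdef CONFIG_EXAMPLE_PLATFORM
static int example_platform_quirk(void)
{
	return example_detect_quirk();	/* real check on this platform */
}
#else
#define example_platform_quirk() 0	/* compiles away everywhere else */
#endif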
6816
6817 /**
6818  * ipr_ioa_bringdown_done - IOA bring down completion.
6819  * @ipr_cmd:    ipr command struct
6820  *
6821  * This function processes the completion of an adapter bring down.
6822  * It wakes any reset sleepers.
6823  *
6824  * Return value:
6825  *      IPR_RC_JOB_RETURN
6826  **/
6827 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6828 {
6829         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6830         int i;
6831
6832         ENTER;
6833         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
6834                 ipr_trace;
6835                 spin_unlock_irq(ioa_cfg->host->host_lock);
6836                 scsi_unblock_requests(ioa_cfg->host);
6837                 spin_lock_irq(ioa_cfg->host->host_lock);
6838         }
6839
6840         ioa_cfg->in_reset_reload = 0;
6841         ioa_cfg->reset_retries = 0;
6842         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
6843                 spin_lock(&ioa_cfg->hrrq[i]._lock);
6844                 ioa_cfg->hrrq[i].ioa_is_dead = 1;
6845                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
6846         }
6847         wmb();
6848
6849         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6850         wake_up_all(&ioa_cfg->reset_wait_q);
6851         LEAVE;
6852
6853         return IPR_RC_JOB_RETURN;
6854 }
6855
6856 /**
6857  * ipr_ioa_reset_done - IOA reset completion.
6858  * @ipr_cmd:    ipr command struct
6859  *
6860  * This function processes the completion of an adapter reset.
6861  * It schedules any necessary mid-layer add/removes and
6862  * wakes any reset sleepers.
6863  *
6864  * Return value:
6865  *      IPR_RC_JOB_RETURN
6866  **/
6867 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6868 {
6869         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6870         struct ipr_resource_entry *res;
6871         struct ipr_hostrcb *hostrcb, *temp;
6872         int i = 0, j;
6873
6874         ENTER;
6875         ioa_cfg->in_reset_reload = 0;
6876         for (j = 0; j < ioa_cfg->hrrq_num; j++) {
6877                 spin_lock(&ioa_cfg->hrrq[j]._lock);
6878                 ioa_cfg->hrrq[j].allow_cmds = 1;
6879                 spin_unlock(&ioa_cfg->hrrq[j]._lock);
6880         }
6881         wmb();
6882         ioa_cfg->reset_cmd = NULL;
6883         ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6884
6885         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6886                 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6887                         ipr_trace;
6888                         break;
6889                 }
6890         }
6891         schedule_work(&ioa_cfg->work_q);
6892
6893         list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6894                 list_del(&hostrcb->queue);
6895                 if (i++ < IPR_NUM_LOG_HCAMS)
6896                         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6897                 else
6898                         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6899         }
6900
6901         scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6902         dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6903
6904         ioa_cfg->reset_retries = 0;
6905         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6906         wake_up_all(&ioa_cfg->reset_wait_q);
6907
6908         spin_unlock(ioa_cfg->host->host_lock);
6909         scsi_unblock_requests(ioa_cfg->host);
6910         spin_lock(ioa_cfg->host->host_lock);
6911
6912         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6913                 scsi_block_requests(ioa_cfg->host);
6914
6915         LEAVE;
6916         return IPR_RC_JOB_RETURN;
6917 }
6918
6919 /**
6920  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6921  * @supported_dev:      supported device struct
6922  * @vpids:                      vendor product id struct
6923  *
6924  * Return value:
6925  *      none
6926  **/
6927 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6928                                  struct ipr_std_inq_vpids *vpids)
6929 {
6930         memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6931         memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6932         supported_dev->num_records = 1;
6933         supported_dev->data_length =
6934                 cpu_to_be16(sizeof(struct ipr_supported_device));
6935         supported_dev->reserved = 0;
6936 }
6937
6938 /**
6939  * ipr_set_supported_devs - Send Set Supported Devices for a device
6940  * @ipr_cmd:    ipr command struct
6941  *
6942  * This function sends a Set Supported Devices to the adapter
6943  *
6944  * Return value:
6945  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6946  **/
6947 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6948 {
6949         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6950         struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6951         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6952         struct ipr_resource_entry *res = ipr_cmd->u.res;
6953
6954         ipr_cmd->job_step = ipr_ioa_reset_done;
6955
6956         list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6957                 if (!ipr_is_scsi_disk(res))
6958                         continue;
6959
6960                 ipr_cmd->u.res = res;
6961                 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
6962
6963                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6964                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6965                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6966
6967                 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6968                 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
6969                 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6970                 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6971
6972                 ipr_init_ioadl(ipr_cmd,
6973                                ioa_cfg->vpd_cbs_dma +
6974                                  offsetof(struct ipr_misc_cbs, supp_dev),
6975                                sizeof(struct ipr_supported_device),
6976                                IPR_IOADL_FLAGS_WRITE_LAST);
6977
6978                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6979                            IPR_SET_SUP_DEVICE_TIMEOUT);
6980
6981                 if (!ioa_cfg->sis64)
6982                         ipr_cmd->job_step = ipr_set_supported_devs;
6983                 LEAVE;
6984                 return IPR_RC_JOB_RETURN;
6985         }
6986
6987         LEAVE;
6988         return IPR_RC_JOB_CONTINUE;
6989 }
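
/*
 * ipr_set_supported_devs() above is one step of the reset job: it
 * issues at most one command per invocation, saves its list cursor in
 * ipr_cmd->u.res, and names itself as the next job_step so the
 * completion path re-enters it to continue where it left off. A
 * minimal sketch of that "one item per step, resume from a cursor"
 * shape with hypothetical helpers:
 */
struct example_job {
	int cursor;				/* next item to process */
	int (*job_step)(struct example_job *);	/* re-entered on completion */
};

static int example_job_step(struct example_job *job)
{
	if (job->cursor < example_item_count()) {
		example_send_async(job->cursor++);	/* one item per call */
		job->job_step = example_job_step;	/* resume here later */
		return 0;				/* wait for completion */
	}
	return 1;					/* all items processed */
}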
6990
6991 /**
6992  * ipr_get_mode_page - Locate specified mode page
6993  * @mode_pages: mode page buffer
6994  * @page_code:  page code to find
6995  * @len:                minimum required length for mode page
6996  *
6997  * Return value:
6998  *      pointer to mode page / NULL on failure
6999  **/
7000 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7001                                u32 page_code, u32 len)
7002 {
7003         struct ipr_mode_page_hdr *mode_hdr;
7004         u32 page_length;
7005         u32 length;
7006
7007         if (!mode_pages || (mode_pages->hdr.length == 0))
7008                 return NULL;
7009
7010         length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7011         mode_hdr = (struct ipr_mode_page_hdr *)
7012                 (mode_pages->data + mode_pages->hdr.block_desc_len);
7013
7014         while (length) {
7015                 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7016                         if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7017                                 return mode_hdr;
7018                         break;
7019                 } else {
7020                         page_length = (sizeof(struct ipr_mode_page_hdr) +
7021                                        mode_hdr->page_length);
7022                         length -= page_length;
7023                         mode_hdr = (struct ipr_mode_page_hdr *)
7024                                 ((unsigned long)mode_hdr + page_length);
7025                 }
7026         }
7027         return NULL;
7028 }
7029
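/*
 * The walk above follows the standard SCSI mode parameter data layout:
 * a four-byte mode parameter header whose length byte excludes itself
 * (hence the "+ 1"), followed by hdr.block_desc_len bytes of block
 * descriptors, then a chain of pages, each led by a small header of
 * roughly the form below (a sketch; the real definition lives in
 * ipr.h):
 *
 *	struct ipr_mode_page_hdr {
 *		u8 page_code;	(flag bits masked off by
 *				 IPR_GET_MODE_PAGE_CODE)
 *		u8 page_length;	(bytes that follow this header)
 *	};
 *
 * Advancing to the next page therefore steps by page_length plus the
 * header size, as the else branch computes.
 */
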
7030 /**
7031  * ipr_check_term_power - Check for term power errors
7032  * @ioa_cfg:    ioa config struct
7033  * @mode_pages: IOAFP mode pages buffer
7034  *
7035  * Check the IOAFP's mode page 28 for term power errors
7036  *
7037  * Return value:
7038  *      nothing
7039  **/
7040 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7041                                  struct ipr_mode_pages *mode_pages)
7042 {
7043         int i;
7044         int entry_length;
7045         struct ipr_dev_bus_entry *bus;
7046         struct ipr_mode_page28 *mode_page;
7047
7048         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7049                                       sizeof(struct ipr_mode_page28));
7050
7051         entry_length = mode_page->entry_length;
7052
7053         bus = mode_page->bus;
7054
7055         for (i = 0; i < mode_page->num_entries; i++) {
7056                 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7057                         dev_err(&ioa_cfg->pdev->dev,
7058                                 "Term power is absent on scsi bus %d\n",
7059                                 bus->res_addr.bus);
7060                 }
7061
7062                 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7063         }
7064 }
7065
7066 /**
7067  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7068  * @ioa_cfg:    ioa config struct
7069  *
7070  * Looks through the config table for SES devices. If an SES
7071  * device appears in the driver's SES table with a maximum SCSI
7072  * bus speed, that bus's speed is limited accordingly.
7073  *
7074  * Return value:
7075  *      none
7076  **/
7077 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7078 {
7079         u32 max_xfer_rate;
7080         int i;
7081
7082         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7083                 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7084                                                        ioa_cfg->bus_attr[i].bus_width);
7085
7086                 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7087                         ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7088         }
7089 }
7090
7091 /**
7092  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7093  * @ioa_cfg:    ioa config struct
7094  * @mode_pages: mode page 28 buffer
7095  *
7096  * Updates mode page 28 based on driver configuration
7097  *
7098  * Return value:
7099  *      none
7100  **/
7101 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7102                                           struct ipr_mode_pages *mode_pages)
7103 {
7104         int i, entry_length;
7105         struct ipr_dev_bus_entry *bus;
7106         struct ipr_bus_attributes *bus_attr;
7107         struct ipr_mode_page28 *mode_page;
7108
7109         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7110                                       sizeof(struct ipr_mode_page28));
7111
7112         entry_length = mode_page->entry_length;
7113
7114         /* Loop for each device bus entry */
7115         for (i = 0, bus = mode_page->bus;
7116              i < mode_page->num_entries;
7117              i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7118                 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7119                         dev_err(&ioa_cfg->pdev->dev,
7120                                 "Invalid resource address reported: 0x%08X\n",
7121                                 IPR_GET_PHYS_LOC(bus->res_addr));
7122                         continue;
7123                 }
7124
7125                 bus_attr = &ioa_cfg->bus_attr[i];
7126                 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7127                 bus->bus_width = bus_attr->bus_width;
7128                 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7129                 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7130                 if (bus_attr->qas_enabled)
7131                         bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7132                 else
7133                         bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7134         }
7135 }
7136
7137 /**
7138  * ipr_build_mode_select - Build a mode select command
7139  * @ipr_cmd:    ipr command struct
7140  * @res_handle: resource handle to send command to
7141  * @parm:               Byte 1 of the Mode Select CDB (PF/SP bits)
7142  * @dma_addr:   DMA buffer address
7143  * @xfer_len:   data transfer length
7144  *
7145  * Return value:
7146  *      none
7147  **/
7148 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7149                                   __be32 res_handle, u8 parm,
7150                                   dma_addr_t dma_addr, u8 xfer_len)
7151 {
7152         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7153
7154         ioarcb->res_handle = res_handle;
7155         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7156         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7157         ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7158         ioarcb->cmd_pkt.cdb[1] = parm;
7159         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7160
7161         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7162 }
7163
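/*
 * A sketch of the resulting six-byte MODE SELECT CDB, for reference.
 * The callers below pass parm = 0x11, i.e. the PF (page format) and SP
 * (save pages) bits, asking the IOA to both apply the page data and
 * persist it:
 *
 *	cdb[0] = 0x15 (MODE_SELECT)
 *	cdb[1] = parm (PF/SP control bits)
 *	cdb[4] = xfer_len (parameter list length)
 */
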
7164 /**
7165  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7166  * @ipr_cmd:    ipr command struct
7167  *
7168  * This function sets up the SCSI bus attributes and sends
7169  * a Mode Select for Page 28 to activate them.
7170  *
7171  * Return value:
7172  *      IPR_RC_JOB_RETURN
7173  **/
7174 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7175 {
7176         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7177         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7178         int length;
7179
7180         ENTER;
7181         ipr_scsi_bus_speed_limit(ioa_cfg);
7182         ipr_check_term_power(ioa_cfg, mode_pages);
7183         ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7184         length = mode_pages->hdr.length + 1;
7185         mode_pages->hdr.length = 0;
7186
7187         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7188                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7189                               length);
7190
7191         ipr_cmd->job_step = ipr_set_supported_devs;
7192         ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7193                                     struct ipr_resource_entry, queue);
7194         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7195
7196         LEAVE;
7197         return IPR_RC_JOB_RETURN;
7198 }
7199
7200 /**
7201  * ipr_build_mode_sense - Builds a mode sense command
7202  * @ipr_cmd:    ipr command struct
7203  * @res_handle: resource handle to send command to
7204  * @parm:               Byte 2 of mode sense command
7205  * @dma_addr:   DMA address of mode sense buffer
7206  * @xfer_len:   Size of DMA buffer
7207  *
7208  * Return value:
7209  *      none
7210  **/
7211 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7212                                  __be32 res_handle,
7213                                  u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7214 {
7215         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7216
7217         ioarcb->res_handle = res_handle;
7218         ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7219         ioarcb->cmd_pkt.cdb[2] = parm;
7220         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7221         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7222
7223         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7224 }
7225
7226 /**
7227  * ipr_reset_cmd_failed - Handle failure of IOA reset command
7228  * @ipr_cmd:    ipr command struct
7229  *
7230  * This function handles the failure of an IOA bringup command.
7231  *
7232  * Return value:
7233  *      IPR_RC_JOB_RETURN
7234  **/
7235 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7236 {
7237         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7238         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7239
7240         dev_err(&ioa_cfg->pdev->dev,
7241                 "0x%02X failed with IOASC: 0x%08X\n",
7242                 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7243
7244         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7245         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7246         return IPR_RC_JOB_RETURN;
7247 }
7248
7249 /**
7250  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7251  * @ipr_cmd:    ipr command struct
7252  *
7253  * This function handles the failure of a Mode Sense to the IOAFP.
7254  * Some adapters do not handle all mode pages.
7255  *
7256  * Return value:
7257  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7258  **/
7259 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7260 {
7261         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7262         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7263
7264         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7265                 ipr_cmd->job_step = ipr_set_supported_devs;
7266                 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7267                                             struct ipr_resource_entry, queue);
7268                 return IPR_RC_JOB_CONTINUE;
7269         }
7270
7271         return ipr_reset_cmd_failed(ipr_cmd);
7272 }
7273
7274 /**
7275  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7276  * @ipr_cmd:    ipr command struct
7277  *
7278  * This function sends a Page 28 mode sense to the IOA to
7279  * retrieve SCSI bus attributes.
7280  *
7281  * Return value:
7282  *      IPR_RC_JOB_RETURN
7283  **/
7284 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7285 {
7286         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7287
7288         ENTER;
7289         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7290                              0x28, ioa_cfg->vpd_cbs_dma +
7291                              offsetof(struct ipr_misc_cbs, mode_pages),
7292                              sizeof(struct ipr_mode_pages));
7293
7294         ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7295         ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7296
7297         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7298
7299         LEAVE;
7300         return IPR_RC_JOB_RETURN;
7301 }
7302
7303 /**
7304  * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7305  * @ipr_cmd:    ipr command struct
7306  *
7307  * This function enables dual IOA RAID support if possible.
7308  *
7309  * Return value:
7310  *      IPR_RC_JOB_RETURN
7311  **/
7312 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7313 {
7314         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7315         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7316         struct ipr_mode_page24 *mode_page;
7317         int length;
7318
7319         ENTER;
7320         mode_page = ipr_get_mode_page(mode_pages, 0x24,
7321                                       sizeof(struct ipr_mode_page24));
7322
7323         if (mode_page)
7324                 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7325
7326         length = mode_pages->hdr.length + 1;
7327         mode_pages->hdr.length = 0;
7328
7329         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7330                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7331                               length);
7332
7333         ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7334         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7335
7336         LEAVE;
7337         return IPR_RC_JOB_RETURN;
7338 }
7339
7340 /**
7341  * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7342  * @ipr_cmd:    ipr command struct
7343  *
7344  * This function handles the failure of a Mode Sense to the IOAFP.
7345  * Some adapters do not handle all mode pages.
7346  *
7347  * Return value:
7348  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7349  **/
7350 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7351 {
7352         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7353
7354         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7355                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7356                 return IPR_RC_JOB_CONTINUE;
7357         }
7358
7359         return ipr_reset_cmd_failed(ipr_cmd);
7360 }
7361
7362 /**
7363  * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7364  * @ipr_cmd:    ipr command struct
7365  *
7366  * This function sends a mode sense to the IOA to retrieve
7367  * the IOA Advanced Function Control mode page.
7368  *
7369  * Return value:
7370  *      IPR_RC_JOB_RETURN
7371  **/
7372 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7373 {
7374         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7375
7376         ENTER;
7377         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7378                              0x24, ioa_cfg->vpd_cbs_dma +
7379                              offsetof(struct ipr_misc_cbs, mode_pages),
7380                              sizeof(struct ipr_mode_pages));
7381
7382         ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7383         ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7384
7385         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7386
7387         LEAVE;
7388         return IPR_RC_JOB_RETURN;
7389 }
7390
7391 /**
7392  * ipr_init_res_table - Initialize the resource table
7393  * @ipr_cmd:    ipr command struct
7394  *
7395  * This function compares the existing resource table with the
7396  * config table just fetched from the adapter. It handles old/new
7397  * devices, scheduling their addition to or removal from the
7398  * mid-layer as appropriate.
7399  *
7400  * Return value:
7401  *      IPR_RC_JOB_CONTINUE
7402  **/
7403 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7404 {
7405         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7406         struct ipr_resource_entry *res, *temp;
7407         struct ipr_config_table_entry_wrapper cfgtew;
7408         int entries, found, flag, i;
7409         LIST_HEAD(old_res);
7410
7411         ENTER;
7412         if (ioa_cfg->sis64)
7413                 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7414         else
7415                 flag = ioa_cfg->u.cfg_table->hdr.flags;
7416
7417         if (flag & IPR_UCODE_DOWNLOAD_REQ)
7418                 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7419
7420         list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7421                 list_move_tail(&res->queue, &old_res);
7422
7423         if (ioa_cfg->sis64)
7424                 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7425         else
7426                 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7427
7428         for (i = 0; i < entries; i++) {
7429                 if (ioa_cfg->sis64)
7430                         cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7431                 else
7432                         cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7433                 found = 0;
7434
7435                 list_for_each_entry_safe(res, temp, &old_res, queue) {
7436                         if (ipr_is_same_device(res, &cfgtew)) {
7437                                 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7438                                 found = 1;
7439                                 break;
7440                         }
7441                 }
7442
7443                 if (!found) {
7444                         if (list_empty(&ioa_cfg->free_res_q)) {
7445                                 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7446                                 break;
7447                         }
7448
7449                         found = 1;
7450                         res = list_entry(ioa_cfg->free_res_q.next,
7451                                          struct ipr_resource_entry, queue);
7452                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7453                         ipr_init_res_entry(res, &cfgtew);
7454                         res->add_to_ml = 1;
7455                 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7456                         res->sdev->allow_restart = 1;
7457
7458                 if (found)
7459                         ipr_update_res_entry(res, &cfgtew);
7460         }
7461
7462         list_for_each_entry_safe(res, temp, &old_res, queue) {
7463                 if (res->sdev) {
7464                         res->del_from_ml = 1;
7465                         res->res_handle = IPR_INVALID_RES_HANDLE;
7466                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7467                 }
7468         }
7469
7470         list_for_each_entry_safe(res, temp, &old_res, queue) {
7471                 ipr_clear_res_target(res);
7472                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7473         }
7474
7475         if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7476                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7477         else
7478                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7479
7480         LEAVE;
7481         return IPR_RC_JOB_CONTINUE;
7482 }
7483
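/*
 * Reconciliation summary for ipr_init_res_table(): every in-use
 * resource is first parked on the local old_res list; each config
 * table entry then either reclaims its matching entry (via
 * ipr_is_same_device()) or takes a fresh one from free_res_q and
 * marks it add_to_ml. Anything still on old_res afterwards has
 * disappeared from the adapter: entries with an attached sdev are
 * flagged del_from_ml for later removal, the rest are returned
 * directly to free_res_q.
 */
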
7484 /**
7485  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7486  * @ipr_cmd:    ipr command struct
7487  *
7488  * This function sends a Query IOA Configuration command
7489  * to the adapter to retrieve the IOA configuration table.
7490  *
7491  * Return value:
7492  *      IPR_RC_JOB_RETURN
7493  **/
7494 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7495 {
7496         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7497         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7498         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7499         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7500
7501         ENTER;
7502         if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7503                 ioa_cfg->dual_raid = 1;
7504         dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7505                  ucode_vpd->major_release, ucode_vpd->card_type,
7506                  ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7507         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7508         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7509
7510         ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7511         ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7512         ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7513         ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7514
7515         ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7516                        IPR_IOADL_FLAGS_READ_LAST);
7517
7518         ipr_cmd->job_step = ipr_init_res_table;
7519
7520         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7521
7522         LEAVE;
7523         return IPR_RC_JOB_RETURN;
7524 }
7525
7526 /**
7527  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7528  * @ipr_cmd:    ipr command struct
 * @flags:      inquiry CDB flags byte (e.g. the EVPD bit)
 * @page:       inquiry page code
 * @dma_addr:   DMA address of the inquiry buffer
 * @xfer_len:   transfer length
7529  *
7530  * This utility function sends an inquiry to the adapter.
7531  *
7532  * Return value:
7533  *      none
7534  **/
7535 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7536                               dma_addr_t dma_addr, u8 xfer_len)
7537 {
7538         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7539
7540         ENTER;
7541         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7542         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7543
7544         ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7545         ioarcb->cmd_pkt.cdb[1] = flags;
7546         ioarcb->cmd_pkt.cdb[2] = page;
7547         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7548
7549         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7550
7551         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7552         LEAVE;
7553 }
7554
7555 /**
7556  * ipr_inquiry_page_supported - Is the given inquiry page supported
7557  * @page0:              inquiry page 0 buffer
7558  * @page:               page code.
7559  *
7560  * This function determines if the specified inquiry page is supported.
7561  *
7562  * Return value:
7563  *      1 if page is supported / 0 if not
7564  **/
7565 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7566 {
7567         int i;
7568
7569         for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7570                 if (page0->page[i] == page)
7571                         return 1;
7572
7573         return 0;
7574 }
7575
7576 /**
7577  * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7578  * @ipr_cmd:    ipr command struct
7579  *
7580  * This function sends a Page 0xD0 inquiry to the adapter
7581  * to retrieve adapter capabilities.
7582  *
7583  * Return value:
7584  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7585  **/
7586 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7587 {
7588         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7589         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7590         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7591
7592         ENTER;
7593         ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7594         memset(cap, 0, sizeof(*cap));
7595
7596         if (ipr_inquiry_page_supported(page0, 0xD0)) {
7597                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7598                                   ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7599                                   sizeof(struct ipr_inquiry_cap));
7600                 return IPR_RC_JOB_RETURN;
7601         }
7602
7603         LEAVE;
7604         return IPR_RC_JOB_CONTINUE;
7605 }
7606
7607 /**
7608  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7609  * @ipr_cmd:    ipr command struct
7610  *
7611  * This function sends a Page 3 inquiry to the adapter
7612  * to retrieve software VPD information.
7613  *
7614  * Return value:
7615  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7616  **/
7617 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7618 {
7619         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7620
7621         ENTER;
7622
7623         ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7624
7625         ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7626                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7627                           sizeof(struct ipr_inquiry_page3));
7628
7629         LEAVE;
7630         return IPR_RC_JOB_RETURN;
7631 }
7632
7633 /**
7634  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7635  * @ipr_cmd:    ipr command struct
7636  *
7637  * This function sends a Page 0 inquiry to the adapter
7638  * to retrieve supported inquiry pages.
7639  *
7640  * Return value:
7641  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7642  **/
7643 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7644 {
7645         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7646         char type[5];
7647
7648         ENTER;
7649
7650         /* Grab the type out of the VPD and store it away */
7651         memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7652         type[4] = '\0';
7653         ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7654
7655         ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
7656
7657         ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7658                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7659                           sizeof(struct ipr_inquiry_page0));
7660
7661         LEAVE;
7662         return IPR_RC_JOB_RETURN;
7663 }
7664
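/*
 * The adapter type captured above is just the first four characters of
 * the standard-inquiry product ID parsed as a hexadecimal number, so a
 * product ID beginning "57B3" (a made-up example) would yield
 * ioa_cfg->type == 0x57B3. The type is presumably consulted elsewhere
 * in the driver, e.g. when choosing a microcode image.
 */
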
7665 /**
7666  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7667  * @ipr_cmd:    ipr command struct
7668  *
7669  * This function sends a standard inquiry to the adapter.
7670  *
7671  * Return value:
7672  *      IPR_RC_JOB_RETURN
7673  **/
7674 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7675 {
7676         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7677
7678         ENTER;
7679         ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7680
7681         ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7682                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7683                           sizeof(struct ipr_ioa_vpd));
7684
7685         LEAVE;
7686         return IPR_RC_JOB_RETURN;
7687 }
7688
7689 /**
7690  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7691  * @ipr_cmd:    ipr command struct
7692  *
7693  * This function sends an Identify Host Request Response Queue
7694  * command to establish the HRRQ with the adapter.
7695  *
7696  * Return value:
7697  *      IPR_RC_JOB_RETURN
7698  **/
7699 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7700 {
7701         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7702         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7703         struct ipr_hrr_queue *hrrq;
7704
7705         ENTER;
7706         ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7707         dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7708
7709         if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7710                 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
7711
7712                 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7713                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7714
7715                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7716                 if (ioa_cfg->sis64)
7717                         ioarcb->cmd_pkt.cdb[1] = 0x1;
7718
7719                 if (ioa_cfg->nvectors == 1)
7720                         ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7721                 else
7722                         ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7723
7724                 ioarcb->cmd_pkt.cdb[2] =
7725                         ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7726                 ioarcb->cmd_pkt.cdb[3] =
7727                         ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7728                 ioarcb->cmd_pkt.cdb[4] =
7729                         ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7730                 ioarcb->cmd_pkt.cdb[5] =
7731                         ((u64) hrrq->host_rrq_dma) & 0xff;
7732                 ioarcb->cmd_pkt.cdb[7] =
7733                         ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7734                 ioarcb->cmd_pkt.cdb[8] =
7735                         (sizeof(u32) * hrrq->size) & 0xff;
7736
7737                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7738                         ioarcb->cmd_pkt.cdb[9] =
7739                                         ioa_cfg->identify_hrrq_index;
7740
7741                 if (ioa_cfg->sis64) {
7742                         ioarcb->cmd_pkt.cdb[10] =
7743                                 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7744                         ioarcb->cmd_pkt.cdb[11] =
7745                                 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7746                         ioarcb->cmd_pkt.cdb[12] =
7747                                 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7748                         ioarcb->cmd_pkt.cdb[13] =
7749                                 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
7750                 }
7751
7752                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7753                         ioarcb->cmd_pkt.cdb[14] =
7754                                         ioa_cfg->identify_hrrq_index;
7755
7756                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7757                            IPR_INTERNAL_TIMEOUT);
7758
7759                 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7760                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7761
7762                 LEAVE;
7763                 return IPR_RC_JOB_RETURN;
7764         }
7765
7766         LEAVE;
7767         return IPR_RC_JOB_CONTINUE;
7768 }
7769
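/*
 * CDB layout assembled above for IPR_ID_HOST_RR_Q, for reference:
 *
 *	cdb[1]      format/flag bits (0x1 on sis64, plus
 *	            IPR_ID_HRRQ_SELE_ENABLE with multiple vectors)
 *	cdb[2..5]   low 32 bits of the host RRQ DMA address
 *	cdb[7..8]   queue size in bytes (sizeof(u32) * hrrq->size)
 *	cdb[9]      queue index, when queue selection is enabled
 *	cdb[10..13] high 32 bits of the DMA address (sis64 only)
 *	cdb[14]     queue index again (apparently the sis64 position)
 *
 * The function re-queues itself as the job step until every HRRQ has
 * been identified to the adapter.
 */
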
7770 /**
7771  * ipr_reset_timer_done - Adapter reset timer function
7772  * @ipr_cmd:    ipr command struct
7773  *
7774  * Description: This function is used in adapter reset processing
7775  * for timing events. If the reset_cmd pointer in the IOA
7776  * config struct does not point to this command, we are doing nested
7777  * resets and fail_all_ops will take care of freeing the
7778  * command block.
7779  *
7780  * Return value:
7781  *      none
7782  **/
7783 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7784 {
7785         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7786         unsigned long lock_flags = 0;
7787
7788         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7789
7790         if (ioa_cfg->reset_cmd == ipr_cmd) {
7791                 list_del(&ipr_cmd->queue);
7792                 ipr_cmd->done(ipr_cmd);
7793         }
7794
7795         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7796 }
7797
7798 /**
7799  * ipr_reset_start_timer - Start a timer for adapter reset job
7800  * @ipr_cmd:    ipr command struct
7801  * @timeout:    timeout value
7802  *
7803  * Description: This function is used in adapter reset processing
7804  * for timing events. If the reset_cmd pointer in the IOA
7805  * config struct does not point to this command, we are doing nested
7806  * resets and fail_all_ops will take care of freeing the
7807  * command block.
7808  *
7809  * Return value:
7810  *      none
7811  **/
7812 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7813                                   unsigned long timeout)
7814 {
7816         ENTER;
7817         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7818         ipr_cmd->done = ipr_reset_ioa_job;
7819
7820         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7821         ipr_cmd->timer.expires = jiffies + timeout;
7822         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7823         add_timer(&ipr_cmd->timer);
7824 }
7825
7826 /**
7827  * ipr_init_ioa_mem - Initialize ioa_cfg control block
7828  * @ioa_cfg:    ioa cfg struct
7829  *
7830  * Return value:
7831  *      nothing
7832  **/
7833 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7834 {
7835         struct ipr_hrr_queue *hrrq;
7836
7837         for_each_hrrq(hrrq, ioa_cfg) {
7838                 spin_lock(&hrrq->_lock);
7839                 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7840
7841                 /* Initialize Host RRQ pointers */
7842                 hrrq->hrrq_start = hrrq->host_rrq;
7843                 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
7844                 hrrq->hrrq_curr = hrrq->hrrq_start;
7845                 hrrq->toggle_bit = 1;
7846                 spin_unlock(&hrrq->_lock);
7847         }
7848         wmb();
7849
7850         ioa_cfg->identify_hrrq_index = 0;
7851         if (ioa_cfg->hrrq_num == 1)
7852                 atomic_set(&ioa_cfg->hrrq_index, 0);
7853         else
7854                 atomic_set(&ioa_cfg->hrrq_index, 1);
7855
7856         /* Zero out config table */
7857         memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7858 }
7859
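/*
 * The toggle_bit initialized above supports what looks like the usual
 * producer/consumer wrap-detection scheme (an assumption; the consumer
 * side lives elsewhere in this file): the adapter stamps each response
 * descriptor it writes with the current toggle value, the host treats
 * an entry as new only while its toggle matches hrrq->toggle_bit, and
 * the host flips its copy each time hrrq_curr wraps from hrrq_end back
 * to hrrq_start. Starting at 1 matches an adapter whose first pass of
 * writes has the bit set.
 */
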
7860 /**
7861  * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7862  * @ipr_cmd:    ipr command struct
7863  *
7864  * Return value:
7865  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7866  **/
7867 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7868 {
7869         unsigned long stage, stage_time;
7870         u32 feedback;
7871         volatile u32 int_reg;
7872         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7873         u64 maskval = 0;
7874
7875         feedback = readl(ioa_cfg->regs.init_feedback_reg);
7876         stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7877         stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7878
7879         ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7880
7881         /* sanity check the stage_time value */
7882         if (stage_time == 0)
7883                 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7884         else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7885                 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7886         else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7887                 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7888
7889         if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7890                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7891                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7892                 stage_time = ioa_cfg->transop_timeout;
7893                 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7894         } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7895                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7896                 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7897                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7898                         maskval = IPR_PCII_IPL_STAGE_CHANGE;
7899                         maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7900                         writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7901                         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7902                         return IPR_RC_JOB_CONTINUE;
7903                 }
7904         }
7905
7906         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7907         ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7908         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7909         ipr_cmd->done = ipr_reset_ioa_job;
7910         add_timer(&ipr_cmd->timer);
7911
7912         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7913
7914         return IPR_RC_JOB_RETURN;
7915 }
7916
7917 /**
7918  * ipr_reset_enable_ioa - Enable the IOA following a reset.
7919  * @ipr_cmd:    ipr command struct
7920  *
7921  * This function reinitializes some control blocks and
7922  * enables destructive diagnostics on the adapter.
7923  *
7924  * Return value:
7925  *      IPR_RC_JOB_RETURN
7926  **/
7927 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7928 {
7929         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7930         volatile u32 int_reg;
7931         volatile u64 maskval;
7932         int i;
7933
7934         ENTER;
7935         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7936         ipr_init_ioa_mem(ioa_cfg);
7937
7938         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7939                 spin_lock(&ioa_cfg->hrrq[i]._lock);
7940                 ioa_cfg->hrrq[i].allow_interrupts = 1;
7941                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7942         }
7943         wmb();
7944         if (ioa_cfg->sis64) {
7945                 /* Set the adapter to the correct endian mode. */
7946                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7947                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7948         }
7949
7950         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7951
7952         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7953                 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
7954                        ioa_cfg->regs.clr_interrupt_mask_reg32);
7955                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7956                 return IPR_RC_JOB_CONTINUE;
7957         }
7958
7959         /* Enable destructive diagnostics on IOA */
7960         writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7961
7962         if (ioa_cfg->sis64) {
7963                 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7964                 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7965                 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7966         } else
7967                 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7968
7969         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7970
7971         dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7972
7973         if (ioa_cfg->sis64) {
7974                 ipr_cmd->job_step = ipr_reset_next_stage;
7975                 return IPR_RC_JOB_CONTINUE;
7976         }
7977
7978         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7979         ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7980         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7981         ipr_cmd->done = ipr_reset_ioa_job;
7982         add_timer(&ipr_cmd->timer);
7983         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7984
7985         LEAVE;
7986         return IPR_RC_JOB_RETURN;
7987 }
7988
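/*
 * Bringup from here is asynchronous: the doorbell write above tells
 * the adapter to begin initialization with destructive diagnostics
 * enabled, and completion is signalled by the transition-to-operational
 * interrupt. sis64 adapters additionally report IPL progress through
 * the init feedback register, so they branch to ipr_reset_next_stage()
 * and size each timeout per stage rather than arming one large
 * transop_timeout as the 32-bit path above does.
 */
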
7989 /**
7990  * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7991  * @ipr_cmd:    ipr command struct
7992  *
7993  * This function is invoked when an adapter dump has run out
7994  * of processing time.
7995  *
7996  * Return value:
7997  *      IPR_RC_JOB_CONTINUE
7998  **/
7999 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8000 {
8001         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8002
8003         if (ioa_cfg->sdt_state == GET_DUMP)
8004                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8005         else if (ioa_cfg->sdt_state == READ_DUMP)
8006                 ioa_cfg->sdt_state = ABORT_DUMP;
8007
8008         ioa_cfg->dump_timeout = 1;
8009         ipr_cmd->job_step = ipr_reset_alert;
8010
8011         return IPR_RC_JOB_CONTINUE;
8012 }
8013
8014 /**
8015  * ipr_unit_check_no_data - Log a unit check/no data error log
8016  * @ioa_cfg:            ioa config struct
8017  *
8018  * Logs an error indicating the adapter unit checked, but for some
8019  * reason, we were unable to fetch the unit check buffer.
8020  *
8021  * Return value:
8022  *      nothing
8023  **/
8024 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8025 {
8026         ioa_cfg->errors_logged++;
8027         dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8028 }
8029
8030 /**
8031  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8032  * @ioa_cfg:            ioa config struct
8033  *
8034  * Fetches the unit check buffer from the adapter by clocking the data
8035  * through the mailbox register.
8036  *
8037  * Return value:
8038  *      nothing
8039  **/
8040 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8041 {
8042         unsigned long mailbox;
8043         struct ipr_hostrcb *hostrcb;
8044         struct ipr_uc_sdt sdt;
8045         int rc, length;
8046         u32 ioasc;
8047
8048         mailbox = readl(ioa_cfg->ioa_mailbox);
8049
8050         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8051                 ipr_unit_check_no_data(ioa_cfg);
8052                 return;
8053         }
8054
8055         memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8056         rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8057                                         (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8058
8059         if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8060             ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8061             (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8062                 ipr_unit_check_no_data(ioa_cfg);
8063                 return;
8064         }
8065
8066         /* Find length of the first sdt entry (UC buffer) */
8067         if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8068                 length = be32_to_cpu(sdt.entry[0].end_token);
8069         else
8070                 length = (be32_to_cpu(sdt.entry[0].end_token) -
8071                           be32_to_cpu(sdt.entry[0].start_token)) &
8072                           IPR_FMT2_MBX_ADDR_MASK;
8073
8074         hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8075                              struct ipr_hostrcb, queue);
8076         list_del(&hostrcb->queue);
8077         memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8078
8079         rc = ipr_get_ldump_data_section(ioa_cfg,
8080                                         be32_to_cpu(sdt.entry[0].start_token),
8081                                         (__be32 *)&hostrcb->hcam,
8082                                         min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8083
8084         if (!rc) {
8085                 ipr_handle_log_data(ioa_cfg, hostrcb);
8086                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8087                 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8088                     ioa_cfg->sdt_state == GET_DUMP)
8089                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8090         } else
8091                 ipr_unit_check_no_data(ioa_cfg);
8092
8093         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8094 }
8095
8096 /**
8097  * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8098  * @ipr_cmd:    ipr command struct
8099  *
8100  * Description: This function fetches the unit check buffer from the adapter.
8101  *
8102  * Return value:
8103  *      IPR_RC_JOB_RETURN
8104  **/
8105 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8106 {
8107         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8108
8109         ENTER;
8110         ioa_cfg->ioa_unit_checked = 0;
8111         ipr_get_unit_check_buffer(ioa_cfg);
8112         ipr_cmd->job_step = ipr_reset_alert;
8113         ipr_reset_start_timer(ipr_cmd, 0);
8114
8115         LEAVE;
8116         return IPR_RC_JOB_RETURN;
8117 }
8118
8119 /**
8120  * ipr_reset_restore_cfg_space - Restore PCI config space.
8121  * @ipr_cmd:    ipr command struct
8122  *
8123  * Description: This function restores the saved PCI config space of
8124  * the adapter, fails all outstanding ops back to the callers, and
8125  * fetches the dump/unit check if applicable to this reset.
8126  *
8127  * Return value:
8128  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8129  **/
8130 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8131 {
8132         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8133         u32 int_reg;
8134
8135         ENTER;
8136         ioa_cfg->pdev->state_saved = true;
8137         pci_restore_state(ioa_cfg->pdev);
8138
8139         if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8140                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8141                 return IPR_RC_JOB_CONTINUE;
8142         }
8143
8144         ipr_fail_all_ops(ioa_cfg);
8145
8146         if (ioa_cfg->sis64) {
8147                 /* Set the adapter to the correct endian mode. */
8148                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8149                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8150         }
8151
8152         if (ioa_cfg->ioa_unit_checked) {
8153                 if (ioa_cfg->sis64) {
8154                         ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8155                         ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8156                         return IPR_RC_JOB_RETURN;
8157                 } else {
8158                         ioa_cfg->ioa_unit_checked = 0;
8159                         ipr_get_unit_check_buffer(ioa_cfg);
8160                         ipr_cmd->job_step = ipr_reset_alert;
8161                         ipr_reset_start_timer(ipr_cmd, 0);
8162                         return IPR_RC_JOB_RETURN;
8163                 }
8164         }
8165
8166         if (ioa_cfg->in_ioa_bringdown) {
8167                 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8168         } else {
8169                 ipr_cmd->job_step = ipr_reset_enable_ioa;
8170
8171                 if (GET_DUMP == ioa_cfg->sdt_state) {
8172                         ioa_cfg->sdt_state = READ_DUMP;
8173                         ioa_cfg->dump_timeout = 0;
8174                         if (ioa_cfg->sis64)
8175                                 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8176                         else
8177                                 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8178                         ipr_cmd->job_step = ipr_reset_wait_for_dump;
8179                         schedule_work(&ioa_cfg->work_q);
8180                         return IPR_RC_JOB_RETURN;
8181                 }
8182         }
8183
8184         LEAVE;
8185         return IPR_RC_JOB_CONTINUE;
8186 }
8187
8188 /**
8189  * ipr_reset_bist_done - BIST has completed on the adapter.
8190  * @ipr_cmd:    ipr command struct
8191  *
8192  * Description: Unblock config space and resume the reset process.
8193  *
8194  * Return value:
8195  *      IPR_RC_JOB_CONTINUE
8196  **/
8197 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8198 {
8199         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8200
8201         ENTER;
8202         if (ioa_cfg->cfg_locked)
8203                 pci_cfg_access_unlock(ioa_cfg->pdev);
8204         ioa_cfg->cfg_locked = 0;
8205         ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8206         LEAVE;
8207         return IPR_RC_JOB_CONTINUE;
8208 }
8209
8210 /**
8211  * ipr_reset_start_bist - Run BIST on the adapter.
8212  * @ipr_cmd:    ipr command struct
8213  *
8214  * Description: This function runs BIST on the adapter, then delays 2 seconds.
8215  *
8216  * Return value:
8217  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8218  **/
8219 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8220 {
8221         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8222         int rc = PCIBIOS_SUCCESSFUL;
8223
8224         ENTER;
8225         if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8226                 writel(IPR_UPROCI_SIS64_START_BIST,
8227                        ioa_cfg->regs.set_uproc_interrupt_reg32);
8228         else
8229                 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8230
8231         if (rc == PCIBIOS_SUCCESSFUL) {
8232                 ipr_cmd->job_step = ipr_reset_bist_done;
8233                 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8234                 rc = IPR_RC_JOB_RETURN;
8235         } else {
8236                 if (ioa_cfg->cfg_locked)
8237                         pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8238                 ioa_cfg->cfg_locked = 0;
8239                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8240                 rc = IPR_RC_JOB_CONTINUE;
8241         }
8242
8243         LEAVE;
8244         return rc;
8245 }
8246
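/*
 * Two BIST mechanisms are supported: chips with bist_method == IPR_MMIO
 * are kicked through a uproc doorbell write, while the others use the
 * standard PCI_BIST register in config space. In both cases the job
 * simply waits IPR_WAIT_FOR_BIST_TIMEOUT before moving on to
 * ipr_reset_bist_done(); the fixed delay suggests there is no
 * completion interrupt to wait on.
 */
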
8247 /**
8248  * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8249  * @ipr_cmd:    ipr command struct
8250  *
8251  * Description: This clears PCI reset to the adapter and delays two seconds.
8252  *
8253  * Return value:
8254  *      IPR_RC_JOB_RETURN
8255  **/
8256 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8257 {
8258         ENTER;
8259         pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
8260         ipr_cmd->job_step = ipr_reset_bist_done;
8261         ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8262         LEAVE;
8263         return IPR_RC_JOB_RETURN;
8264 }
8265
8266 /**
8267  * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8268  * @ipr_cmd:    ipr command struct
8269  *
8270  * Description: This asserts PCI reset to the adapter.
8271  *
8272  * Return value:
8273  *      IPR_RC_JOB_RETURN
8274  **/
8275 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8276 {
8277         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8278         struct pci_dev *pdev = ioa_cfg->pdev;
8279
8280         ENTER;
8281         pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8282         ipr_cmd->job_step = ipr_reset_slot_reset_done;
8283         ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
8284         LEAVE;
8285         return IPR_RC_JOB_RETURN;
8286 }
8287
8288 /**
8289  * ipr_reset_block_config_access_wait - Wait for permission to block config access
8290  * @ipr_cmd:    ipr command struct
8291  *
8292  * Description: This attempts to block config access to the IOA.
8293  *
8294  * Return value:
8295  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8296  **/
8297 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8298 {
8299         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8300         int rc = IPR_RC_JOB_CONTINUE;
8301
8302         if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8303                 ioa_cfg->cfg_locked = 1;
8304                 ipr_cmd->job_step = ioa_cfg->reset;
8305         } else {
8306                 if (ipr_cmd->u.time_left) {
8307                         rc = IPR_RC_JOB_RETURN;
8308                         ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8309                         ipr_reset_start_timer(ipr_cmd,
8310                                               IPR_CHECK_FOR_RESET_TIMEOUT);
8311                 } else {
8312                         ipr_cmd->job_step = ioa_cfg->reset;
8313                         dev_err(&ioa_cfg->pdev->dev,
8314                                 "Timed out waiting to lock config access. Resetting anyway.\n");
8315                 }
8316         }
8317
8318         return rc;
8319 }
8320
8321 /**
8322  * ipr_reset_block_config_access - Block config access to the IOA
8323  * @ipr_cmd:    ipr command struct
8324  *
8325  * Description: This attempts to block config access to the IOA
8326  *
8327  * Return value:
8328  *      IPR_RC_JOB_CONTINUE
8329  **/
8330 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8331 {
8332         ipr_cmd->ioa_cfg->cfg_locked = 0;
8333         ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8334         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8335         return IPR_RC_JOB_CONTINUE;
8336 }
8337
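/*
 * The two functions above form a bounded, non-sleeping polling loop:
 * u.time_left starts at IPR_WAIT_FOR_RESET_TIMEOUT and is decremented
 * by IPR_CHECK_FOR_RESET_TIMEOUT on every timer-driven retry of
 * pci_cfg_access_trylock(). When the budget is exhausted the reset
 * proceeds anyway, trading strict exclusion of concurrent config-space
 * users for guaranteed forward progress of the recovery.
 */
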
8338 /**
8339  * ipr_reset_allowed - Query whether or not IOA can be reset
8340  * @ioa_cfg:    ioa config struct
8341  *
8342  * Return value:
8343  *      0 if reset not allowed / non-zero if reset is allowed
8344  **/
8345 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8346 {
8347         volatile u32 temp_reg;
8348
8349         temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8350         return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8351 }
8352
8353 /**
8354  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8355  * @ipr_cmd:    ipr command struct
8356  *
8357  * Description: This function waits for adapter permission to run BIST,
8358  * then runs BIST. If the adapter does not give permission after a
8359  * reasonable time, we will reset the adapter anyway. Resetting the
8360  * adapter without warning risks losing its persistent error log:
8361  * if the reset happens while the adapter is writing to its flash,
8362  * the affected flash segment will have bad ECC and be zeroed.
8364  *
8365  * Return value:
8366  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8367  **/
8368 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8369 {
8370         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8371         int rc = IPR_RC_JOB_RETURN;
8372
8373         if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8374                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8375                 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8376         } else {
8377                 ipr_cmd->job_step = ipr_reset_block_config_access;
8378                 rc = IPR_RC_JOB_CONTINUE;
8379         }
8380
8381         return rc;
8382 }
8383
8384 /**
8385  * ipr_reset_alert - Alert the adapter of a pending reset
8386  * @ipr_cmd:    ipr command struct
8387  *
8388  * Description: This function alerts the adapter that it will be reset.
8389  * If memory space is not currently enabled, proceed directly
8390  * to running BIST on the adapter. The timer must always be started
8391  * so we guarantee we do not run BIST from ipr_isr.
8392  *
8393  * Return value:
8394  *      IPR_RC_JOB_RETURN
8395  **/
8396 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8397 {
8398         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8399         u16 cmd_reg;
8400         int rc;
8401
8402         ENTER;
8403         rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8404
8405         if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8406                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8407                 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8408                 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8409         } else {
8410                 ipr_cmd->job_step = ipr_reset_block_config_access;
8411         }
8412
8413         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8414         ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8415
8416         LEAVE;
8417         return IPR_RC_JOB_RETURN;
8418 }
8419
8420 /**
8421  * ipr_reset_ucode_download_done - Microcode download completion
8422  * @ipr_cmd:    ipr command struct
8423  *
8424  * Description: This function unmaps the microcode download buffer.
8425  *
8426  * Return value:
8427  *      IPR_RC_JOB_CONTINUE
8428  **/
8429 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8430 {
8431         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8432         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8433
8434         pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
8435                      sglist->num_sg, DMA_TO_DEVICE);
8436
8437         ipr_cmd->job_step = ipr_reset_alert;
8438         return IPR_RC_JOB_CONTINUE;
8439 }
8440
8441 /**
8442  * ipr_reset_ucode_download - Download microcode to the adapter
8443  * @ipr_cmd:    ipr command struct
8444  *
8445  * Description: This function checks to see if there is microcode
8446  * to download to the adapter. If there is, a download is performed.
8447  *
8448  * Return value:
8449  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8450  **/
8451 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8452 {
8453         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8454         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8455
8456         ENTER;
8457         ipr_cmd->job_step = ipr_reset_alert;
8458
8459         if (!sglist)
8460                 return IPR_RC_JOB_CONTINUE;
8461
8462         ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8463         ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8464         ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8465         ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
8466         ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8467         ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8468         ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8469
8470         if (ioa_cfg->sis64)
8471                 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8472         else
8473                 ipr_build_ucode_ioadl(ipr_cmd, sglist);
8474         ipr_cmd->job_step = ipr_reset_ucode_download_done;
8475
8476         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8477                    IPR_WRITE_BUFFER_TIMEOUT);
8478
8479         LEAVE;
8480         return IPR_RC_JOB_RETURN;
8481 }
8482
8483 /**
8484  * ipr_reset_shutdown_ioa - Shutdown the adapter
8485  * @ipr_cmd:    ipr command struct
8486  *
8487  * Description: This function issues an adapter shutdown of the
8488  * specified type to the specified adapter as part of the
8489  * adapter reset job.
8490  *
8491  * Return value:
8492  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8493  **/
8494 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8495 {
8496         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8497         enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8498         unsigned long timeout;
8499         int rc = IPR_RC_JOB_CONTINUE;
8500
8501         ENTER;
8502         if (shutdown_type != IPR_SHUTDOWN_NONE &&
8503                         !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8504                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8505                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8506                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8507                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
8508
8509                 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8510                         timeout = IPR_SHUTDOWN_TIMEOUT;
8511                 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8512                         timeout = IPR_INTERNAL_TIMEOUT;
8513                 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8514                         timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
8515                 else
8516                         timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
8517
8518                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8519
8520                 rc = IPR_RC_JOB_RETURN;
8521                 ipr_cmd->job_step = ipr_reset_ucode_download;
8522         } else
8523                 ipr_cmd->job_step = ipr_reset_alert;
8524
8525         LEAVE;
8526         return rc;
8527 }
8528
8529 /**
8530  * ipr_reset_ioa_job - Adapter reset job
8531  * @ipr_cmd:    ipr command struct
8532  *
8533  * Description: This function is the job router for the adapter reset job.
8534  *
8535  * Return value:
8536  *      none
8537  **/
8538 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8539 {
8540         u32 rc, ioasc;
8541         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8542
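	/*
	 * Run the reset state machine: each job_step either completes
	 * synchronously (IPR_RC_JOB_CONTINUE), in which case the next
	 * step runs immediately, or kicks off asynchronous work and
	 * unwinds (IPR_RC_JOB_RETURN).
	 */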
8543         do {
8544                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8545
8546                 if (ioa_cfg->reset_cmd != ipr_cmd) {
8547                         /*
8548                          * We are doing nested adapter resets and this is
8549                          * not the current reset job.
8550                          */
8551                         list_add_tail(&ipr_cmd->queue,
8552                                         &ipr_cmd->hrrq->hrrq_free_q);
8553                         return;
8554                 }
8555
8556                 if (IPR_IOASC_SENSE_KEY(ioasc)) {
8557                         rc = ipr_cmd->job_step_failed(ipr_cmd);
8558                         if (rc == IPR_RC_JOB_RETURN)
8559                                 return;
8560                 }
8561
8562                 ipr_reinit_ipr_cmnd(ipr_cmd);
8563                 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
8564                 rc = ipr_cmd->job_step(ipr_cmd);
8565         } while (rc == IPR_RC_JOB_CONTINUE);
8566 }
8567
8568 /**
8569  * _ipr_initiate_ioa_reset - Initiate an adapter reset
8570  * @ioa_cfg:            ioa config struct
8571  * @job_step:           first job step of reset job
8572  * @shutdown_type:      shutdown type
8573  *
8574  * Description: This function will initiate the reset of the given adapter
8575  * starting at the selected job step.
8576  * If the caller needs to wait on the completion of the reset,
8577  * the caller must sleep on the reset_wait_q.
8578  *
8579  * Return value:
8580  *      none
8581  **/
8582 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8583                                     int (*job_step) (struct ipr_cmnd *),
8584                                     enum ipr_shutdown_type shutdown_type)
8585 {
8586         struct ipr_cmnd *ipr_cmd;
8587         int i;
8588
8589         ioa_cfg->in_reset_reload = 1;
8590         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8591                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8592                 ioa_cfg->hrrq[i].allow_cmds = 0;
8593                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8594         }
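	/* make the allow_cmds updates visible before blocking the host */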
8595         wmb();
8596         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
8597                 scsi_block_requests(ioa_cfg->host);
8598
8599         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8600         ioa_cfg->reset_cmd = ipr_cmd;
8601         ipr_cmd->job_step = job_step;
8602         ipr_cmd->u.shutdown_type = shutdown_type;
8603
8604         ipr_reset_ioa_job(ipr_cmd);
8605 }
8606
8607 /**
8608  * ipr_initiate_ioa_reset - Initiate an adapter reset
8609  * @ioa_cfg:            ioa config struct
8610  * @shutdown_type:      shutdown type
8611  *
8612  * Description: This function will initiate the reset of the given adapter.
8613  * If the caller needs to wait on the completion of the reset,
8614  * the caller must sleep on the reset_wait_q.
8615  *
8616  * Return value:
8617  *      none
8618  **/
8619 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8620                                    enum ipr_shutdown_type shutdown_type)
8621 {
8622         int i;
8623
8624         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
8625                 return;
8626
8627         if (ioa_cfg->in_reset_reload) {
8628                 if (ioa_cfg->sdt_state == GET_DUMP)
8629                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8630                 else if (ioa_cfg->sdt_state == READ_DUMP)
8631                         ioa_cfg->sdt_state = ABORT_DUMP;
8632         }
8633
8634         if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8635                 dev_err(&ioa_cfg->pdev->dev,
8636                         "IOA taken offline - error recovery failed\n");
8637
8638                 ioa_cfg->reset_retries = 0;
8639                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8640                         spin_lock(&ioa_cfg->hrrq[i]._lock);
8641                         ioa_cfg->hrrq[i].ioa_is_dead = 1;
8642                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
8643                 }
8644                 wmb();
8645
8646                 if (ioa_cfg->in_ioa_bringdown) {
8647                         ioa_cfg->reset_cmd = NULL;
8648                         ioa_cfg->in_reset_reload = 0;
8649                         ipr_fail_all_ops(ioa_cfg);
8650                         wake_up_all(&ioa_cfg->reset_wait_q);
8651
8652                         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
8653                                 spin_unlock_irq(ioa_cfg->host->host_lock);
8654                                 scsi_unblock_requests(ioa_cfg->host);
8655                                 spin_lock_irq(ioa_cfg->host->host_lock);
8656                         }
8657                         return;
8658                 } else {
8659                         ioa_cfg->in_ioa_bringdown = 1;
8660                         shutdown_type = IPR_SHUTDOWN_NONE;
8661                 }
8662         }
8663
8664         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8665                                 shutdown_type);
8666 }
8667
8668 /**
8669  * ipr_reset_freeze - Hold off all I/O activity
8670  * @ipr_cmd:    ipr command struct
8671  *
8672  * Description: If the PCI slot is frozen, hold off all I/O
8673  * activity; then, as soon as the slot is available again,
8674  * initiate an adapter reset.
8675  */
8676 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8677 {
8678         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8679         int i;
8680
8681         /* Disallow new interrupts, avoid loop */
8682         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8683                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8684                 ioa_cfg->hrrq[i].allow_interrupts = 0;
8685                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8686         }
8687         wmb();
8688         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8689         ipr_cmd->done = ipr_reset_ioa_job;
8690         return IPR_RC_JOB_RETURN;
8691 }
8692
8693 /**
8694  * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
8695  * @pdev:       PCI device struct
8696  *
8697  * Description: This routine is called to tell us that the MMIO
8698  * access to the IOA has been restored.
8699  */
8700 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
8701 {
8702         unsigned long flags = 0;
8703         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8704
8705         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8706         if (!ioa_cfg->probe_done)
8707                 pci_save_state(pdev);
8708         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8709         return PCI_ERS_RESULT_NEED_RESET;
8710 }
8711
8712 /**
8713  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8714  * @pdev:       PCI device struct
8715  *
8716  * Description: This routine is called to tell us that the PCI bus
8717  * is down. Can't do anything here, except put the device driver
8718  * into a holding pattern, waiting for the PCI bus to come back.
8719  */
8720 static void ipr_pci_frozen(struct pci_dev *pdev)
8721 {
8722         unsigned long flags = 0;
8723         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8724
8725         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8726         if (ioa_cfg->probe_done)
8727                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8728         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8729 }
8730
8731 /**
8732  * ipr_pci_slot_reset - Called when PCI slot has been reset.
8733  * @pdev:       PCI device struct
8734  *
8735  * Description: This routine is called by the pci error recovery
8736  * code after the PCI slot has been reset, just before we
8737  * should resume normal operations.
8738  */
8739 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8740 {
8741         unsigned long flags = 0;
8742         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8743
8744         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8745         if (ioa_cfg->probe_done) {
8746                 if (ioa_cfg->needs_warm_reset)
8747                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8748                 else
8749                         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8750                                                 IPR_SHUTDOWN_NONE);
8751         } else
8752                 wake_up_all(&ioa_cfg->eeh_wait_q);
8753         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8754         return PCI_ERS_RESULT_RECOVERED;
8755 }
8756
8757 /**
8758  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8759  * @pdev:       PCI device struct
8760  *
8761  * Description: This routine is called when the PCI bus has
8762  * permanently failed.
8763  */
8764 static void ipr_pci_perm_failure(struct pci_dev *pdev)
8765 {
8766         unsigned long flags = 0;
8767         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8768         int i;
8769
8770         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8771         if (ioa_cfg->probe_done) {
8772                 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8773                         ioa_cfg->sdt_state = ABORT_DUMP;
8774                 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
8775                 ioa_cfg->in_ioa_bringdown = 1;
8776                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8777                         spin_lock(&ioa_cfg->hrrq[i]._lock);
8778                         ioa_cfg->hrrq[i].allow_cmds = 0;
8779                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
8780                 }
8781                 wmb();
8782                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8783         } else
8784                 wake_up_all(&ioa_cfg->eeh_wait_q);
8785         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8786 }
8787
8788 /**
8789  * ipr_pci_error_detected - Called when a PCI error is detected.
8790  * @pdev:       PCI device struct
8791  * @state:      PCI channel state
8792  *
8793  * Description: Called when a PCI error is detected.
8794  *
8795  * Return value:
8796  *      PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
8797  */
8798 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
8799                                                pci_channel_state_t state)
8800 {
8801         switch (state) {
8802         case pci_channel_io_frozen:
8803                 ipr_pci_frozen(pdev);
8804                 return PCI_ERS_RESULT_CAN_RECOVER;
8805         case pci_channel_io_perm_failure:
8806                 ipr_pci_perm_failure(pdev);
8807                 return PCI_ERS_RESULT_DISCONNECT;
8809         default:
8810                 break;
8811         }
8812         return PCI_ERS_RESULT_NEED_RESET;
8813 }
8814
8815 /**
8816  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8817  * @ioa_cfg:    ioa cfg struct
8818  *
8819  * Description: This is the second phase of adapter initialization.
8820  * This function takes care of initializing the adapter to the point
8821  * where it can accept new commands.
8822  *
8823  * Return value:
8824  *      0 on success / -EIO on failure
8825  **/
8826 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
8827 {
8828         int rc = 0;
8829         unsigned long host_lock_flags = 0;
8830
8831         ENTER;
8832         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8833         dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
8834         ioa_cfg->probe_done = 1;
8835         if (ioa_cfg->needs_hard_reset) {
8836                 ioa_cfg->needs_hard_reset = 0;
8837                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8838         } else
8839                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8840                                         IPR_SHUTDOWN_NONE);
8841         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8842         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8843         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8844
8845         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8846                 rc = -EIO;
8847         } else if (ipr_invalid_adapter(ioa_cfg)) {
8848                 if (!ipr_testmode)
8849                         rc = -EIO;
8850
8851                 dev_err(&ioa_cfg->pdev->dev,
8852                         "Adapter not supported in this hardware configuration.\n");
8853         }
8854
8855         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8856
8857         LEAVE;
8858         return rc;
8859 }
8860
8861 /**
8862  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8863  * @ioa_cfg:    ioa config struct
8864  *
8865  * Return value:
8866  *      none
8867  **/
8868 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8869 {
8870         int i;
8871
8872         for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8873                 if (ioa_cfg->ipr_cmnd_list[i])
8874                         pci_pool_free(ioa_cfg->ipr_cmd_pool,
8875                                       ioa_cfg->ipr_cmnd_list[i],
8876                                       ioa_cfg->ipr_cmnd_list_dma[i]);
8877
8878                 ioa_cfg->ipr_cmnd_list[i] = NULL;
8879         }
8880
8881         if (ioa_cfg->ipr_cmd_pool)
8882                 pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
8883
8884         kfree(ioa_cfg->ipr_cmnd_list);
8885         kfree(ioa_cfg->ipr_cmnd_list_dma);
8886         ioa_cfg->ipr_cmnd_list = NULL;
8887         ioa_cfg->ipr_cmnd_list_dma = NULL;
8888         ioa_cfg->ipr_cmd_pool = NULL;
8889 }
8890
8891 /**
8892  * ipr_free_mem - Frees memory allocated for an adapter
8893  * @ioa_cfg:    ioa cfg struct
8894  *
8895  * Return value:
8896  *      nothing
8897  **/
8898 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8899 {
8900         int i;
8901
8902         kfree(ioa_cfg->res_entries);
8903         pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
8904                             ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8905         ipr_free_cmd_blks(ioa_cfg);
8906
8907         for (i = 0; i < ioa_cfg->hrrq_num; i++)
8908                 pci_free_consistent(ioa_cfg->pdev,
8909                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
8910                                         ioa_cfg->hrrq[i].host_rrq,
8911                                         ioa_cfg->hrrq[i].host_rrq_dma);
8912
8913         pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
8914                             ioa_cfg->u.cfg_table,
8915                             ioa_cfg->cfg_table_dma);
8916
8917         for (i = 0; i < IPR_NUM_HCAMS; i++) {
8918                 pci_free_consistent(ioa_cfg->pdev,
8919                                     sizeof(struct ipr_hostrcb),
8920                                     ioa_cfg->hostrcb[i],
8921                                     ioa_cfg->hostrcb_dma[i]);
8922         }
8923
8924         ipr_free_dump(ioa_cfg);
8925         kfree(ioa_cfg->trace);
8926 }
8927
8928 /**
8929  * ipr_free_all_resources - Free all allocated resources for an adapter.
8930  * @ioa_cfg:    ioa config struct
8931  *
8932  * This function frees all allocated resources for the
8933  * specified adapter.
8934  *
8935  * Return value:
8936  *      none
8937  **/
8938 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8939 {
8940         struct pci_dev *pdev = ioa_cfg->pdev;
8941
8942         ENTER;
8943         if (ioa_cfg->intr_flag == IPR_USE_MSI ||
8944             ioa_cfg->intr_flag == IPR_USE_MSIX) {
8945                 int i;
8946                 for (i = 0; i < ioa_cfg->nvectors; i++)
8947                         free_irq(ioa_cfg->vectors_info[i].vec,
8948                                 &ioa_cfg->hrrq[i]);
8949         } else
8950                 free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
8951
8952         if (ioa_cfg->intr_flag == IPR_USE_MSI) {
8953                 pci_disable_msi(pdev);
8954                 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
8955         } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
8956                 pci_disable_msix(pdev);
8957                 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
8958         }
8959
8960         iounmap(ioa_cfg->hdw_dma_regs);
8961         pci_release_regions(pdev);
8962         ipr_free_mem(ioa_cfg);
8963         scsi_host_put(ioa_cfg->host);
8964         pci_disable_device(pdev);
8965         LEAVE;
8966 }
8967
8968 /**
8969  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8970  * @ioa_cfg:    ioa config struct
8971  *
8972  * Return value:
8973  *      0 on success / -ENOMEM on allocation failure
8974  **/
8975 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8976 {
8977         struct ipr_cmnd *ipr_cmd;
8978         struct ipr_ioarcb *ioarcb;
8979         dma_addr_t dma_addr;
8980         int i, entries_each_hrrq, hrrq_id = 0;
8981
8982         ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
8983                                                 sizeof(struct ipr_cmnd), 512, 0);
8984
8985         if (!ioa_cfg->ipr_cmd_pool)
8986                 return -ENOMEM;
8987
8988         ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
8989         ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
8990
8991         if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
8992                 ipr_free_cmd_blks(ioa_cfg);
8993                 return -ENOMEM;
8994         }
8995
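	/*
	 * Size each HRRQ: when multiple queues are in use, hrrq[0] is
	 * reserved for internal commands and the base command blocks are
	 * split evenly among the remaining queues; any leftover blocks
	 * are folded into the last queue below.
	 */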
8996         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8997                 if (ioa_cfg->hrrq_num > 1) {
8998                         if (i == 0) {
8999                                 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9000                                 ioa_cfg->hrrq[i].min_cmd_id = 0;
9001                                 ioa_cfg->hrrq[i].max_cmd_id =
9002                                         (entries_each_hrrq - 1);
9003                         } else {
9004                                 entries_each_hrrq =
9005                                         IPR_NUM_BASE_CMD_BLKS/
9006                                         (ioa_cfg->hrrq_num - 1);
9007                                 ioa_cfg->hrrq[i].min_cmd_id =
9008                                         IPR_NUM_INTERNAL_CMD_BLKS +
9009                                         (i - 1) * entries_each_hrrq;
9010                                 ioa_cfg->hrrq[i].max_cmd_id =
9011                                         (IPR_NUM_INTERNAL_CMD_BLKS +
9012                                         i * entries_each_hrrq - 1);
9013                         }
9014                 } else {
9015                         entries_each_hrrq = IPR_NUM_CMD_BLKS;
9016                         ioa_cfg->hrrq[i].min_cmd_id = 0;
9017                         ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9018                 }
9019                 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9020         }
9021
9022         BUG_ON(ioa_cfg->hrrq_num == 0);
9023
9024         i = IPR_NUM_CMD_BLKS -
9025                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9026         if (i > 0) {
9027                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9028                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9029         }
9030
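	/* carve the command blocks out of the DMA pool and pre-build the static parts of each IOARCB */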
9031         for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9032                 ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
9033
9034                 if (!ipr_cmd) {
9035                         ipr_free_cmd_blks(ioa_cfg);
9036                         return -ENOMEM;
9037                 }
9038
9039                 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
9040                 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9041                 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9042
9043                 ioarcb = &ipr_cmd->ioarcb;
9044                 ipr_cmd->dma_addr = dma_addr;
9045                 if (ioa_cfg->sis64)
9046                         ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9047                 else
9048                         ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9049
9050                 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9051                 if (ioa_cfg->sis64) {
9052                         ioarcb->u.sis64_addr_data.data_ioadl_addr =
9053                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9054                         ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9055                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9056                 } else {
9057                         ioarcb->write_ioadl_addr =
9058                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9059                         ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9060                         ioarcb->ioasa_host_pci_addr =
9061                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9062                 }
9063                 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9064                 ipr_cmd->cmd_index = i;
9065                 ipr_cmd->ioa_cfg = ioa_cfg;
9066                 ipr_cmd->sense_buffer_dma = dma_addr +
9067                         offsetof(struct ipr_cmnd, sense_buffer);
9068
9069                 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9070                 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9071                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9072                 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9073                         hrrq_id++;
9074         }
9075
9076         return 0;
9077 }
9078
9079 /**
9080  * ipr_alloc_mem - Allocate memory for an adapter
9081  * @ioa_cfg:    ioa config struct
9082  *
9083  * Return value:
9084  *      0 on success / non-zero for error
9085  **/
9086 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9087 {
9088         struct pci_dev *pdev = ioa_cfg->pdev;
9089         int i, rc = -ENOMEM;
9090
9091         ENTER;
9092         ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
9093                                        ioa_cfg->max_devs_supported, GFP_KERNEL);
9094
9095         if (!ioa_cfg->res_entries)
9096                 goto out;
9097
9098         for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9099                 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9100                 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9101         }
9102
9103         ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
9104                                                 sizeof(struct ipr_misc_cbs),
9105                                                 &ioa_cfg->vpd_cbs_dma);
9106
9107         if (!ioa_cfg->vpd_cbs)
9108                 goto out_free_res_entries;
9109
9110         if (ipr_alloc_cmd_blks(ioa_cfg))
9111                 goto out_free_vpd_cbs;
9112
9113         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9114                 ioa_cfg->hrrq[i].host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
9115                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9116                                         &ioa_cfg->hrrq[i].host_rrq_dma);
9117
9118                 if (!ioa_cfg->hrrq[i].host_rrq)  {
9119                         while (--i >= 0)
9120                                 pci_free_consistent(pdev,
9121                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9122                                         ioa_cfg->hrrq[i].host_rrq,
9123                                         ioa_cfg->hrrq[i].host_rrq_dma);
9124                         goto out_ipr_free_cmd_blocks;
9125                 }
9126                 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9127         }
9128
9129         ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
9130                                                     ioa_cfg->cfg_table_size,
9131                                                     &ioa_cfg->cfg_table_dma);
9132
9133         if (!ioa_cfg->u.cfg_table)
9134                 goto out_free_host_rrq;
9135
9136         for (i = 0; i < IPR_NUM_HCAMS; i++) {
9137                 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
9138                                                            sizeof(struct ipr_hostrcb),
9139                                                            &ioa_cfg->hostrcb_dma[i]);
9140
9141                 if (!ioa_cfg->hostrcb[i])
9142                         goto out_free_hostrcb_dma;
9143
9144                 ioa_cfg->hostrcb[i]->hostrcb_dma =
9145                         ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9146                 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9147                 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9148         }
9149
9150         ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
9151                                  IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9152
9153         if (!ioa_cfg->trace)
9154                 goto out_free_hostrcb_dma;
9155
9156         rc = 0;
9157 out:
9158         LEAVE;
9159         return rc;
9160
9161 out_free_hostrcb_dma:
9162         while (i-- > 0) {
9163                 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
9164                                     ioa_cfg->hostrcb[i],
9165                                     ioa_cfg->hostrcb_dma[i]);
9166         }
9167         pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
9168                             ioa_cfg->u.cfg_table,
9169                             ioa_cfg->cfg_table_dma);
9170 out_free_host_rrq:
9171         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9172                 pci_free_consistent(pdev,
9173                                 sizeof(u32) * ioa_cfg->hrrq[i].size,
9174                                 ioa_cfg->hrrq[i].host_rrq,
9175                                 ioa_cfg->hrrq[i].host_rrq_dma);
9176         }
9177 out_ipr_free_cmd_blocks:
9178         ipr_free_cmd_blks(ioa_cfg);
9179 out_free_vpd_cbs:
9180         pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
9181                             ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9182 out_free_res_entries:
9183         kfree(ioa_cfg->res_entries);
9184         goto out;
9185 }
9186
9187 /**
9188  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9189  * @ioa_cfg:    ioa config struct
9190  *
9191  * Return value:
9192  *      none
9193  **/
9194 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9195 {
9196         int i;
9197
9198         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9199                 ioa_cfg->bus_attr[i].bus = i;
9200                 ioa_cfg->bus_attr[i].qas_enabled = 0;
9201                 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9202                 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9203                         ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9204                 else
9205                         ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9206         }
9207 }
9208
9209 /**
9210  * ipr_init_regs - Initialize IOA registers
9211  * @ioa_cfg:    ioa config struct
9212  *
9213  * Return value:
9214  *      none
9215  **/
9216 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9217 {
9218         const struct ipr_interrupt_offsets *p;
9219         struct ipr_interrupts *t;
9220         void __iomem *base;
9221
9222         p = &ioa_cfg->chip_cfg->regs;
9223         t = &ioa_cfg->regs;
9224         base = ioa_cfg->hdw_dma_regs;
9225
9226         t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9227         t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9228         t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9229         t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9230         t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9231         t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9232         t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9233         t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9234         t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9235         t->ioarrin_reg = base + p->ioarrin_reg;
9236         t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9237         t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9238         t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9239         t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9240         t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9241         t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9242
9243         if (ioa_cfg->sis64) {
9244                 t->init_feedback_reg = base + p->init_feedback_reg;
9245                 t->dump_addr_reg = base + p->dump_addr_reg;
9246                 t->dump_data_reg = base + p->dump_data_reg;
9247                 t->endian_swap_reg = base + p->endian_swap_reg;
9248         }
9249 }
9250
9251 /**
9252  * ipr_init_ioa_cfg - Initialize IOA config struct
9253  * @ioa_cfg:    ioa config struct
9254  * @host:               scsi host struct
9255  * @pdev:               PCI dev struct
9256  *
9257  * Return value:
9258  *      none
9259  **/
9260 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9261                              struct Scsi_Host *host, struct pci_dev *pdev)
9262 {
9263         int i;
9264
9265         ioa_cfg->host = host;
9266         ioa_cfg->pdev = pdev;
9267         ioa_cfg->log_level = ipr_log_level;
9268         ioa_cfg->doorbell = IPR_DOORBELL;
9269         sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9270         sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9271         sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9272         sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9273         sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9274         sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9275
9276         INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9277         INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9278         INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9279         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9280         INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9281         init_waitqueue_head(&ioa_cfg->reset_wait_q);
9282         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9283         init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9284         ioa_cfg->sdt_state = INACTIVE;
9285
9286         ipr_initialize_bus_attr(ioa_cfg);
9287         ioa_cfg->max_devs_supported = ipr_max_devs;
9288
9289         if (ioa_cfg->sis64) {
9290                 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9291                 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9292                 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9293                         ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9294                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9295                                            + ((sizeof(struct ipr_config_table_entry64)
9296                                                * ioa_cfg->max_devs_supported)));
9297         } else {
9298                 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9299                 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9300                 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9301                         ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9302                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9303                                            + ((sizeof(struct ipr_config_table_entry)
9304                                                * ioa_cfg->max_devs_supported)));
9305         }
9306
9307         host->max_channel = IPR_MAX_BUS_TO_SCAN;
9308         host->unique_id = host->host_no;
9309         host->max_cmd_len = IPR_MAX_CDB_LEN;
9310         host->can_queue = ioa_cfg->max_cmds;
9311         pci_set_drvdata(pdev, ioa_cfg);
9312
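	/* hrrq[0] shares the SCSI host lock; any additional queues get their own lock */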
9313         for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9314                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9315                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9316                 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9317                 if (i == 0)
9318                         ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9319                 else
9320                         ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9321         }
9322 }
9323
9324 /**
9325  * ipr_get_chip_info - Find adapter chip information
9326  * @dev_id:             PCI device id struct
9327  *
9328  * Return value:
9329  *      ptr to chip information on success / NULL on failure
9330  **/
9331 static const struct ipr_chip_t *
9332 ipr_get_chip_info(const struct pci_device_id *dev_id)
9333 {
9334         int i;
9335
9336         for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9337                 if (ipr_chip[i].vendor == dev_id->vendor &&
9338                     ipr_chip[i].device == dev_id->device)
9339                         return &ipr_chip[i];
9340         return NULL;
9341 }
9342
9343 /**
9344  * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9345  *                                              during probe time
9346  * @ioa_cfg:    ioa config struct
9347  *
9348  * Return value:
9349  *      None
9350  **/
9351 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9352 {
9353         struct pci_dev *pdev = ioa_cfg->pdev;
9354
9355         if (pci_channel_offline(pdev)) {
9356                 wait_event_timeout(ioa_cfg->eeh_wait_q,
9357                                    !pci_channel_offline(pdev),
9358                                    IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9359                 pci_restore_state(pdev);
9360         }
9361 }
9362
9363 static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9364 {
9365         struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
9366         int i, vectors;
9367
9368         for (i = 0; i < ARRAY_SIZE(entries); ++i)
9369                 entries[i].entry = i;
9370
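	/* request between 1 and ipr_number_of_msix vectors; a negative return means MSI-X could not be enabled */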
9371         vectors = pci_enable_msix_range(ioa_cfg->pdev,
9372                                         entries, 1, ipr_number_of_msix);
9373         if (vectors < 0) {
9374                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9375                 return vectors;
9376         }
9377
9378         for (i = 0; i < vectors; i++)
9379                 ioa_cfg->vectors_info[i].vec = entries[i].vector;
9380         ioa_cfg->nvectors = vectors;
9381
9382         return 0;
9383 }
9384
9385 static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9386 {
9387         int i, vectors;
9388
9389         vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
9390         if (vectors < 0) {
9391                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9392                 return vectors;
9393         }
9394
9395         for (i = 0; i < vectors; i++)
9396                 ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
9397         ioa_cfg->nvectors = vectors;
9398
9399         return 0;
9400 }
9401
9402 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9403 {
9404         int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9405
9406         for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9407                 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9408                          "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9409                 ioa_cfg->vectors_info[vec_idx].
9410                         desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9411         }
9412 }
9413
9414 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
9415 {
9416         int i, rc;
9417
9418         for (i = 1; i < ioa_cfg->nvectors; i++) {
9419                 rc = request_irq(ioa_cfg->vectors_info[i].vec,
9420                         ipr_isr_mhrrq,
9421                         0,
9422                         ioa_cfg->vectors_info[i].desc,
9423                         &ioa_cfg->hrrq[i]);
9424                 if (rc) {
9425                         while (--i >= 0)
9426                                 free_irq(ioa_cfg->vectors_info[i].vec,
9427                                         &ioa_cfg->hrrq[i]);
9428                         return rc;
9429                 }
9430         }
9431         return 0;
9432 }
9433
9434 /**
9435  * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9436  * @irq:                interrupt number
 * @devp:               ioa config struct
9437  *
9438  * Description: Simply set the msi_received flag to 1 indicating that
9439  * Message Signaled Interrupts are supported.
9440  *
9441  * Return value:
9442  *      IRQ_HANDLED
9443  **/
9444 static irqreturn_t ipr_test_intr(int irq, void *devp)
9445 {
9446         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9447         unsigned long lock_flags = 0;
9448         irqreturn_t rc = IRQ_HANDLED;
9449
9450         dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
9451         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9452
9453         ioa_cfg->msi_received = 1;
9454         wake_up(&ioa_cfg->msi_wait_q);
9455
9456         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9457         return rc;
9458 }
9459
9460 /**
9461  * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9462  * @ioa_cfg:            ioa config struct
 * @pdev:               PCI device struct
9463  *
9464  * Description: The return value from pci_enable_msi_range() cannot always be
9465  * trusted. This routine sets up and initiates a test interrupt to determine
9466  * if the interrupt is received via the ipr_test_intr() service routine.
9467  * If the test fails, the driver will fall back to LSI.
9468  *
9469  * Return value:
9470  *      0 on success / non-zero on failure
9471  **/
9472 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
9473 {
9474         int rc;
9475         volatile u32 int_reg;
9476         unsigned long lock_flags = 0;
9477
9478         ENTER;
9479
9480         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9481         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9482         ioa_cfg->msi_received = 0;
9483         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9484         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
9485         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9486         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9487
9488         if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9489                 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9490         else
9491                 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9492         if (rc) {
9493                 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
9494                 return rc;
9495         } else if (ipr_debug)
9496                 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
9497
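	/* generate a test interrupt and give ipr_test_intr() up to a second to observe it */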
9498         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
9499         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
9500         wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
9501         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9502         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9503
9504         if (!ioa_cfg->msi_received) {
9505                 /* MSI test failed */
9506                 dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
9507                 rc = -EOPNOTSUPP;
9508         } else if (ipr_debug)
9509                 dev_info(&pdev->dev, "MSI test succeeded.\n");
9510
9511         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9512
9513         if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9514                 free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
9515         else
9516                 free_irq(pdev->irq, ioa_cfg);
9517
9518         LEAVE;
9519
9520         return rc;
9521 }
9522
9523 /**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
9524  * @pdev:               PCI device struct
9525  * @dev_id:             PCI device id struct
9526  *
9527  * Return value:
9528  *      0 on success / non-zero on failure
9529  **/
9530 static int ipr_probe_ioa(struct pci_dev *pdev,
9531                          const struct pci_device_id *dev_id)
9532 {
9533         struct ipr_ioa_cfg *ioa_cfg;
9534         struct Scsi_Host *host;
9535         unsigned long ipr_regs_pci;
9536         void __iomem *ipr_regs;
9537         int rc = PCIBIOS_SUCCESSFUL;
9538         volatile u32 mask, uproc, interrupts;
9539         unsigned long lock_flags, driver_lock_flags;
9540
9541         ENTER;
9542
9543         dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
9544         host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9545
9546         if (!host) {
9547                 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9548                 rc = -ENOMEM;
9549                 goto out;
9550         }
9551
9552         ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9553         memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
9554         ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
9555
9556         ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
9557
9558         if (!ioa_cfg->ipr_chip) {
9559                 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
9560                         dev_id->vendor, dev_id->device);
                rc = -ENODEV;
9561                 goto out_scsi_host_put;
9562         }
9563
9564         /* set SIS 32 or SIS 64 */
9565         ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
9566         ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
9567         ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
9568         ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
9569
9570         if (ipr_transop_timeout)
9571                 ioa_cfg->transop_timeout = ipr_transop_timeout;
9572         else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
9573                 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
9574         else
9575                 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
9576
9577         ioa_cfg->revid = pdev->revision;
9578
9579         ipr_init_ioa_cfg(ioa_cfg, host, pdev);
9580
9581         ipr_regs_pci = pci_resource_start(pdev, 0);
9582
9583         rc = pci_request_regions(pdev, IPR_NAME);
9584         if (rc < 0) {
9585                 dev_err(&pdev->dev,
9586                         "Couldn't register memory range of registers\n");
9587                 goto out_scsi_host_put;
9588         }
9589
9590         rc = pci_enable_device(pdev);
9591
9592         if (rc || pci_channel_offline(pdev)) {
9593                 if (pci_channel_offline(pdev)) {
9594                         ipr_wait_for_pci_err_recovery(ioa_cfg);
9595                         rc = pci_enable_device(pdev);
9596                 }
9597
9598                 if (rc) {
9599                         dev_err(&pdev->dev, "Cannot enable adapter\n");
9600                         ipr_wait_for_pci_err_recovery(ioa_cfg);
9601                         goto out_release_regions;
9602                 }
9603         }
9604
9605         ipr_regs = pci_ioremap_bar(pdev, 0);
9606
9607         if (!ipr_regs) {
9608                 dev_err(&pdev->dev,
9609                         "Couldn't map memory range of registers\n");
9610                 rc = -ENOMEM;
9611                 goto out_disable;
9612         }
9613
9614         ioa_cfg->hdw_dma_regs = ipr_regs;
9615         ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
9616         ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
9617
9618         ipr_init_regs(ioa_cfg);
9619
9620         if (ioa_cfg->sis64) {
9621                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
9622                 if (rc < 0) {
9623                         dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
9624                         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9625                 }
9626         } else
9627                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9628
9629         if (rc < 0) {
9630                 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
9631                 goto cleanup_nomem;
9632         }
9633
9634         rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
9635                                    ioa_cfg->chip_cfg->cache_line_size);
9636
9637         if (rc != PCIBIOS_SUCCESSFUL) {
9638                 dev_err(&pdev->dev, "Write of cache line size failed\n");
9639                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9640                 rc = -EIO;
9641                 goto cleanup_nomem;
9642         }
9643
9644         /* Issue MMIO read to ensure card is not in EEH */
9645         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
9646         ipr_wait_for_pci_err_recovery(ioa_cfg);
9647
9648         if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
9649                 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
9650                         IPR_MAX_MSIX_VECTORS);
9651                 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
9652         }
9653
9654         if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9655                         ipr_enable_msix(ioa_cfg) == 0)
9656                 ioa_cfg->intr_flag = IPR_USE_MSIX;
9657         else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9658                         ipr_enable_msi(ioa_cfg) == 0)
9659                 ioa_cfg->intr_flag = IPR_USE_MSI;
9660         else {
9661                 ioa_cfg->intr_flag = IPR_USE_LSI;
9662                 ioa_cfg->nvectors = 1;
9663                 dev_info(&pdev->dev, "Cannot enable MSI.\n");
9664         }
9665
9666         pci_set_master(pdev);
9667
9668         if (pci_channel_offline(pdev)) {
9669                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9670                 pci_set_master(pdev);
9671                 if (pci_channel_offline(pdev)) {
9672                         rc = -EIO;
9673                         goto out_msi_disable;
9674                 }
9675         }
9676
9677         if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9678             ioa_cfg->intr_flag == IPR_USE_MSIX) {
9679                 rc = ipr_test_msi(ioa_cfg, pdev);
9680                 if (rc == -EOPNOTSUPP) {
9681                         ipr_wait_for_pci_err_recovery(ioa_cfg);
9682                         if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9683                                 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9684                                 pci_disable_msi(pdev);
9685                          } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9686                                 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9687                                 pci_disable_msix(pdev);
9688                         }
9689
9690                         ioa_cfg->intr_flag = IPR_USE_LSI;
9691                         ioa_cfg->nvectors = 1;
9692                 }
9693                 else if (rc)
9694                         goto out_msi_disable;
9695                 else {
9696                         if (ioa_cfg->intr_flag == IPR_USE_MSI)
9697                                 dev_info(&pdev->dev,
9698                                         "Request for %d MSIs succeeded with starting IRQ: %d\n",
9699                                         ioa_cfg->nvectors, pdev->irq);
9700                         else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9701                                 dev_info(&pdev->dev,
9702                                         "Request for %d MSIXs succeeded.\n",
9703                                         ioa_cfg->nvectors);
9704                 }
9705         }
9706
9707         ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
9708                                 (unsigned int)num_online_cpus(),
9709                                 (unsigned int)IPR_MAX_HRRQ_NUM);
9710
9711         if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
9712                 goto out_msi_disable;
9713
9714         if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
9715                 goto out_msi_disable;
9716
9717         rc = ipr_alloc_mem(ioa_cfg);
9718         if (rc < 0) {
9719                 dev_err(&pdev->dev,
9720                         "Couldn't allocate enough memory for device driver!\n");
9721                 goto out_msi_disable;
9722         }
9723
9724         /* Save away PCI config space for use following IOA reset */
9725         rc = pci_save_state(pdev);
9726
9727         if (rc != PCIBIOS_SUCCESSFUL) {
9728                 dev_err(&pdev->dev, "Failed to save PCI config space\n");
9729                 rc = -EIO;
9730                 goto cleanup_nolog;
9731         }
9732
9733         /*
9734          * If HRRQ updated interrupt is not masked, or reset alert is set,
9735          * the card is in an unknown state and needs a hard reset
9736          */
9737         mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
9738         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
9739         uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
9740         if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
9741                 ioa_cfg->needs_hard_reset = 1;
9742         if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
9743                 ioa_cfg->needs_hard_reset = 1;
9744         if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
9745                 ioa_cfg->ioa_unit_checked = 1;
9746
9747         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9748         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9749         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9750
9751         if (ioa_cfg->intr_flag == IPR_USE_MSI
9752                         || ioa_cfg->intr_flag == IPR_USE_MSIX) {
9753                 name_msi_vectors(ioa_cfg);
9754                 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
9755                         0,
9756                         ioa_cfg->vectors_info[0].desc,
9757                         &ioa_cfg->hrrq[0]);
9758                 if (!rc)
9759                         rc = ipr_request_other_msi_irqs(ioa_cfg);
9760         } else {
9761                 rc = request_irq(pdev->irq, ipr_isr,
9762                          IRQF_SHARED,
9763                          IPR_NAME, &ioa_cfg->hrrq[0]);
9764         }
9765         if (rc) {
9766                 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
9767                         pdev->irq, rc);
9768                 goto cleanup_nolog;
9769         }
9770
9771         if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
9772             (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
9773                 ioa_cfg->needs_warm_reset = 1;
9774                 ioa_cfg->reset = ipr_reset_slot_reset;
9775         } else
9776                 ioa_cfg->reset = ipr_reset_start_bist;
9777
9778         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
9779         list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
9780         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
9781
9782         LEAVE;
9783 out:
9784         return rc;
9785
9786 cleanup_nolog:
9787         ipr_free_mem(ioa_cfg);
9788 out_msi_disable:
9789         ipr_wait_for_pci_err_recovery(ioa_cfg);
9790         if (ioa_cfg->intr_flag == IPR_USE_MSI)
9791                 pci_disable_msi(pdev);
9792         else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9793                 pci_disable_msix(pdev);
9794 cleanup_nomem:
9795         iounmap(ipr_regs);
9796 out_disable:
9797         pci_disable_device(pdev);
9798 out_release_regions:
9799         pci_release_regions(pdev);
9800 out_scsi_host_put:
9801         scsi_host_put(host);
9802         goto out;
9803 }
9804
9805 /**
9806  * ipr_scan_vsets - Scans for VSET devices
9807  * @ioa_cfg:    ioa config struct
9808  *
9809  * Description: Since the VSET resources do not follow SAM in that we can have
9810  * sparse LUNs with no LUN 0, we have to scan for these ourselves.
9811  *
9812  * Return value:
9813  *      none
9814  **/
9815 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
9816 {
9817         int target, lun;
9818
9819         for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
9820                 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
9821                         scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
9822 }
9823
9824 /**
9825  * ipr_initiate_ioa_bringdown - Bring down an adapter
9826  * @ioa_cfg:            ioa config struct
9827  * @shutdown_type:      shutdown type
9828  *
9829  * Description: This function will initiate bringing down the adapter.
9830  * This consists of issuing an IOA shutdown to the adapter
9831  * to flush the cache, and running BIST.
9832  * If the caller needs to wait on the completion of the reset,
9833  * the caller must sleep on the reset_wait_q.
9834  *
9835  * Return value:
9836  *      none
9837  **/
9838 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
9839                                        enum ipr_shutdown_type shutdown_type)
9840 {
9841         ENTER;
9842         if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9843                 ioa_cfg->sdt_state = ABORT_DUMP;
9844         ioa_cfg->reset_retries = 0;
9845         ioa_cfg->in_ioa_bringdown = 1;
9846         ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
9847         LEAVE;
9848 }
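/*
 * Typical caller pattern (as used by __ipr_remove() and ipr_shutdown()
 * below): initiate the bringdown under the host lock, drop the lock,
 * then sleep on reset_wait_q until the reset engine clears
 * in_reset_reload:
 */
#if 0
	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
#endif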
9849
9850 /**
9851  * __ipr_remove - Remove a single adapter
9852  * @pdev:       pci device struct
9853  *
9854  * Adapter hot plug remove entry point.
9855  *
9856  * Return value:
9857  *      none
9858  **/
9859 static void __ipr_remove(struct pci_dev *pdev)
9860 {
9861         unsigned long host_lock_flags = 0;
9862         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9863         int i;
9864         unsigned long driver_lock_flags;
9865         ENTER;
9866
9867         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9868         while (ioa_cfg->in_reset_reload) {
9869                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9870                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9871                 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9872         }
9873
9874         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9875                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9876                 ioa_cfg->hrrq[i].removing_ioa = 1;
9877                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9878         }
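        /*
         * Order the removing_ioa stores above before the bringdown is
         * initiated below, so the interrupt handlers that check the flag
         * observe it once the reset starts.
         */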
9879         wmb();
9880         ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9881
9882         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9883         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9884         flush_work(&ioa_cfg->work_q);
9885         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9886         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9887
9888         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
9889         list_del(&ioa_cfg->queue);
9890         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
9891
9892         if (ioa_cfg->sdt_state == ABORT_DUMP)
9893                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9894         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9895
9896         ipr_free_all_resources(ioa_cfg);
9897
9898         LEAVE;
9899 }
9900
/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Description: Removes the trace and dump sysfs entries and the
 * Scsi_Host, then tears down the adapter via __ipr_remove().
 *
 * Return value:
 *	none
 **/
9910 static void ipr_remove(struct pci_dev *pdev)
9911 {
9912         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9913
9914         ENTER;
9915
9916         ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9917                               &ipr_trace_attr);
9918         ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
9919                              &ipr_dump_attr);
9920         scsi_remove_host(ioa_cfg->host);
9921
9922         __ipr_remove(pdev);
9923
9924         LEAVE;
9925 }
9926
/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	pci device struct
 * @dev_id:	pci device id entry that matched this device
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
9933 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
9934 {
9935         struct ipr_ioa_cfg *ioa_cfg;
9936         int rc, i;
9937
9938         rc = ipr_probe_ioa(pdev, dev_id);
9939
9940         if (rc)
9941                 return rc;
9942
9943         ioa_cfg = pci_get_drvdata(pdev);
9944         rc = ipr_probe_ioa_part2(ioa_cfg);
9945
9946         if (rc) {
9947                 __ipr_remove(pdev);
9948                 return rc;
9949         }
9950
9951         rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
9952
9953         if (rc) {
9954                 __ipr_remove(pdev);
9955                 return rc;
9956         }
9957
9958         rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
9959                                    &ipr_trace_attr);
9960
9961         if (rc) {
9962                 scsi_remove_host(ioa_cfg->host);
9963                 __ipr_remove(pdev);
9964                 return rc;
9965         }
9966
9967         rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
9968                                    &ipr_dump_attr);
9969
9970         if (rc) {
9971                 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9972                                       &ipr_trace_attr);
9973                 scsi_remove_host(ioa_cfg->host);
9974                 __ipr_remove(pdev);
9975                 return rc;
9976         }
9977
9978         scsi_scan_host(ioa_cfg->host);
9979         ipr_scan_vsets(ioa_cfg);
9980         scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
9981         ioa_cfg->allow_ml_add_del = 1;
9982         ioa_cfg->host->max_channel = IPR_VSET_BUS;
9983         ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
9984
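        /*
         * hrrq[0] stays on the plain interrupt handler; only the
         * additional HRRQs (SIS64, multiple vectors) are polled via
         * blk_iopoll, which is why the loop below starts at 1.
         */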
9985         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
9986                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
9987                         blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
9988                                         ioa_cfg->iopoll_weight, ipr_iopoll);
9989                         blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
9990                 }
9991         }
9992
9993         schedule_work(&ioa_cfg->work_q);
9994         return 0;
9995 }
9996
9997 /**
9998  * ipr_shutdown - Shutdown handler.
9999  * @pdev:       pci device struct
10000  *
10001  * This function is invoked upon system shutdown/reboot. It will issue
10002  * an adapter shutdown to the adapter to flush the write cache.
10003  *
10004  * Return value:
10005  *      none
10006  **/
10007 static void ipr_shutdown(struct pci_dev *pdev)
10008 {
10009         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10010         unsigned long lock_flags = 0;
10011         int i;
10012
10013         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10014         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10015                 ioa_cfg->iopoll_weight = 0;
10016                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
10017                         blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
10018         }
10019
10020         while (ioa_cfg->in_reset_reload) {
10021                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10022                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10023                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10024         }
10025
10026         ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10027         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10028         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10029 }
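/*
 * Unlike ipr_remove(), the shutdown handler deliberately frees nothing:
 * it only quiesces iopoll and flushes the write cache via a normal
 * adapter shutdown, since the system is going down anyway.
 */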
10030
static const struct pci_device_id ipr_pci_table[] = {
10032         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10033                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
10034         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10035                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
10036         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10037                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
10038         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10039                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
10040         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10041                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
10042         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10043                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
10044         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10045                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
10046         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10047                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10048                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10049         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10050               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10051         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10052               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10053               IPR_USE_LONG_TRANSOP_TIMEOUT },
10054         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10055               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10056               IPR_USE_LONG_TRANSOP_TIMEOUT },
10057         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10058               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10059         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10060               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10061               IPR_USE_LONG_TRANSOP_TIMEOUT},
10062         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10063               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10064               IPR_USE_LONG_TRANSOP_TIMEOUT },
10065         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10066               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10067               IPR_USE_LONG_TRANSOP_TIMEOUT },
10068         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10069               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10070         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10071               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10072         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10073               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
10074               IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
10075         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
10076                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
10077         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10078                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
10079         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10080                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10081                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10082         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10083                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10084                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10085         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10086                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10087         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10088                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10089         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10090                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
10091         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10092                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10093         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10094                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10095         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10096                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
10097         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10098                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
10099         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10100                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
10101         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10102                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
10103         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10104                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10105         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10106                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
10107         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10108                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10109         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10110                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10111         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10112                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10113         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10114                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
10115         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10116                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10117         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10118                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10119         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10120                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10121         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10122                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10123         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10124                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10125         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10126                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10127         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10128                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10129         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10130                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
10131         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10132                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10133         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10134                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10135         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10136                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
10137         { }
10138 };
10139 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
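/*
 * How an entry in the table above is consumed (illustrative): the PCI
 * core matches vendor/device/subvendor/subdevice and hands the matching
 * entry to ipr_probe() as dev_id; the final field becomes
 * dev_id->driver_data, tested in ipr_probe_ioa() like so:
 */
#if 0
	if (dev_id->driver_data & IPR_USE_PCI_WARM_RESET)
		ioa_cfg->needs_warm_reset = 1;
#endif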
10140
10141 static const struct pci_error_handlers ipr_err_handler = {
10142         .error_detected = ipr_pci_error_detected,
10143         .mmio_enabled = ipr_pci_mmio_enabled,
10144         .slot_reset = ipr_pci_slot_reset,
10145 };
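/*
 * PCI error recovery invokes these callbacks in sequence: first
 * .error_detected, then .mmio_enabled if the driver asked to recover
 * with MMIO re-enabled, then .slot_reset after a link or slot reset.
 */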
10146
10147 static struct pci_driver ipr_driver = {
10148         .name = IPR_NAME,
10149         .id_table = ipr_pci_table,
10150         .probe = ipr_probe,
10151         .remove = ipr_remove,
10152         .shutdown = ipr_shutdown,
10153         .err_handler = &ipr_err_handler,
10154 };
10155
/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
10162 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10163 {
10164         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
10165 }
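/*
 * ipr_halt_done() is fire and forget: it only returns the command block
 * to the HRRQ free queue.  Nobody waits on the shutdown prepare because
 * the system is about to halt or reboot.
 */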
10166
/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		notifier block
 * @event:	system event (SYS_RESTART, SYS_HALT, or SYS_POWER_OFF)
 * @buf:	unused
 *
 * Return value:
 *	NOTIFY_OK if shutdown prepare was issued / NOTIFY_DONE for events
 *	this notifier ignores
 **/
10173 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10174 {
10175         struct ipr_cmnd *ipr_cmd;
10176         struct ipr_ioa_cfg *ioa_cfg;
10177         unsigned long flags = 0, driver_lock_flags;
10178
10179         if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10180                 return NOTIFY_DONE;
10181
10182         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10183
10184         list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10185                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10186                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
10187                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10188                         continue;
10189                 }
10190
10191                 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10192                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10193                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10194                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10195                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10196
10197                 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10198                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10199         }
10200         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10201
10202         return NOTIFY_OK;
10203 }
10204
static struct notifier_block ipr_notifier = {
	.notifier_call = ipr_halt,
};
10208
10209 /**
10210  * ipr_init - Module entry point
10211  *
10212  * Return value:
10213  *      0 on success / negative value on failure
10214  **/
static int __init ipr_init(void)
{
	int rc;

	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	rc = pci_register_driver(&ipr_driver);
	if (rc)
		unregister_reboot_notifier(&ipr_notifier);
	return rc;
}
10223
10224 /**
10225  * ipr_exit - Module unload
10226  *
10227  * Module unload entry point.
10228  *
10229  * Return value:
10230  *      none
10231  **/
10232 static void __exit ipr_exit(void)
10233 {
10234         unregister_reboot_notifier(&ipr_notifier);
10235         pci_unregister_driver(&ipr_driver);
10236 }
10237
10238 module_init(ipr_init);
10239 module_exit(ipr_exit);