1 /*
2  *    driver for Microsemi PQI-based storage controllers
3  *    Copyright (c) 2016 Microsemi Corporation
4  *    Copyright (c) 2016 PMC-Sierra, Inc.
5  *
6  *    This program is free software; you can redistribute it and/or modify
7  *    it under the terms of the GNU General Public License as published by
8  *    the Free Software Foundation; version 2 of the License.
9  *
10  *    This program is distributed in the hope that it will be useful,
11  *    but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13  *    NON INFRINGEMENT.  See the GNU General Public License for more details.
14  *
15  *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
16  *
17  */
18
19 #include <linux/module.h>
20 #include <linux/kernel.h>
21 #include <linux/pci.h>
22 #include <linux/delay.h>
23 #include <linux/interrupt.h>
24 #include <linux/sched.h>
25 #include <linux/rtc.h>
26 #include <linux/bcd.h>
27 #include <linux/cciss_ioctl.h>
28 #include <scsi/scsi_host.h>
29 #include <scsi/scsi_cmnd.h>
30 #include <scsi/scsi_device.h>
31 #include <scsi/scsi_eh.h>
32 #include <scsi/scsi_transport_sas.h>
33 #include <asm/unaligned.h>
34 #include "smartpqi.h"
35 #include "smartpqi_sis.h"
36
37 #if !defined(BUILD_TIMESTAMP)
38 #define BUILD_TIMESTAMP
39 #endif
40
41 #define DRIVER_VERSION          "0.9.9-100"
42 #define DRIVER_MAJOR            0
43 #define DRIVER_MINOR            9
44 #define DRIVER_RELEASE          9
45 #define DRIVER_REVISION         100
46
47 #define DRIVER_NAME             "Microsemi PQI Driver (v" DRIVER_VERSION ")"
48 #define DRIVER_NAME_SHORT       "smartpqi"
49
50 MODULE_AUTHOR("Microsemi");
51 MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
52         DRIVER_VERSION);
53 MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
54 MODULE_VERSION(DRIVER_VERSION);
55 MODULE_LICENSE("GPL");
56
57 #define PQI_ENABLE_MULTI_QUEUE_SUPPORT  0
58
59 static char *hpe_branded_controller = "HPE Smart Array Controller";
60 static char *microsemi_branded_controller = "Microsemi Smart Family Controller";
61
62 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
63 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
64 static void pqi_scan_start(struct Scsi_Host *shost);
65 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
66         struct pqi_queue_group *queue_group, enum pqi_io_path path,
67         struct pqi_io_request *io_request);
68 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
69         struct pqi_iu_header *request, unsigned int flags,
70         struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
71 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
72         struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
73         unsigned int cdb_length, struct pqi_queue_group *queue_group,
74         struct pqi_encryption_info *encryption_info);
75
76 /* for flags argument to pqi_submit_raid_request_synchronous() */
77 #define PQI_SYNC_FLAGS_INTERRUPTABLE    0x1
78
79 static struct scsi_transport_template *pqi_sas_transport_template;
80
81 static atomic_t pqi_controller_count = ATOMIC_INIT(0);
82
83 static int pqi_disable_device_id_wildcards;
84 module_param_named(disable_device_id_wildcards,
85         pqi_disable_device_id_wildcards, int, S_IRUGO | S_IWUSR);
86 MODULE_PARM_DESC(disable_device_id_wildcards,
87         "Disable device ID wildcards.");
88
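/* Indexed by the SA_RAID_* level values defined below. */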
89 static char *raid_levels[] = {
90         "RAID-0",
91         "RAID-4",
92         "RAID-1(1+0)",
93         "RAID-5",
94         "RAID-5+1",
95         "RAID-ADG",
96         "RAID-1(ADM)",
97 };
98
99 static char *pqi_raid_level_to_string(u8 raid_level)
100 {
101         if (raid_level < ARRAY_SIZE(raid_levels))
102                 return raid_levels[raid_level];
103
104         return "";
105 }
106
107 #define SA_RAID_0               0
108 #define SA_RAID_4               1
109 #define SA_RAID_1               2       /* also used for RAID 10 */
110 #define SA_RAID_5               3       /* also used for RAID 50 */
111 #define SA_RAID_51              4
112 #define SA_RAID_6               5       /* also used for RAID 60 */
113 #define SA_RAID_ADM             6       /* also used for RAID 1+0 ADM */
114 #define SA_RAID_MAX             SA_RAID_ADM
115 #define SA_RAID_UNKNOWN         0xff
116
117 static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
118 {
119         scmd->scsi_done(scmd);
120 }
121
122 static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
123 {
124         return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
125 }
126
127 static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
128 {
129         void *hostdata = shost_priv(shost);
130
131         return *((struct pqi_ctrl_info **)hostdata);
132 }
133
134 static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
135 {
136         return !device->is_physical_device;
137 }
138
139 static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
140 {
141         return !ctrl_info->controller_online;
142 }
143
144 static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
145 {
146         if (ctrl_info->controller_online)
147                 if (!sis_is_firmware_running(ctrl_info))
148                         pqi_take_ctrl_offline(ctrl_info);
149 }
150
151 static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
152 {
153         return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
154 }
155
156 #define PQI_RESCAN_WORK_INTERVAL        (10 * HZ)
157
158 static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
159 {
160         schedule_delayed_work(&ctrl_info->rescan_work,
161                 PQI_RESCAN_WORK_INTERVAL);
162 }
163
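/*
 * Map a single contiguous buffer for DMA and describe it in the supplied
 * SG descriptor, marking it as the last element.  A NULL or zero-length
 * buffer, or a PCI_DMA_NONE direction, is treated as nothing to map and
 * succeeds without touching the descriptor.
 */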
164 static int pqi_map_single(struct pci_dev *pci_dev,
165         struct pqi_sg_descriptor *sg_descriptor, void *buffer,
166         size_t buffer_length, int data_direction)
167 {
168         dma_addr_t bus_address;
169
170         if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
171                 return 0;
172
173         bus_address = pci_map_single(pci_dev, buffer, buffer_length,
174                 data_direction);
175         if (pci_dma_mapping_error(pci_dev, bus_address))
176                 return -ENOMEM;
177
178         put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
179         put_unaligned_le32(buffer_length, &sg_descriptor->length);
180         put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
181
182         return 0;
183 }
184
185 static void pqi_pci_unmap(struct pci_dev *pci_dev,
186         struct pqi_sg_descriptor *descriptors, int num_descriptors,
187         int data_direction)
188 {
189         int i;
190
191         if (data_direction == PCI_DMA_NONE)
192                 return;
193
194         for (i = 0; i < num_descriptors; i++)
195                 pci_unmap_single(pci_dev,
196                         (dma_addr_t)get_unaligned_le64(&descriptors[i].address),
197                         get_unaligned_le32(&descriptors[i].length),
198                         data_direction);
199 }
200
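/*
 * Build a RAID path request IU for the given CISS/BMIC command: fill in
 * the IU header, copy the LUN, construct the CDB, derive the PCI DMA
 * direction from the SOP data direction, and map the caller's buffer
 * into the first SG descriptor.
 */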
201 static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
202         struct pqi_raid_path_request *request, u8 cmd,
203         u8 *scsi3addr, void *buffer, size_t buffer_length,
204         u16 vpd_page, int *pci_direction)
205 {
206         u8 *cdb;
207         int pci_dir;
208
209         memset(request, 0, sizeof(*request));
210
211         request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
212         put_unaligned_le16(offsetof(struct pqi_raid_path_request,
213                 sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
214                 &request->header.iu_length);
215         put_unaligned_le32(buffer_length, &request->buffer_length);
216         memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
217         request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
218         request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
219
220         cdb = request->cdb;
221
222         switch (cmd) {
223         case INQUIRY:
224                 request->data_direction = SOP_READ_FLAG;
225                 cdb[0] = INQUIRY;
226                 if (vpd_page & VPD_PAGE) {
227                         cdb[1] = 0x1;
228                         cdb[2] = (u8)vpd_page;
229                 }
230                 cdb[4] = (u8)buffer_length;
231                 break;
232         case CISS_REPORT_LOG:
233         case CISS_REPORT_PHYS:
234                 request->data_direction = SOP_READ_FLAG;
235                 cdb[0] = cmd;
236                 if (cmd == CISS_REPORT_PHYS)
237                         cdb[1] = CISS_REPORT_PHYS_EXTENDED;
238                 else
239                         cdb[1] = CISS_REPORT_LOG_EXTENDED;
240                 put_unaligned_be32(buffer_length, &cdb[6]);
241                 break;
242         case CISS_GET_RAID_MAP:
243                 request->data_direction = SOP_READ_FLAG;
244                 cdb[0] = CISS_READ;
245                 cdb[1] = CISS_GET_RAID_MAP;
246                 put_unaligned_be32(buffer_length, &cdb[6]);
247                 break;
248         case SA_CACHE_FLUSH:
249                 request->data_direction = SOP_WRITE_FLAG;
250                 cdb[0] = BMIC_WRITE;
251                 cdb[6] = BMIC_CACHE_FLUSH;
252                 put_unaligned_be16(buffer_length, &cdb[7]);
253                 break;
254         case BMIC_IDENTIFY_CONTROLLER:
255         case BMIC_IDENTIFY_PHYSICAL_DEVICE:
256                 request->data_direction = SOP_READ_FLAG;
257                 cdb[0] = BMIC_READ;
258                 cdb[6] = cmd;
259                 put_unaligned_be16(buffer_length, &cdb[7]);
260                 break;
261         case BMIC_WRITE_HOST_WELLNESS:
262                 request->data_direction = SOP_WRITE_FLAG;
263                 cdb[0] = BMIC_WRITE;
264                 cdb[6] = cmd;
265                 put_unaligned_be16(buffer_length, &cdb[7]);
266                 break;
267         default:
268                 dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n",
269                         cmd);
270                 WARN_ON(cmd);
271                 break;
272         }
273
274         switch (request->data_direction) {
275         case SOP_READ_FLAG:
276                 pci_dir = PCI_DMA_FROMDEVICE;
277                 break;
278         case SOP_WRITE_FLAG:
279                 pci_dir = PCI_DMA_TODEVICE;
280                 break;
281         case SOP_NO_DIRECTION_FLAG:
282                 pci_dir = PCI_DMA_NONE;
283                 break;
284         default:
285                 pci_dir = PCI_DMA_BIDIRECTIONAL;
286                 break;
287         }
288
289         *pci_direction = pci_dir;
290
291         return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
292                 buffer, buffer_length, pci_dir);
293 }
294
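/*
 * Claim a free I/O request slot by scanning the pool round-robin and
 * taking the first entry whose refcount transitions from 0 to 1.  The
 * next_io_request_slot hint is only an optimization and may race.
 */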
295 static struct pqi_io_request *pqi_alloc_io_request(
296         struct pqi_ctrl_info *ctrl_info)
297 {
298         struct pqi_io_request *io_request;
299         u16 i = ctrl_info->next_io_request_slot;        /* benignly racy */
300
301         while (1) {
302                 io_request = &ctrl_info->io_request_pool[i];
303                 if (atomic_inc_return(&io_request->refcount) == 1)
304                         break;
305                 atomic_dec(&io_request->refcount);
306                 i = (i + 1) % ctrl_info->max_io_slots;
307         }
308
309         /* benignly racy */
310         ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;
311
312         io_request->scmd = NULL;
313         io_request->status = 0;
314         io_request->error_info = NULL;
315
316         return io_request;
317 }
318
319 static void pqi_free_io_request(struct pqi_io_request *io_request)
320 {
321         atomic_dec(&io_request->refcount);
322 }
323
324 static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
325         struct bmic_identify_controller *buffer)
326 {
327         int rc;
328         int pci_direction;
329         struct pqi_raid_path_request request;
330
331         rc = pqi_build_raid_path_request(ctrl_info, &request,
332                 BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
333                 sizeof(*buffer), 0, &pci_direction);
334         if (rc)
335                 return rc;
336
337         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
338                 NULL, NO_TIMEOUT);
339
340         pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
341                 pci_direction);
342
343         return rc;
344 }
345
346 static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
347         u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
348 {
349         int rc;
350         int pci_direction;
351         struct pqi_raid_path_request request;
352
353         rc = pqi_build_raid_path_request(ctrl_info, &request,
354                 INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
355                 &pci_direction);
356         if (rc)
357                 return rc;
358
359         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
360                 NULL, NO_TIMEOUT);
361
362         pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
363                 pci_direction);
364
365         return rc;
366 }
367
368 static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
369         struct pqi_scsi_dev *device,
370         struct bmic_identify_physical_device *buffer,
371         size_t buffer_length)
372 {
373         int rc;
374         int pci_direction;
375         u16 bmic_device_index;
376         struct pqi_raid_path_request request;
377
378         rc = pqi_build_raid_path_request(ctrl_info, &request,
379                 BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
380                 buffer_length, 0, &pci_direction);
381         if (rc)
382                 return rc;
383
384         bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
385         request.cdb[2] = (u8)bmic_device_index;
386         request.cdb[9] = (u8)(bmic_device_index >> 8);
387
388         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
389                 0, NULL, NO_TIMEOUT);
390
391         pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
392                 pci_direction);
393
394         return rc;
395 }
396
397 #define SA_CACHE_FLUSH_BUFFER_LENGTH    4
398 #define PQI_FLUSH_CACHE_TIMEOUT         (30 * 1000)
399
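/*
 * Ask the controller to flush its write cache via a BMIC SA_CACHE_FLUSH
 * sent to the controller LUN, with a 30-second timeout.  Skipped if the
 * controller is already offline.
 */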
400 static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info)
401 {
402         int rc;
403         struct pqi_raid_path_request request;
404         int pci_direction;
405         u8 *buffer;
406
407         /*
408          * Don't bother trying to flush the cache if the controller is
409          * locked up.
410          */
411         if (pqi_ctrl_offline(ctrl_info))
412                 return -ENXIO;
413
414         buffer = kzalloc(SA_CACHE_FLUSH_BUFFER_LENGTH, GFP_KERNEL);
415         if (!buffer)
416                 return -ENOMEM;
417
418         rc = pqi_build_raid_path_request(ctrl_info, &request,
419                 SA_CACHE_FLUSH, RAID_CTLR_LUNID, buffer,
420                 SA_CACHE_FLUSH_BUFFER_LENGTH, 0, &pci_direction);
421         if (rc)
422                 goto out;
423
424         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
425                 0, NULL, PQI_FLUSH_CACHE_TIMEOUT);
426
427         pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
428                 pci_direction);
429
430 out:
431         kfree(buffer);
432
433         return rc;
434 }
435
436 static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
437         void *buffer, size_t buffer_length)
438 {
439         int rc;
440         struct pqi_raid_path_request request;
441         int pci_direction;
442
443         rc = pqi_build_raid_path_request(ctrl_info, &request,
444                 BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
445                 buffer_length, 0, &pci_direction);
446         if (rc)
447                 return rc;
448
449         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
450                 0, NULL, NO_TIMEOUT);
451
452         pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
453                 pci_direction);
454
455         return rc;
456 }
457
458 #pragma pack(1)
459
460 struct bmic_host_wellness_driver_version {
461         u8      start_tag[4];
462         u8      driver_version_tag[2];
463         __le16  driver_version_length;
464         char    driver_version[32];
465         u8      end_tag[2];
466 };
467
468 #pragma pack()
469
470 static int pqi_write_driver_version_to_host_wellness(
471         struct pqi_ctrl_info *ctrl_info)
472 {
473         int rc;
474         struct bmic_host_wellness_driver_version *buffer;
475         size_t buffer_length;
476
477         buffer_length = sizeof(*buffer);
478
479         buffer = kmalloc(buffer_length, GFP_KERNEL);
480         if (!buffer)
481                 return -ENOMEM;
482
483         buffer->start_tag[0] = '<';
484         buffer->start_tag[1] = 'H';
485         buffer->start_tag[2] = 'W';
486         buffer->start_tag[3] = '>';
487         buffer->driver_version_tag[0] = 'D';
488         buffer->driver_version_tag[1] = 'V';
489         put_unaligned_le16(sizeof(buffer->driver_version),
490                 &buffer->driver_version_length);
491         strncpy(buffer->driver_version, DRIVER_VERSION,
492                 sizeof(buffer->driver_version) - 1);
493         buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
494         buffer->end_tag[0] = 'Z';
495         buffer->end_tag[1] = 'Z';
496
497         rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
498
499         kfree(buffer);
500
501         return rc;
502 }
503
504 #pragma pack(1)
505
506 struct bmic_host_wellness_time {
507         u8      start_tag[4];
508         u8      time_tag[2];
509         __le16  time_length;
510         u8      time[8];
511         u8      dont_write_tag[2];
512         u8      end_tag[2];
513 };
514
515 #pragma pack()
516
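/*
 * Report the host's current local time to the controller using the BMIC
 * host wellness format: hour, minute, second, month, day, century and
 * year-within-century, each encoded as BCD.
 */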
517 static int pqi_write_current_time_to_host_wellness(
518         struct pqi_ctrl_info *ctrl_info)
519 {
520         int rc;
521         struct bmic_host_wellness_time *buffer;
522         size_t buffer_length;
523         time64_t local_time;
524         unsigned int year;
525         struct timeval time;
526         struct rtc_time tm;
527
528         buffer_length = sizeof(*buffer);
529
530         buffer = kmalloc(buffer_length, GFP_KERNEL);
531         if (!buffer)
532                 return -ENOMEM;
533
534         buffer->start_tag[0] = '<';
535         buffer->start_tag[1] = 'H';
536         buffer->start_tag[2] = 'W';
537         buffer->start_tag[3] = '>';
538         buffer->time_tag[0] = 'T';
539         buffer->time_tag[1] = 'D';
540         put_unaligned_le16(sizeof(buffer->time),
541                 &buffer->time_length);
542
543         do_gettimeofday(&time);
544         local_time = time.tv_sec - (sys_tz.tz_minuteswest * 60);
545         rtc_time64_to_tm(local_time, &tm);
546         year = tm.tm_year + 1900;
547
548         buffer->time[0] = bin2bcd(tm.tm_hour);
549         buffer->time[1] = bin2bcd(tm.tm_min);
550         buffer->time[2] = bin2bcd(tm.tm_sec);
551         buffer->time[3] = 0;
552         buffer->time[4] = bin2bcd(tm.tm_mon + 1);
553         buffer->time[5] = bin2bcd(tm.tm_mday);
554         buffer->time[6] = bin2bcd(year / 100);
555         buffer->time[7] = bin2bcd(year % 100);
556
557         buffer->dont_write_tag[0] = 'D';
558         buffer->dont_write_tag[1] = 'W';
559         buffer->end_tag[0] = 'Z';
560         buffer->end_tag[1] = 'Z';
561
562         rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
563
564         kfree(buffer);
565
566         return rc;
567 }
568
569 #define PQI_UPDATE_TIME_WORK_INTERVAL   (24UL * 60 * 60 * HZ)
570
571 static void pqi_update_time_worker(struct work_struct *work)
572 {
573         int rc;
574         struct pqi_ctrl_info *ctrl_info;
575
576         ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
577                 update_time_work);
578
579         if (!ctrl_info) {
580                 pr_err("%s: NULL controller pointer.\n", __func__);
581                 return;
582         }
583         rc = pqi_write_current_time_to_host_wellness(ctrl_info);
584         if (rc)
585                 dev_warn(&ctrl_info->pci_dev->dev,
586                         "error updating time on controller\n");
587
588         schedule_delayed_work(&ctrl_info->update_time_work,
589                 PQI_UPDATE_TIME_WORK_INTERVAL);
590 }
591
592 static inline void pqi_schedule_update_time_worker(
593                         struct pqi_ctrl_info *ctrl_info)
594 {
595         schedule_delayed_work(&ctrl_info->update_time_work, 120);
596 }
597
598 static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
599         void *buffer, size_t buffer_length)
600 {
601         int rc;
602         int pci_direction;
603         struct pqi_raid_path_request request;
604
605         rc = pqi_build_raid_path_request(ctrl_info, &request,
606                 cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
607         if (rc)
608                 return rc;
609
610         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
611                 NULL, NO_TIMEOUT);
612
613         pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
614                 pci_direction);
615
616         return rc;
617 }
618
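/*
 * Issue CISS_REPORT_PHYS/CISS_REPORT_LOG twice: first with only a header
 * to learn the list length, then with a buffer sized for the full list.
 * If the list grew between the two calls, retry with the larger size.
 * On success *buffer holds the LUN data and the caller must free it.
 */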
619 static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
620         void **buffer)
621 {
622         int rc;
623         size_t lun_list_length;
624         size_t lun_data_length;
625         size_t new_lun_list_length;
626         void *lun_data = NULL;
627         struct report_lun_header *report_lun_header;
628
629         report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
630         if (!report_lun_header) {
631                 rc = -ENOMEM;
632                 goto out;
633         }
634
635         rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
636                 sizeof(*report_lun_header));
637         if (rc)
638                 goto out;
639
640         lun_list_length = get_unaligned_be32(&report_lun_header->list_length);
641
642 again:
643         lun_data_length = sizeof(struct report_lun_header) + lun_list_length;
644
645         lun_data = kmalloc(lun_data_length, GFP_KERNEL);
646         if (!lun_data) {
647                 rc = -ENOMEM;
648                 goto out;
649         }
650
651         if (lun_list_length == 0) {
652                 memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
653                 goto out;
654         }
655
656         rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
657         if (rc)
658                 goto out;
659
660         new_lun_list_length = get_unaligned_be32(
661                 &((struct report_lun_header *)lun_data)->list_length);
662
663         if (new_lun_list_length > lun_list_length) {
664                 lun_list_length = new_lun_list_length;
665                 kfree(lun_data);
666                 goto again;
667         }
668
669 out:
670         kfree(report_lun_header);
671
672         if (rc) {
673                 kfree(lun_data);
674                 lun_data = NULL;
675         }
676
677         *buffer = lun_data;
678
679         return rc;
680 }
681
682 static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
683         void **buffer)
684 {
685         return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
686                 buffer);
687 }
688
689 static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
690         void **buffer)
691 {
692         return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
693 }
694
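/*
 * Fetch both the physical and logical LUN lists, then append an empty
 * entry for the controller itself to the end of the logical list so the
 * controller is enumerated like any other device.
 */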
695 static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
696         struct report_phys_lun_extended **physdev_list,
697         struct report_log_lun_extended **logdev_list)
698 {
699         int rc;
700         size_t logdev_list_length;
701         size_t logdev_data_length;
702         struct report_log_lun_extended *internal_logdev_list;
703         struct report_log_lun_extended *logdev_data;
704         struct report_lun_header report_lun_header;
705
706         rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
707         if (rc)
708                 dev_err(&ctrl_info->pci_dev->dev,
709                         "report physical LUNs failed\n");
710
711         rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
712         if (rc)
713                 dev_err(&ctrl_info->pci_dev->dev,
714                         "report logical LUNs failed\n");
715
716         /*
717          * Tack the controller itself onto the end of the logical device list.
718          */
719
720         logdev_data = *logdev_list;
721
722         if (logdev_data) {
723                 logdev_list_length =
724                         get_unaligned_be32(&logdev_data->header.list_length);
725         } else {
726                 memset(&report_lun_header, 0, sizeof(report_lun_header));
727                 logdev_data =
728                         (struct report_log_lun_extended *)&report_lun_header;
729                 logdev_list_length = 0;
730         }
731
732         logdev_data_length = sizeof(struct report_lun_header) +
733                 logdev_list_length;
734
735         internal_logdev_list = kmalloc(logdev_data_length +
736                 sizeof(struct report_log_lun_extended), GFP_KERNEL);
737         if (!internal_logdev_list) {
738                 kfree(*logdev_list);
739                 *logdev_list = NULL;
740                 return -ENOMEM;
741         }
742
743         memcpy(internal_logdev_list, logdev_data, logdev_data_length);
744         memset((u8 *)internal_logdev_list + logdev_data_length, 0,
745                 sizeof(struct report_log_lun_extended_entry));
746         put_unaligned_be32(logdev_list_length +
747                 sizeof(struct report_log_lun_extended_entry),
748                 &internal_logdev_list->header.list_length);
749
750         kfree(*logdev_list);
751         *logdev_list = internal_logdev_list;
752
753         return 0;
754 }
755
756 static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
757         int bus, int target, int lun)
758 {
759         device->bus = bus;
760         device->target = target;
761         device->lun = lun;
762 }
763
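/*
 * Assign bus/target/lun: the controller itself goes on PQI_HBA_BUS,
 * logical volumes go on PQI_RAID_VOLUME_BUS with the LUN taken from the
 * low bits of the lunid, and other physical devices are placed on
 * PQI_PHYSICAL_DEVICE_BUS with target/LUN assignment deferred to the
 * SAS transport layer.
 */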
764 static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
765 {
766         u8 *scsi3addr;
767         u32 lunid;
768
769         scsi3addr = device->scsi3addr;
770         lunid = get_unaligned_le32(scsi3addr);
771
772         if (pqi_is_hba_lunid(scsi3addr)) {
773                 /* The specified device is the controller. */
774                 pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
775                 device->target_lun_valid = true;
776                 return;
777         }
778
779         if (pqi_is_logical_device(device)) {
780                 pqi_set_bus_target_lun(device, PQI_RAID_VOLUME_BUS, 0,
781                         lunid & 0x3fff);
782                 device->target_lun_valid = true;
783                 return;
784         }
785
786         /*
787          * Defer target and LUN assignment for non-controller physical devices
788          * because the SAS transport layer will make these assignments later.
789          */
790         pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
791 }
792
793 static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
794         struct pqi_scsi_dev *device)
795 {
796         int rc;
797         u8 raid_level;
798         u8 *buffer;
799
800         raid_level = SA_RAID_UNKNOWN;
801
802         buffer = kmalloc(64, GFP_KERNEL);
803         if (buffer) {
804                 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
805                         VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
806                 if (rc == 0) {
807                         raid_level = buffer[8];
808                         if (raid_level > SA_RAID_MAX)
809                                 raid_level = SA_RAID_UNKNOWN;
810                 }
811                 kfree(buffer);
812         }
813
814         device->raid_level = raid_level;
815 }
816
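/*
 * Sanity-check a RAID map returned by the controller: overall structure
 * size, total number of map entries, and the layout map count expected
 * for the volume's RAID level (2 for RAID-1, 3 for RAID-1(ADM), and a
 * non-zero row size for RAID-50/60).
 */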
817 static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
818         struct pqi_scsi_dev *device, struct raid_map *raid_map)
819 {
820         char *err_msg;
821         u32 raid_map_size;
822         u32 r5or6_blocks_per_row;
823         unsigned int num_phys_disks;
824         unsigned int num_raid_map_entries;
825
826         raid_map_size = get_unaligned_le32(&raid_map->structure_size);
827
828         if (raid_map_size < offsetof(struct raid_map, disk_data)) {
829                 err_msg = "RAID map too small";
830                 goto bad_raid_map;
831         }
832
833         if (raid_map_size > sizeof(*raid_map)) {
834                 err_msg = "RAID map too large";
835                 goto bad_raid_map;
836         }
837
838         num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
839                 (get_unaligned_le16(&raid_map->data_disks_per_row) +
840                 get_unaligned_le16(&raid_map->metadata_disks_per_row));
841         num_raid_map_entries = num_phys_disks *
842                 get_unaligned_le16(&raid_map->row_cnt);
843
844         if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
845                 err_msg = "invalid number of map entries in RAID map";
846                 goto bad_raid_map;
847         }
848
849         if (device->raid_level == SA_RAID_1) {
850                 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
851                         err_msg = "invalid RAID-1 map";
852                         goto bad_raid_map;
853                 }
854         } else if (device->raid_level == SA_RAID_ADM) {
855                 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
856                         err_msg = "invalid RAID-1(ADM) map";
857                         goto bad_raid_map;
858                 }
859         } else if ((device->raid_level == SA_RAID_5 ||
860                 device->raid_level == SA_RAID_6) &&
861                 get_unaligned_le16(&raid_map->layout_map_count) > 1) {
862                 /* RAID 50/60 */
863                 r5or6_blocks_per_row =
864                         get_unaligned_le16(&raid_map->strip_size) *
865                         get_unaligned_le16(&raid_map->data_disks_per_row);
866                 if (r5or6_blocks_per_row == 0) {
867                         err_msg = "invalid RAID-5 or RAID-6 map";
868                         goto bad_raid_map;
869                 }
870         }
871
872         return 0;
873
874 bad_raid_map:
875         dev_warn(&ctrl_info->pci_dev->dev, "%s\n", err_msg);
876
877         return -EINVAL;
878 }
879
880 static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
881         struct pqi_scsi_dev *device)
882 {
883         int rc;
884         int pci_direction;
885         struct pqi_raid_path_request request;
886         struct raid_map *raid_map;
887
888         raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
889         if (!raid_map)
890                 return -ENOMEM;
891
892         rc = pqi_build_raid_path_request(ctrl_info, &request,
893                 CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
894                 sizeof(*raid_map), 0, &pci_direction);
895         if (rc)
896                 goto error;
897
898         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
899                 NULL, NO_TIMEOUT);
900
901         pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
902                 pci_direction);
903
904         if (rc)
905                 goto error;
906
907         rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
908         if (rc)
909                 goto error;
910
911         device->raid_map = raid_map;
912
913         return 0;
914
915 error:
916         kfree(raid_map);
917
918         return rc;
919 }
920
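/*
 * Read the vendor-specific offload status VPD page to determine whether
 * accelerated I/O is configured and enabled for the volume.  Offload is
 * left pending only if the RAID map can also be fetched and validated.
 */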
921 static void pqi_get_offload_status(struct pqi_ctrl_info *ctrl_info,
922         struct pqi_scsi_dev *device)
923 {
924         int rc;
925         u8 *buffer;
926         u8 offload_status;
927
928         buffer = kmalloc(64, GFP_KERNEL);
929         if (!buffer)
930                 return;
931
932         rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
933                 VPD_PAGE | CISS_VPD_LV_OFFLOAD_STATUS, buffer, 64);
934         if (rc)
935                 goto out;
936
937 #define OFFLOAD_STATUS_BYTE     4
938 #define OFFLOAD_CONFIGURED_BIT  0x1
939 #define OFFLOAD_ENABLED_BIT     0x2
940
941         offload_status = buffer[OFFLOAD_STATUS_BYTE];
942         device->offload_configured =
943                 !!(offload_status & OFFLOAD_CONFIGURED_BIT);
944         if (device->offload_configured) {
945                 device->offload_enabled_pending =
946                         !!(offload_status & OFFLOAD_ENABLED_BIT);
947                 if (pqi_get_raid_map(ctrl_info, device))
948                         device->offload_enabled_pending = false;
949         }
950
951 out:
952         kfree(buffer);
953 }
954
955 /*
956  * Use vendor-specific VPD to determine online/offline status of a volume.
957  */
958
959 static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
960         struct pqi_scsi_dev *device)
961 {
962         int rc;
963         size_t page_length;
964         u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
965         bool volume_offline = true;
966         u32 volume_flags;
967         struct ciss_vpd_logical_volume_status *vpd;
968
969         vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
970         if (!vpd)
971                 goto no_buffer;
972
973         rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
974                 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
975         if (rc)
976                 goto out;
977
978         page_length = offsetof(struct ciss_vpd_logical_volume_status,
979                 volume_status) + vpd->page_length;
980         if (page_length < sizeof(*vpd))
981                 goto out;
982
983         volume_status = vpd->volume_status;
984         volume_flags = get_unaligned_be32(&vpd->flags);
985         volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
986
987 out:
988         kfree(vpd);
989 no_buffer:
990         device->volume_status = volume_status;
991         device->volume_offline = volume_offline;
992 }
993
994 static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
995         struct pqi_scsi_dev *device)
996 {
997         int rc;
998         u8 *buffer;
999
1000         buffer = kmalloc(64, GFP_KERNEL);
1001         if (!buffer)
1002                 return -ENOMEM;
1003
1004         /* Send an inquiry to the device to see what it is. */
1005         rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
1006         if (rc)
1007                 goto out;
1008
1009         scsi_sanitize_inquiry_string(&buffer[8], 8);
1010         scsi_sanitize_inquiry_string(&buffer[16], 16);
1011
1012         device->devtype = buffer[0] & 0x1f;
1013         memcpy(device->vendor, &buffer[8],
1014                 sizeof(device->vendor));
1015         memcpy(device->model, &buffer[16],
1016                 sizeof(device->model));
1017
1018         if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
1019                 pqi_get_raid_level(ctrl_info, device);
1020                 pqi_get_offload_status(ctrl_info, device);
1021                 pqi_get_volume_status(ctrl_info, device);
1022         }
1023
1024 out:
1025         kfree(buffer);
1026
1027         return rc;
1028 }
1029
1030 static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
1031         struct pqi_scsi_dev *device,
1032         struct bmic_identify_physical_device *id_phys)
1033 {
1034         int rc;
1035
1036         memset(id_phys, 0, sizeof(*id_phys));
1037
1038         rc = pqi_identify_physical_device(ctrl_info, device,
1039                 id_phys, sizeof(*id_phys));
1040         if (rc) {
1041                 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1042                 return;
1043         }
1044
1045         device->queue_depth =
1046                 get_unaligned_le16(&id_phys->current_queue_depth_limit);
1047         device->device_type = id_phys->device_type;
1048         device->active_path_index = id_phys->active_path_number;
1049         device->path_map = id_phys->redundant_path_present_map;
1050         memcpy(&device->box,
1051                 &id_phys->alternate_paths_phys_box_on_port,
1052                 sizeof(device->box));
1053         memcpy(&device->phys_connector,
1054                 &id_phys->alternate_paths_phys_connector,
1055                 sizeof(device->phys_connector));
1056         device->bay = id_phys->phys_bay_in_box;
1057 }
1058
1059 static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
1060         struct pqi_scsi_dev *device)
1061 {
1062         char *status;
1063         static const char unknown_state_str[] =
1064                 "Volume is in an unknown state (%u)";
1065         char unknown_state_buffer[sizeof(unknown_state_str) + 10];
1066
1067         switch (device->volume_status) {
1068         case CISS_LV_OK:
1069                 status = "Volume online";
1070                 break;
1071         case CISS_LV_FAILED:
1072                 status = "Volume failed";
1073                 break;
1074         case CISS_LV_NOT_CONFIGURED:
1075                 status = "Volume not configured";
1076                 break;
1077         case CISS_LV_DEGRADED:
1078                 status = "Volume degraded";
1079                 break;
1080         case CISS_LV_READY_FOR_RECOVERY:
1081                 status = "Volume ready for recovery operation";
1082                 break;
1083         case CISS_LV_UNDERGOING_RECOVERY:
1084                 status = "Volume undergoing recovery";
1085                 break;
1086         case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
1087                 status = "Wrong physical drive was replaced";
1088                 break;
1089         case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
1090                 status = "A physical drive not properly connected";
1091                 break;
1092         case CISS_LV_HARDWARE_OVERHEATING:
1093                 status = "Hardware is overheating";
1094                 break;
1095         case CISS_LV_HARDWARE_HAS_OVERHEATED:
1096                 status = "Hardware has overheated";
1097                 break;
1098         case CISS_LV_UNDERGOING_EXPANSION:
1099                 status = "Volume undergoing expansion";
1100                 break;
1101         case CISS_LV_NOT_AVAILABLE:
1102                 status = "Volume waiting for transforming volume";
1103                 break;
1104         case CISS_LV_QUEUED_FOR_EXPANSION:
1105                 status = "Volume queued for expansion";
1106                 break;
1107         case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
1108                 status = "Volume disabled due to SCSI ID conflict";
1109                 break;
1110         case CISS_LV_EJECTED:
1111                 status = "Volume has been ejected";
1112                 break;
1113         case CISS_LV_UNDERGOING_ERASE:
1114                 status = "Volume undergoing background erase";
1115                 break;
1116         case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
1117                 status = "Volume ready for predictive spare rebuild";
1118                 break;
1119         case CISS_LV_UNDERGOING_RPI:
1120                 status = "Volume undergoing rapid parity initialization";
1121                 break;
1122         case CISS_LV_PENDING_RPI:
1123                 status = "Volume queued for rapid parity initialization";
1124                 break;
1125         case CISS_LV_ENCRYPTED_NO_KEY:
1126                 status = "Encrypted volume inaccessible - key not present";
1127                 break;
1128         case CISS_LV_UNDERGOING_ENCRYPTION:
1129                 status = "Volume undergoing encryption process";
1130                 break;
1131         case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
1132                 status = "Volume undergoing encryption re-keying process";
1133                 break;
1134         case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1135                 status =
1136                         "Encrypted volume inaccessible - disabled on ctrl";
1137                 break;
1138         case CISS_LV_PENDING_ENCRYPTION:
1139                 status = "Volume pending migration to encrypted state";
1140                 break;
1141         case CISS_LV_PENDING_ENCRYPTION_REKEYING:
1142                 status = "Volume pending encryption rekeying";
1143                 break;
1144         case CISS_LV_NOT_SUPPORTED:
1145                 status = "Volume not supported on this controller";
1146                 break;
1147         case CISS_LV_STATUS_UNAVAILABLE:
1148                 status = "Volume status not available";
1149                 break;
1150         default:
1151                 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
1152                         unknown_state_str, device->volume_status);
1153                 status = unknown_state_buffer;
1154                 break;
1155         }
1156
1157         dev_info(&ctrl_info->pci_dev->dev,
1158                 "scsi %d:%d:%d:%d %s\n",
1159                 ctrl_info->scsi_host->host_no,
1160                 device->bus, device->target, device->lun, status);
1161 }
1162
1163 static struct pqi_scsi_dev *pqi_find_disk_by_aio_handle(
1164         struct pqi_ctrl_info *ctrl_info, u32 aio_handle)
1165 {
1166         struct pqi_scsi_dev *device;
1167
1168         list_for_each_entry(device, &ctrl_info->scsi_device_list,
1169                 scsi_device_list_entry) {
1170                 if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
1171                         continue;
1172                 if (pqi_is_logical_device(device))
1173                         continue;
1174                 if (device->aio_handle == aio_handle)
1175                         return device;
1176         }
1177
1178         return NULL;
1179 }
1180
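/*
 * Set a logical drive's queue depth to the sum of the queue depths of
 * the physical disks referenced by its RAID map (looked up via their
 * AIO handles).  If any backing disk cannot be found, keep the default
 * depth, disable offload and drop the RAID map.
 */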
1181 static void pqi_update_logical_drive_queue_depth(
1182         struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *logical_drive)
1183 {
1184         unsigned int i;
1185         struct raid_map *raid_map;
1186         struct raid_map_disk_data *disk_data;
1187         struct pqi_scsi_dev *phys_disk;
1188         unsigned int num_phys_disks;
1189         unsigned int num_raid_map_entries;
1190         unsigned int queue_depth;
1191
1192         logical_drive->queue_depth = PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH;
1193
1194         raid_map = logical_drive->raid_map;
1195         if (!raid_map)
1196                 return;
1197
1198         disk_data = raid_map->disk_data;
1199         num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
1200                 (get_unaligned_le16(&raid_map->data_disks_per_row) +
1201                 get_unaligned_le16(&raid_map->metadata_disks_per_row));
1202         num_raid_map_entries = num_phys_disks *
1203                 get_unaligned_le16(&raid_map->row_cnt);
1204
1205         queue_depth = 0;
1206         for (i = 0; i < num_raid_map_entries; i++) {
1207                 phys_disk = pqi_find_disk_by_aio_handle(ctrl_info,
1208                         disk_data[i].aio_handle);
1209
1210                 if (!phys_disk) {
1211                         dev_warn(&ctrl_info->pci_dev->dev,
1212                                 "failed to find physical disk for logical drive %016llx\n",
1213                                 get_unaligned_be64(logical_drive->scsi3addr));
1214                         logical_drive->offload_enabled = false;
1215                         logical_drive->offload_enabled_pending = false;
1216                         kfree(raid_map);
1217                         logical_drive->raid_map = NULL;
1218                         return;
1219                 }
1220
1221                 queue_depth += phys_disk->queue_depth;
1222         }
1223
1224         logical_drive->queue_depth = queue_depth;
1225 }
1226
1227 static void pqi_update_all_logical_drive_queue_depths(
1228         struct pqi_ctrl_info *ctrl_info)
1229 {
1230         struct pqi_scsi_dev *device;
1231
1232         list_for_each_entry(device, &ctrl_info->scsi_device_list,
1233                 scsi_device_list_entry) {
1234                 if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
1235                         continue;
1236                 if (!pqi_is_logical_device(device))
1237                         continue;
1238                 pqi_update_logical_drive_queue_depth(ctrl_info, device);
1239         }
1240 }
1241
1242 static void pqi_rescan_worker(struct work_struct *work)
1243 {
1244         struct pqi_ctrl_info *ctrl_info;
1245
1246         ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1247                 rescan_work);
1248
1249         pqi_scan_scsi_devices(ctrl_info);
1250 }
1251
1252 static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1253         struct pqi_scsi_dev *device)
1254 {
1255         int rc;
1256
1257         if (pqi_is_logical_device(device))
1258                 rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1259                         device->target, device->lun);
1260         else
1261                 rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1262
1263         return rc;
1264 }
1265
1266 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
1267         struct pqi_scsi_dev *device)
1268 {
1269         if (pqi_is_logical_device(device))
1270                 scsi_remove_device(device->sdev);
1271         else
1272                 pqi_remove_sas_device(device);
1273 }
1274
1275 /* Assumes the SCSI device list lock is held. */
1276
1277 static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1278         int bus, int target, int lun)
1279 {
1280         struct pqi_scsi_dev *device;
1281
1282         list_for_each_entry(device, &ctrl_info->scsi_device_list,
1283                 scsi_device_list_entry)
1284                 if (device->bus == bus && device->target == target &&
1285                         device->lun == lun)
1286                         return device;
1287
1288         return NULL;
1289 }
1290
1291 static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
1292         struct pqi_scsi_dev *dev2)
1293 {
1294         if (dev1->is_physical_device != dev2->is_physical_device)
1295                 return false;
1296
1297         if (dev1->is_physical_device)
1298                 return dev1->wwid == dev2->wwid;
1299
1300         return memcmp(dev1->volume_id, dev2->volume_id,
1301                 sizeof(dev1->volume_id)) == 0;
1302 }
1303
1304 enum pqi_find_result {
1305         DEVICE_NOT_FOUND,
1306         DEVICE_CHANGED,
1307         DEVICE_SAME,
1308 };
1309
1310 static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
1311         struct pqi_scsi_dev *device_to_find,
1312         struct pqi_scsi_dev **matching_device)
1313 {
1314         struct pqi_scsi_dev *device;
1315
1316         list_for_each_entry(device, &ctrl_info->scsi_device_list,
1317                 scsi_device_list_entry) {
1318                 if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
1319                         device->scsi3addr)) {
1320                         *matching_device = device;
1321                         if (pqi_device_equal(device_to_find, device)) {
1322                                 if (device_to_find->volume_offline)
1323                                         return DEVICE_CHANGED;
1324                                 return DEVICE_SAME;
1325                         }
1326                         return DEVICE_CHANGED;
1327                 }
1328         }
1329
1330         return DEVICE_NOT_FOUND;
1331 }
1332
1333 static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1334         char *action, struct pqi_scsi_dev *device)
1335 {
1336         dev_info(&ctrl_info->pci_dev->dev,
1337                 "%s scsi %d:%d:%d:%d: %s %.8s %.16s %-12s SSDSmartPathCap%c En%c Exp%c qd=%d\n",
1338                 action,
1339                 ctrl_info->scsi_host->host_no,
1340                 device->bus,
1341                 device->target,
1342                 device->lun,
1343                 scsi_device_type(device->devtype),
1344                 device->vendor,
1345                 device->model,
1346                 pqi_raid_level_to_string(device->raid_level),
1347                 device->offload_configured ? '+' : '-',
1348                 device->offload_enabled_pending ? '+' : '-',
1349                 device->expose_device ? '+' : '-',
1350                 device->queue_depth);
1351 }
1352
1353 /* Assumes the SCSI device list lock is held. */
1354
1355 static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
1356         struct pqi_scsi_dev *new_device)
1357 {
1358         existing_device->devtype = new_device->devtype;
1359         existing_device->device_type = new_device->device_type;
1360         existing_device->bus = new_device->bus;
1361         if (new_device->target_lun_valid) {
1362                 existing_device->target = new_device->target;
1363                 existing_device->lun = new_device->lun;
1364                 existing_device->target_lun_valid = true;
1365         }
1366
1367         /* By definition, the scsi3addr and wwid fields are already the same. */
1368
1369         existing_device->is_physical_device = new_device->is_physical_device;
1370         existing_device->expose_device = new_device->expose_device;
1371         existing_device->no_uld_attach = new_device->no_uld_attach;
1372         existing_device->aio_enabled = new_device->aio_enabled;
1373         memcpy(existing_device->vendor, new_device->vendor,
1374                 sizeof(existing_device->vendor));
1375         memcpy(existing_device->model, new_device->model,
1376                 sizeof(existing_device->model));
1377         existing_device->sas_address = new_device->sas_address;
1378         existing_device->raid_level = new_device->raid_level;
1379         existing_device->queue_depth = new_device->queue_depth;
1380         existing_device->aio_handle = new_device->aio_handle;
1381         existing_device->volume_status = new_device->volume_status;
1382         existing_device->active_path_index = new_device->active_path_index;
1383         existing_device->path_map = new_device->path_map;
1384         existing_device->bay = new_device->bay;
1385         memcpy(existing_device->box, new_device->box,
1386                 sizeof(existing_device->box));
1387         memcpy(existing_device->phys_connector, new_device->phys_connector,
1388                 sizeof(existing_device->phys_connector));
1389         existing_device->offload_configured = new_device->offload_configured;
1390         existing_device->offload_enabled = false;
1391         existing_device->offload_enabled_pending =
1392                 new_device->offload_enabled_pending;
1393         existing_device->offload_to_mirror = 0;
1394         kfree(existing_device->raid_map);
1395         existing_device->raid_map = new_device->raid_map;
1396
1397         /* Prevent the RAID map from being freed when new_device is freed later. */
1398         new_device->raid_map = NULL;
1399 }
1400
1401 static inline void pqi_free_device(struct pqi_scsi_dev *device)
1402 {
1403         if (device) {
1404                 kfree(device->raid_map);
1405                 kfree(device);
1406         }
1407 }
1408
1409 /*
1410  * Called when exposing a new device to the OS fails in order to re-adjust
1411  * our internal SCSI device list to match the SCSI ML's view.
1412  */
1413
1414 static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
1415         struct pqi_scsi_dev *device)
1416 {
1417         unsigned long flags;
1418
1419         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1420         list_del(&device->scsi_device_list_entry);
1421         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1422
1423         /* Allow the device structure to be freed later. */
1424         device->keep_device = false;
1425 }
1426
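/*
 * Reconcile the driver's internal device list with a freshly scanned
 * list.  Under the spinlock: mark existing devices gone, match them
 * against the new list, and queue additions/removals on local lists.
 * Outside the spinlock: remove departed devices, notify the SCSI ML of
 * queue depth changes, and expose the new devices.
 */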
1427 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
1428         struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
1429 {
1430         int rc;
1431         unsigned int i;
1432         unsigned long flags;
1433         enum pqi_find_result find_result;
1434         struct pqi_scsi_dev *device;
1435         struct pqi_scsi_dev *next;
1436         struct pqi_scsi_dev *matching_device;
1437         struct list_head add_list;
1438         struct list_head delete_list;
1439
1440         INIT_LIST_HEAD(&add_list);
1441         INIT_LIST_HEAD(&delete_list);
1442
1443         /*
1444          * The idea here is to do as little work as possible while holding the
1445          * spinlock.  That's why we go to great pains to defer anything other
1446          * than updating the internal device list until after we release the
1447          * spinlock.
1448          */
1449
1450         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1451
1452         /* Assume that all devices in the existing list have gone away. */
1453         list_for_each_entry(device, &ctrl_info->scsi_device_list,
1454                 scsi_device_list_entry)
1455                 device->device_gone = true;
1456
1457         for (i = 0; i < num_new_devices; i++) {
1458                 device = new_device_list[i];
1459
1460                 find_result = pqi_scsi_find_entry(ctrl_info, device,
1461                                                 &matching_device);
1462
1463                 switch (find_result) {
1464                 case DEVICE_SAME:
1465                         /*
1466                          * The newly found device is already in the existing
1467                          * device list.
1468                          */
1469                         device->new_device = false;
1470                         matching_device->device_gone = false;
1471                         pqi_scsi_update_device(matching_device, device);
1472                         break;
1473                 case DEVICE_NOT_FOUND:
1474                         /*
1475                          * The newly found device is NOT in the existing device
1476                          * list.
1477                          */
1478                         device->new_device = true;
1479                         break;
1480                 case DEVICE_CHANGED:
1481                         /*
1482                          * The original device has gone away and we need to add
1483                          * the new device.
1484                          */
1485                         device->new_device = true;
1486                         break;
1487                 default:
1488                         WARN_ON(find_result);
1489                         break;
1490                 }
1491         }
1492
1493         /* Process all devices that have gone away. */
1494         list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
1495                 scsi_device_list_entry) {
1496                 if (device->device_gone) {
1497                         list_del(&device->scsi_device_list_entry);
1498                         list_add_tail(&device->delete_list_entry, &delete_list);
1499                 }
1500         }
1501
1502         /* Process all new devices. */
1503         for (i = 0; i < num_new_devices; i++) {
1504                 device = new_device_list[i];
1505                 if (!device->new_device)
1506                         continue;
1507                 if (device->volume_offline)
1508                         continue;
1509                 list_add_tail(&device->scsi_device_list_entry,
1510                         &ctrl_info->scsi_device_list);
1511                 list_add_tail(&device->add_list_entry, &add_list);
1512                 /* Prevent this device structure from being freed later. */
1513                 device->keep_device = true;
1514         }
1515
1516         pqi_update_all_logical_drive_queue_depths(ctrl_info);
1517
1518         list_for_each_entry(device, &ctrl_info->scsi_device_list,
1519                 scsi_device_list_entry)
1520                 device->offload_enabled =
1521                         device->offload_enabled_pending;
1522
1523         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1524
1525         /* Remove all devices that have gone away. */
1526         list_for_each_entry_safe(device, next, &delete_list,
1527                 delete_list_entry) {
1528                 if (device->sdev)
1529                         pqi_remove_device(ctrl_info, device);
1530                 if (device->volume_offline) {
1531                         pqi_dev_info(ctrl_info, "offline", device);
1532                         pqi_show_volume_status(ctrl_info, device);
1533                 } else {
1534                         pqi_dev_info(ctrl_info, "removed", device);
1535                 }
1536                 list_del(&device->delete_list_entry);
1537                 pqi_free_device(device);
1538         }
1539
1540         /*
1541          * Notify the SCSI ML if the queue depth of any existing device has
1542          * changed.
1543          */
1544         list_for_each_entry(device, &ctrl_info->scsi_device_list,
1545                 scsi_device_list_entry) {
1546                 if (device->sdev && device->queue_depth !=
1547                         device->advertised_queue_depth) {
1548                         device->advertised_queue_depth = device->queue_depth;
1549                         scsi_change_queue_depth(device->sdev,
1550                                 device->advertised_queue_depth);
1551                 }
1552         }
1553
1554         /* Expose any new devices. */
1555         list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
1556                 if (device->expose_device && !device->sdev) {
1557                         rc = pqi_add_device(ctrl_info, device);
1558                         if (rc) {
1559                                 dev_warn(&ctrl_info->pci_dev->dev,
1560                                         "scsi %d:%d:%d:%d addition failed, device not added\n",
1561                                         ctrl_info->scsi_host->host_no,
1562                                         device->bus, device->target,
1563                                         device->lun);
1564                                 pqi_fixup_botched_add(ctrl_info, device);
1565                                 continue;
1566                         }
1567                 }
1568                 pqi_dev_info(ctrl_info, "added", device);
1569         }
1570 }
1571
1572 static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
1573 {
1574         bool is_supported = false;
1575
1576         switch (device->devtype) {
1577         case TYPE_DISK:
1578         case TYPE_ZBC:
1579         case TYPE_TAPE:
1580         case TYPE_MEDIUM_CHANGER:
1581         case TYPE_ENCLOSURE:
1582                 is_supported = true;
1583                 break;
1584         case TYPE_RAID:
1585                 /*
1586                  * Only support the HBA controller itself as a RAID
1587                  * controller.  If it's a RAID controller other than
1588                  * the HBA itself (an external RAID controller, MSA500
1589                  * or similar), we don't support it.
1590                  */
1591                 if (pqi_is_hba_lunid(device->scsi3addr))
1592                         is_supported = true;
1593                 break;
1594         }
1595
1596         return is_supported;
1597 }
1598
1599 static inline bool pqi_skip_device(u8 *scsi3addr,
1600         struct report_phys_lun_extended_entry *phys_lun_ext_entry)
1601 {
1602         u8 device_flags;
1603
1604         if (!MASKED_DEVICE(scsi3addr))
1605                 return false;
1606
1607         /* The device is masked. */
1608
1609         device_flags = phys_lun_ext_entry->device_flags;
1610
1611         if (device_flags & REPORT_PHYS_LUN_DEV_FLAG_NON_DISK) {
1612                 /*
1613                  * It's a non-disk device.  We ignore all devices of this type
1614                  * when they're masked.
1615                  */
1616                 return true;
1617         }
1618
1619         return false;
1620 }
1621
1622 static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
1623 {
1624         /* Expose all devices except for physical devices that are masked. */
1625         if (device->is_physical_device && MASKED_DEVICE(device->scsi3addr))
1626                 return false;
1627
1628         return true;
1629 }
1630
1631 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1632 {
1633         int i;
1634         int rc;
1635         struct list_head new_device_list_head;
1636         struct report_phys_lun_extended *physdev_list = NULL;
1637         struct report_log_lun_extended *logdev_list = NULL;
1638         struct report_phys_lun_extended_entry *phys_lun_ext_entry;
1639         struct report_log_lun_extended_entry *log_lun_ext_entry;
1640         struct bmic_identify_physical_device *id_phys = NULL;
1641         u32 num_physicals;
1642         u32 num_logicals;
1643         struct pqi_scsi_dev **new_device_list = NULL;
1644         struct pqi_scsi_dev *device;
1645         struct pqi_scsi_dev *next;
1646         unsigned int num_new_devices;
1647         unsigned int num_valid_devices;
1648         bool is_physical_device;
1649         u8 *scsi3addr;
1650         static char *out_of_memory_msg =
1651                 "out of memory, device discovery stopped";
1652
1653         INIT_LIST_HEAD(&new_device_list_head);
1654
1655         rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
1656         if (rc)
1657                 goto out;
1658
1659         if (physdev_list)
1660                 num_physicals =
1661                         get_unaligned_be32(&physdev_list->header.list_length)
1662                                 / sizeof(physdev_list->lun_entries[0]);
1663         else
1664                 num_physicals = 0;
1665
1666         if (logdev_list)
1667                 num_logicals =
1668                         get_unaligned_be32(&logdev_list->header.list_length)
1669                                 / sizeof(logdev_list->lun_entries[0]);
1670         else
1671                 num_logicals = 0;
1672
1673         if (num_physicals) {
1674                 /*
1675                  * We need this buffer for calls to pqi_get_physical_disk_info()
1676                  * below.  We allocate it here instead of inside
1677                  * pqi_get_physical_disk_info() because it's a fairly large
1678                  * buffer.
1679                  */
1680                 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
1681                 if (!id_phys) {
1682                         dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1683                                 out_of_memory_msg);
1684                         rc = -ENOMEM;
1685                         goto out;
1686                 }
1687         }
1688
1689         num_new_devices = num_physicals + num_logicals;
1690
1691         new_device_list = kmalloc(sizeof(*new_device_list) *
1692                 num_new_devices, GFP_KERNEL);
1693         if (!new_device_list) {
1694                 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
1695                 rc = -ENOMEM;
1696                 goto out;
1697         }
1698
1699         for (i = 0; i < num_new_devices; i++) {
1700                 device = kzalloc(sizeof(*device), GFP_KERNEL);
1701                 if (!device) {
1702                         dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1703                                 out_of_memory_msg);
1704                         rc = -ENOMEM;
1705                         goto out;
1706                 }
1707                 list_add_tail(&device->new_device_list_entry,
1708                         &new_device_list_head);
1709         }
1710
1711         device = NULL;
1712         num_valid_devices = 0;
1713
1714         for (i = 0; i < num_new_devices; i++) {
1715
1716                 if (i < num_physicals) {
1717                         is_physical_device = true;
1718                         phys_lun_ext_entry = &physdev_list->lun_entries[i];
1719                         log_lun_ext_entry = NULL;
1720                         scsi3addr = phys_lun_ext_entry->lunid;
1721                 } else {
1722                         is_physical_device = false;
1723                         phys_lun_ext_entry = NULL;
1724                         log_lun_ext_entry =
1725                                 &logdev_list->lun_entries[i - num_physicals];
1726                         scsi3addr = log_lun_ext_entry->lunid;
1727                 }
1728
1729                 if (is_physical_device &&
1730                         pqi_skip_device(scsi3addr, phys_lun_ext_entry))
1731                         continue;
1732
1733                 if (device)
1734                         device = list_next_entry(device, new_device_list_entry);
1735                 else
1736                         device = list_first_entry(&new_device_list_head,
1737                                 struct pqi_scsi_dev, new_device_list_entry);
1738
1739                 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1740                 device->is_physical_device = is_physical_device;
1741                 device->raid_level = SA_RAID_UNKNOWN;
1742
1743                 /* Gather information about the device. */
1744                 rc = pqi_get_device_info(ctrl_info, device);
1745                 if (rc == -ENOMEM) {
1746                         dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1747                                 out_of_memory_msg);
1748                         goto out;
1749                 }
1750                 if (rc) {
1751                         dev_warn(&ctrl_info->pci_dev->dev,
1752                                 "obtaining device info failed, skipping device %016llx\n",
1753                                 get_unaligned_be64(device->scsi3addr));
1754                         rc = 0;
1755                         continue;
1756                 }
1757
1758                 if (!pqi_is_supported_device(device))
1759                         continue;
1760
1761                 pqi_assign_bus_target_lun(device);
1762
1763                 device->expose_device = pqi_expose_device(device);
1764
1765                 if (device->is_physical_device) {
1766                         device->wwid = phys_lun_ext_entry->wwid;
1767                         if ((phys_lun_ext_entry->device_flags &
1768                                 REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
1769                                 phys_lun_ext_entry->aio_handle)
1770                                 device->aio_enabled = true;
1771                 } else {
1772                         memcpy(device->volume_id, log_lun_ext_entry->volume_id,
1773                                 sizeof(device->volume_id));
1774                 }
1775
1776                 switch (device->devtype) {
1777                 case TYPE_DISK:
1778                 case TYPE_ZBC:
1779                 case TYPE_ENCLOSURE:
1780                         if (device->is_physical_device) {
1781                                 device->sas_address =
1782                                         get_unaligned_be64(&device->wwid);
1783                                 if (device->devtype == TYPE_DISK ||
1784                                         device->devtype == TYPE_ZBC) {
1785                                         device->aio_handle =
1786                                                 phys_lun_ext_entry->aio_handle;
1787                                         pqi_get_physical_disk_info(ctrl_info,
1788                                                 device, id_phys);
1789                                 }
1790                         }
1791                         break;
1792                 }
1793
1794                 new_device_list[num_valid_devices++] = device;
1795         }
1796
1797         pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
1798
1799 out:
1800         list_for_each_entry_safe(device, next, &new_device_list_head,
1801                 new_device_list_entry) {
1802                 if (device->keep_device)
1803                         continue;
1804                 list_del(&device->new_device_list_entry);
1805                 pqi_free_device(device);
1806         }
1807
1808         kfree(new_device_list);
1809         kfree(physdev_list);
1810         kfree(logdev_list);
1811         kfree(id_phys);
1812
1813         return rc;
1814 }
1815
1816 static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1817 {
1818         unsigned long flags;
1819         struct pqi_scsi_dev *device;
1820         struct pqi_scsi_dev *next;
1821
1822         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1823
1824         list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
1825                 scsi_device_list_entry) {
1826                 if (device->sdev)
1827                         pqi_remove_device(ctrl_info, device);
1828                 list_del(&device->scsi_device_list_entry);
1829                 pqi_free_device(device);
1830         }
1831
1832         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1833 }
1834
1835 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1836 {
1837         int rc;
1838
1839         if (pqi_ctrl_offline(ctrl_info))
1840                 return -ENXIO;
1841
1842         mutex_lock(&ctrl_info->scan_mutex);
1843
1844         rc = pqi_update_scsi_devices(ctrl_info);
1845         if (rc)
1846                 pqi_schedule_rescan_worker(ctrl_info);
1847
1848         mutex_unlock(&ctrl_info->scan_mutex);
1849
1850         return rc;
1851 }
1852
1853 static void pqi_scan_start(struct Scsi_Host *shost)
1854 {
1855         pqi_scan_scsi_devices(shost_to_hba(shost));
1856 }
1857
1858 /* Returns TRUE if scan is finished. */
1859
1860 static int pqi_scan_finished(struct Scsi_Host *shost,
1861         unsigned long elapsed_time)
1862 {
1863         struct pqi_ctrl_info *ctrl_info;
1864
1865         ctrl_info = shost_priv(shost);
1866
1867         return !mutex_is_locked(&ctrl_info->scan_mutex);
1868 }
1869
1870 static inline void pqi_set_encryption_info(
1871         struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
1872         u64 first_block)
1873 {
1874         u32 volume_blk_size;
1875
1876         /*
1877          * Set the encryption tweak values based on logical block address.
1878          * If the block size is 512, the tweak value is equal to the LBA.
1879          * For other block sizes, tweak value is (LBA * block size) / 512.
1880          */
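        /*
         * Worked example: with a 4096-byte volume block size, an I/O
         * starting at LBA 100 yields a tweak of (100 * 4096) / 512 = 800,
         * i.e. encrypt_tweak_lower = 800 and encrypt_tweak_upper = 0.
         */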
1881         volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
1882         if (volume_blk_size != 512)
1883                 first_block = (first_block * volume_blk_size) / 512;
1884
1885         encryption_info->data_encryption_key_index =
1886                 get_unaligned_le16(&raid_map->data_encryption_key_index);
1887         encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
1888         encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
1889 }
1890
1891 /*
1892  * Attempt to perform offload RAID mapping for a logical volume I/O.
1893  */
1894
1895 #define PQI_RAID_BYPASS_INELIGIBLE      1
1896
1897 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
1898         struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
1899         struct pqi_queue_group *queue_group)
1900 {
1901         struct raid_map *raid_map;
1902         bool is_write = false;
1903         u32 map_index;
1904         u64 first_block;
1905         u64 last_block;
1906         u32 block_cnt;
1907         u32 blocks_per_row;
1908         u64 first_row;
1909         u64 last_row;
1910         u32 first_row_offset;
1911         u32 last_row_offset;
1912         u32 first_column;
1913         u32 last_column;
1914         u64 r0_first_row;
1915         u64 r0_last_row;
1916         u32 r5or6_blocks_per_row;
1917         u64 r5or6_first_row;
1918         u64 r5or6_last_row;
1919         u32 r5or6_first_row_offset;
1920         u32 r5or6_last_row_offset;
1921         u32 r5or6_first_column;
1922         u32 r5or6_last_column;
1923         u16 data_disks_per_row;
1924         u32 total_disks_per_row;
1925         u16 layout_map_count;
1926         u32 stripesize;
1927         u16 strip_size;
1928         u32 first_group;
1929         u32 last_group;
1930         u32 current_group;
1931         u32 map_row;
1932         u32 aio_handle;
1933         u64 disk_block;
1934         u32 disk_block_cnt;
1935         u8 cdb[16];
1936         u8 cdb_length;
1937         int offload_to_mirror;
1938         struct pqi_encryption_info *encryption_info_ptr;
1939         struct pqi_encryption_info encryption_info;
1940 #if BITS_PER_LONG == 32
1941         u64 tmpdiv;
1942 #endif
1943
1944         /* Check for valid opcode, get LBA and block count. */
1945         switch (scmd->cmnd[0]) {
1946         case WRITE_6:
1947                 is_write = true;
1948                 /* fall through */
1949         case READ_6:
1950                 first_block = (u64)get_unaligned_be16(&scmd->cmnd[2]);
1951                 block_cnt = (u32)scmd->cmnd[4];
1952                 if (block_cnt == 0)
1953                         block_cnt = 256;        /* 0 means 256 blocks for 6-byte CDBs */
1954                 break;
1955         case WRITE_10:
1956                 is_write = true;
1957                 /* fall through */
1958         case READ_10:
1959                 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
1960                 block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
1961                 break;
1962         case WRITE_12:
1963                 is_write = true;
1964                 /* fall through */
1965         case READ_12:
1966                 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
1967                 block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
1968                 break;
1969         case WRITE_16:
1970                 is_write = true;
1971                 /* fall through */
1972         case READ_16:
1973                 first_block = get_unaligned_be64(&scmd->cmnd[2]);
1974                 block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
1975                 break;
1976         default:
1977                 /* Process via normal I/O path. */
1978                 return PQI_RAID_BYPASS_INELIGIBLE;
1979         }
1980
1981         /* Check for write to non-RAID-0. */
1982         if (is_write && device->raid_level != SA_RAID_0)
1983                 return PQI_RAID_BYPASS_INELIGIBLE;
1984
1985         if (unlikely(block_cnt == 0))
1986                 return PQI_RAID_BYPASS_INELIGIBLE;
1987
1988         last_block = first_block + block_cnt - 1;
1989         raid_map = device->raid_map;
1990
1991         /* Check for invalid block or wraparound. */
1992         if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
1993                 last_block < first_block)
1994                 return PQI_RAID_BYPASS_INELIGIBLE;
1995
1996         data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
1997         strip_size = get_unaligned_le16(&raid_map->strip_size);
1998         layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
1999
2000         /* Calculate stripe information for the request. */
2001         blocks_per_row = data_disks_per_row * strip_size;
2002 #if BITS_PER_LONG == 32
2003         tmpdiv = first_block;
2004         do_div(tmpdiv, blocks_per_row);
2005         first_row = tmpdiv;
2006         tmpdiv = last_block;
2007         do_div(tmpdiv, blocks_per_row);
2008         last_row = tmpdiv;
2009         first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2010         last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2011         tmpdiv = first_row_offset;
2012         do_div(tmpdiv, strip_size);
2013         first_column = tmpdiv;
2014         tmpdiv = last_row_offset;
2015         do_div(tmpdiv, strip_size);
2016         last_column = tmpdiv;
2017 #else
2018         first_row = first_block / blocks_per_row;
2019         last_row = last_block / blocks_per_row;
2020         first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2021         last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2022         first_column = first_row_offset / strip_size;
2023         last_column = last_row_offset / strip_size;
2024 #endif
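        /*
         * Worked example (assumed geometry): with strip_size = 128 blocks
         * and data_disks_per_row = 4, blocks_per_row is 512.  A request for
         * blocks 1000-1007 gives first_row = last_row = 1, row offsets 488
         * and 495, and first_column = last_column = 3, so it fits within a
         * single strip and stays eligible for bypass.
         */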
2025
2026         /* If this isn't a single row/column request, let the controller handle it. */
2027         if (first_row != last_row || first_column != last_column)
2028                 return PQI_RAID_BYPASS_INELIGIBLE;
2029
2030         /* Proceeding with driver mapping. */
2031         total_disks_per_row = data_disks_per_row +
2032                 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2033         map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2034                 get_unaligned_le16(&raid_map->row_cnt);
2035         map_index = (map_row * total_disks_per_row) + first_column;
2036
2037         /* RAID 1 */
2038         if (device->raid_level == SA_RAID_1) {
2039                 if (device->offload_to_mirror)
2040                         map_index += data_disks_per_row;
2041                 device->offload_to_mirror = !device->offload_to_mirror;
2042         } else if (device->raid_level == SA_RAID_ADM) {
2043                 /* RAID ADM */
2044                 /*
2045                  * Handles N-way mirrors (R1-ADM) and R10 with a number
2046                  * of drives divisible by 3.
2047                  */
2048                 offload_to_mirror = device->offload_to_mirror;
2049                 if (offload_to_mirror == 0)  {
2050                         /* use physical disk in the first mirrored group. */
2051                         map_index %= data_disks_per_row;
2052                 } else {
2053                         do {
2054                                 /*
2055                                  * Determine mirror group that map_index
2056                                  * indicates.
2057                                  */
2058                                 current_group = map_index / data_disks_per_row;
2059
2060                                 if (offload_to_mirror != current_group) {
2061                                         if (current_group <
2062                                                 layout_map_count - 1) {
2063                                                 /*
2064                                                  * Select raid index from
2065                                                  * next group.
2066                                                  */
2067                                                 map_index += data_disks_per_row;
2068                                                 current_group++;
2069                                         } else {
2070                                                 /*
2071                                                  * Select raid index from first
2072                                                  * group.
2073                                                  */
2074                                                 map_index %= data_disks_per_row;
2075                                                 current_group = 0;
2076                                         }
2077                                 }
2078                         } while (offload_to_mirror != current_group);
2079                 }
2080
2081                 /* Set mirror group to use next time. */
2082                 offload_to_mirror =
2083                         (offload_to_mirror >= layout_map_count - 1) ?
2084                                 0 : offload_to_mirror + 1;
2085                 WARN_ON(offload_to_mirror >= layout_map_count);
2086                 device->offload_to_mirror = offload_to_mirror;
2087                 /*
2088                  * Avoid direct use of device->offload_to_mirror within this
2089                  * function since multiple threads might simultaneously
2090                  * increment it beyond its valid range of 0 to layout_map_count - 1.
2091                  */
2092         } else if ((device->raid_level == SA_RAID_5 ||
2093                 device->raid_level == SA_RAID_6) && layout_map_count > 1) {
2094                 /* RAID 50/60 */
2095                 /* Verify first and last block are in same RAID group */
2096                 r5or6_blocks_per_row = strip_size * data_disks_per_row;
2097                 stripesize = r5or6_blocks_per_row * layout_map_count;
2098 #if BITS_PER_LONG == 32
2099                 tmpdiv = first_block;
2100                 first_group = do_div(tmpdiv, stripesize);
2101                 tmpdiv = first_group;
2102                 do_div(tmpdiv, r5or6_blocks_per_row);
2103                 first_group = tmpdiv;
2104                 tmpdiv = last_block;
2105                 last_group = do_div(tmpdiv, stripesize);
2106                 tmpdiv = last_group;
2107                 do_div(tmpdiv, r5or6_blocks_per_row);
2108                 last_group = tmpdiv;
2109 #else
2110                 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
2111                 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
2112 #endif
2113                 if (first_group != last_group)
2114                         return PQI_RAID_BYPASS_INELIGIBLE;
2115
2116                 /* Verify request is in a single row of RAID 5/6 */
2117 #if BITS_PER_LONG == 32
2118                 tmpdiv = first_block;
2119                 do_div(tmpdiv, stripesize);
2120                 first_row = r5or6_first_row = r0_first_row = tmpdiv;
2121                 tmpdiv = last_block;
2122                 do_div(tmpdiv, stripesize);
2123                 r5or6_last_row = r0_last_row = tmpdiv;
2124 #else
2125                 first_row = r5or6_first_row = r0_first_row =
2126                         first_block / stripesize;
2127                 r5or6_last_row = r0_last_row = last_block / stripesize;
2128 #endif
2129                 if (r5or6_first_row != r5or6_last_row)
2130                         return PQI_RAID_BYPASS_INELIGIBLE;
2131
2132                 /* Verify request is in a single column */
2133 #if BITS_PER_LONG == 32
2134                 tmpdiv = first_block;
2135                 first_row_offset = do_div(tmpdiv, stripesize);
2136                 tmpdiv = first_row_offset;
2137                 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
2138                 r5or6_first_row_offset = first_row_offset;
2139                 tmpdiv = last_block;
2140                 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
2141                 tmpdiv = r5or6_last_row_offset;
2142                 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
2143                 tmpdiv = r5or6_first_row_offset;
2144                 do_div(tmpdiv, strip_size);
2145                 first_column = r5or6_first_column = tmpdiv;
2146                 tmpdiv = r5or6_last_row_offset;
2147                 do_div(tmpdiv, strip_size);
2148                 r5or6_last_column = tmpdiv;
2149 #else
2150                 first_row_offset = r5or6_first_row_offset =
2151                         (u32)((first_block % stripesize) %
2152                         r5or6_blocks_per_row);
2153
2154                 r5or6_last_row_offset =
2155                         (u32)((last_block % stripesize) %
2156                         r5or6_blocks_per_row);
2157
2158                 first_column = r5or6_first_row_offset / strip_size;
2159                 r5or6_first_column = first_column;
2160                 r5or6_last_column = r5or6_last_row_offset / strip_size;
2161 #endif
2162                 if (r5or6_first_column != r5or6_last_column)
2163                         return PQI_RAID_BYPASS_INELIGIBLE;
2164
2165                 /* Request is eligible */
2166                 map_row =
2167                         ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2168                         get_unaligned_le16(&raid_map->row_cnt);
2169
2170                 map_index = (first_group *
2171                         (get_unaligned_le16(&raid_map->row_cnt) *
2172                         total_disks_per_row)) +
2173                         (map_row * total_disks_per_row) + first_column;
2174         }
2175
2176         if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
2177                 return PQI_RAID_BYPASS_INELIGIBLE;
2178
2179         aio_handle = raid_map->disk_data[map_index].aio_handle;
2180         disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2181                 first_row * strip_size +
2182                 (first_row_offset - first_column * strip_size);
2183         disk_block_cnt = block_cnt;
2184
2185         /* Handle differing logical/physical block sizes. */
2186         if (raid_map->phys_blk_shift) {
2187                 disk_block <<= raid_map->phys_blk_shift;
2188                 disk_block_cnt <<= raid_map->phys_blk_shift;
2189         }
2190
2191         if (unlikely(disk_block_cnt > 0xffff))
2192                 return PQI_RAID_BYPASS_INELIGIBLE;
2193
2194         /* Build the new CDB for the physical disk I/O. */
2195         if (disk_block > 0xffffffff) {
2196                 cdb[0] = is_write ? WRITE_16 : READ_16;
2197                 cdb[1] = 0;
2198                 put_unaligned_be64(disk_block, &cdb[2]);
2199                 put_unaligned_be32(disk_block_cnt, &cdb[10]);
2200                 cdb[14] = 0;
2201                 cdb[15] = 0;
2202                 cdb_length = 16;
2203         } else {
2204                 cdb[0] = is_write ? WRITE_10 : READ_10;
2205                 cdb[1] = 0;
2206                 put_unaligned_be32((u32)disk_block, &cdb[2]);
2207                 cdb[6] = 0;
2208                 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
2209                 cdb[9] = 0;
2210                 cdb_length = 10;
2211         }
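        /*
         * For example, a bypassed read of 8 blocks at physical disk_block
         * 0x1234 produces the 10-byte CDB 28 00 00 00 12 34 00 00 08 00
         * (READ_10 opcode, big-endian LBA and transfer length).
         */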
2212
2213         if (get_unaligned_le16(&raid_map->flags) &
2214                 RAID_MAP_ENCRYPTION_ENABLED) {
2215                 pqi_set_encryption_info(&encryption_info, raid_map,
2216                         first_block);
2217                 encryption_info_ptr = &encryption_info;
2218         } else {
2219                 encryption_info_ptr = NULL;
2220         }
2221
2222         return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
2223                 cdb, cdb_length, queue_group, encryption_info_ptr);
2224 }
2225
2226 #define PQI_STATUS_IDLE         0x0
2227
2228 #define PQI_CREATE_ADMIN_QUEUE_PAIR     1
2229 #define PQI_DELETE_ADMIN_QUEUE_PAIR     2
2230
2231 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET             0x0
2232 #define PQI_DEVICE_STATE_STATUS_AVAILABLE               0x1
2233 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY            0x2
2234 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY         0x3
2235 #define PQI_DEVICE_STATE_ERROR                          0x4
2236
2237 #define PQI_MODE_READY_TIMEOUT_SECS             30
2238 #define PQI_MODE_READY_POLL_INTERVAL_MSECS      1
2239
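/*
 * PQI mode is considered ready only after three conditions are observed in
 * sequence: the PQI signature appears in the signature register, the
 * function-and-status code reads IDLE, and the device status reports all
 * registers ready.  The three stages below share a single
 * PQI_MODE_READY_TIMEOUT_SECS deadline and are polled at roughly
 * PQI_MODE_READY_POLL_INTERVAL_MSECS intervals.
 */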
2240 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
2241 {
2242         struct pqi_device_registers __iomem *pqi_registers;
2243         unsigned long timeout;
2244         u64 signature;
2245         u8 status;
2246
2247         pqi_registers = ctrl_info->pqi_registers;
2248         timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
2249
2250         while (1) {
2251                 signature = readq(&pqi_registers->signature);
2252                 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
2253                         sizeof(signature)) == 0)
2254                         break;
2255                 if (time_after(jiffies, timeout)) {
2256                         dev_err(&ctrl_info->pci_dev->dev,
2257                                 "timed out waiting for PQI signature\n");
2258                         return -ETIMEDOUT;
2259                 }
2260                 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2261         }
2262
2263         while (1) {
2264                 status = readb(&pqi_registers->function_and_status_code);
2265                 if (status == PQI_STATUS_IDLE)
2266                         break;
2267                 if (time_after(jiffies, timeout)) {
2268                         dev_err(&ctrl_info->pci_dev->dev,
2269                                 "timed out waiting for PQI IDLE\n");
2270                         return -ETIMEDOUT;
2271                 }
2272                 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2273         }
2274
2275         while (1) {
2276                 if (readl(&pqi_registers->device_status) ==
2277                         PQI_DEVICE_STATE_ALL_REGISTERS_READY)
2278                         break;
2279                 if (time_after(jiffies, timeout)) {
2280                         dev_err(&ctrl_info->pci_dev->dev,
2281                                 "timed out waiting for PQI all registers ready\n");
2282                         return -ETIMEDOUT;
2283                 }
2284                 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2285         }
2286
2287         return 0;
2288 }
2289
2290 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
2291 {
2292         struct pqi_scsi_dev *device;
2293
2294         device = io_request->scmd->device->hostdata;
2295         device->offload_enabled = false;
2296 }
2297
2298 static inline void pqi_take_device_offline(struct scsi_device *sdev)
2299 {
2300         struct pqi_ctrl_info *ctrl_info;
2301
2302         if (scsi_device_online(sdev)) {
2303                 scsi_device_set_state(sdev, SDEV_OFFLINE);
2304                 ctrl_info = shost_to_hba(sdev->host);
2305                 schedule_delayed_work(&ctrl_info->rescan_work, 0);
2306         }
2307 }
2308
2309 static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
2310 {
2311         u8 scsi_status;
2312         u8 host_byte;
2313         struct scsi_cmnd *scmd;
2314         struct pqi_raid_error_info *error_info;
2315         size_t sense_data_length;
2316         int residual_count;
2317         int xfer_count;
2318         struct scsi_sense_hdr sshdr;
2319
2320         scmd = io_request->scmd;
2321         if (!scmd)
2322                 return;
2323
2324         error_info = io_request->error_info;
2325         scsi_status = error_info->status;
2326         host_byte = DID_OK;
2327
2328         if (error_info->data_out_result == PQI_DATA_IN_OUT_UNDERFLOW) {
2329                 xfer_count =
2330                         get_unaligned_le32(&error_info->data_out_transferred);
2331                 residual_count = scsi_bufflen(scmd) - xfer_count;
2332                 scsi_set_resid(scmd, residual_count);
2333                 if (xfer_count < scmd->underflow)
2334                         host_byte = DID_SOFT_ERROR;
2335         }
2336
2337         sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
2338         if (sense_data_length == 0)
2339                 sense_data_length =
2340                         get_unaligned_le16(&error_info->response_data_length);
2341         if (sense_data_length) {
2342                 if (sense_data_length > sizeof(error_info->data))
2343                         sense_data_length = sizeof(error_info->data);
2344
2345                 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
2346                         scsi_normalize_sense(error_info->data,
2347                                 sense_data_length, &sshdr) &&
2348                                 sshdr.sense_key == HARDWARE_ERROR &&
2349                                 sshdr.asc == 0x3e &&
2350                                 sshdr.ascq == 0x1) {
2351                         pqi_take_device_offline(scmd->device);
2352                         host_byte = DID_NO_CONNECT;
2353                 }
2354
2355                 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2356                         sense_data_length = SCSI_SENSE_BUFFERSIZE;
2357                 memcpy(scmd->sense_buffer, error_info->data,
2358                         sense_data_length);
2359         }
2360
2361         scmd->result = scsi_status;
2362         set_host_byte(scmd, host_byte);
2363 }
2364
2365 static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
2366 {
2367         u8 scsi_status;
2368         u8 host_byte;
2369         struct scsi_cmnd *scmd;
2370         struct pqi_aio_error_info *error_info;
2371         size_t sense_data_length;
2372         int residual_count;
2373         int xfer_count;
2374         bool device_offline;
2375
2376         scmd = io_request->scmd;
2377         error_info = io_request->error_info;
2378         host_byte = DID_OK;
2379         sense_data_length = 0;
2380         device_offline = false;
2381
2382         switch (error_info->service_response) {
2383         case PQI_AIO_SERV_RESPONSE_COMPLETE:
2384                 scsi_status = error_info->status;
2385                 break;
2386         case PQI_AIO_SERV_RESPONSE_FAILURE:
2387                 switch (error_info->status) {
2388                 case PQI_AIO_STATUS_IO_ABORTED:
2389                         scsi_status = SAM_STAT_TASK_ABORTED;
2390                         break;
2391                 case PQI_AIO_STATUS_UNDERRUN:
2392                         scsi_status = SAM_STAT_GOOD;
2393                         residual_count = get_unaligned_le32(
2394                                                 &error_info->residual_count);
2395                         scsi_set_resid(scmd, residual_count);
2396                         xfer_count = scsi_bufflen(scmd) - residual_count;
2397                         if (xfer_count < scmd->underflow)
2398                                 host_byte = DID_SOFT_ERROR;
2399                         break;
2400                 case PQI_AIO_STATUS_OVERRUN:
2401                         scsi_status = SAM_STAT_GOOD;
2402                         break;
2403                 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
2404                         pqi_aio_path_disabled(io_request);
2405                         scsi_status = SAM_STAT_GOOD;
2406                         io_request->status = -EAGAIN;
2407                         break;
2408                 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
2409                 case PQI_AIO_STATUS_INVALID_DEVICE:
2410                         device_offline = true;
2411                         pqi_take_device_offline(scmd->device);
2412                         host_byte = DID_NO_CONNECT;
2413                         scsi_status = SAM_STAT_CHECK_CONDITION;
2414                         break;
2415                 case PQI_AIO_STATUS_IO_ERROR:
2416                 default:
2417                         scsi_status = SAM_STAT_CHECK_CONDITION;
2418                         break;
2419                 }
2420                 break;
2421         case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
2422         case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
2423                 scsi_status = SAM_STAT_GOOD;
2424                 break;
2425         case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
2426         case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
2427         default:
2428                 scsi_status = SAM_STAT_CHECK_CONDITION;
2429                 break;
2430         }
2431
2432         if (error_info->data_present) {
2433                 sense_data_length =
2434                         get_unaligned_le16(&error_info->data_length);
2435                 if (sense_data_length) {
2436                         if (sense_data_length > sizeof(error_info->data))
2437                                 sense_data_length = sizeof(error_info->data);
2438                         if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2439                                 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2440                         memcpy(scmd->sense_buffer, error_info->data,
2441                                 sense_data_length);
2442                 }
2443         }
2444
2445         if (device_offline && sense_data_length == 0)
2446                 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
2447                         0x3e, 0x1);     /* logical unit failure */
2448
2449         scmd->result = scsi_status;
2450         set_host_byte(scmd, host_byte);
2451 }
2452
2453 static void pqi_process_io_error(unsigned int iu_type,
2454         struct pqi_io_request *io_request)
2455 {
2456         switch (iu_type) {
2457         case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2458                 pqi_process_raid_io_error(io_request);
2459                 break;
2460         case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2461                 pqi_process_aio_io_error(io_request);
2462                 break;
2463         }
2464 }
2465
2466 static int pqi_interpret_task_management_response(
2467         struct pqi_task_management_response *response)
2468 {
2469         int rc;
2470
2471         switch (response->response_code) {
2472         case SOP_TMF_COMPLETE:
2473         case SOP_TMF_FUNCTION_SUCCEEDED:
2474                 rc = 0;
2475                 break;
2476         default:
2477                 rc = -EIO;
2478                 break;
2479         }
2480
2481         return rc;
2482 }
2483
2484 static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
2485         struct pqi_queue_group *queue_group)
2486 {
2487         unsigned int num_responses;
2488         pqi_index_t oq_pi;
2489         pqi_index_t oq_ci;
2490         struct pqi_io_request *io_request;
2491         struct pqi_io_response *response;
2492         u16 request_id;
2493
2494         num_responses = 0;
2495         oq_ci = queue_group->oq_ci_copy;
2496
2497         while (1) {
2498                 oq_pi = *queue_group->oq_pi;
2499                 if (oq_pi == oq_ci)
2500                         break;
2501
2502                 num_responses++;
2503                 response = queue_group->oq_element_array +
2504                         (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
2505
2506                 request_id = get_unaligned_le16(&response->request_id);
2507                 WARN_ON(request_id >= ctrl_info->max_io_slots);
2508
2509                 io_request = &ctrl_info->io_request_pool[request_id];
2510                 WARN_ON(atomic_read(&io_request->refcount) == 0);
2511
2512                 switch (response->header.iu_type) {
2513                 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
2514                 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
2515                 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
2516                         break;
2517                 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
2518                         io_request->status =
2519                                 pqi_interpret_task_management_response(
2520                                         (void *)response);
2521                         break;
2522                 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
2523                         pqi_aio_path_disabled(io_request);
2524                         io_request->status = -EAGAIN;
2525                         break;
2526                 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2527                 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2528                         io_request->error_info = ctrl_info->error_buffer +
2529                                 (get_unaligned_le16(&response->error_index) *
2530                                 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
2531                         pqi_process_io_error(response->header.iu_type,
2532                                 io_request);
2533                         break;
2534                 default:
2535                         dev_err(&ctrl_info->pci_dev->dev,
2536                                 "unexpected IU type: 0x%x\n",
2537                                 response->header.iu_type);
2538                         WARN_ON(response->header.iu_type);
2539                         break;
2540                 }
2541
2542                 io_request->io_complete_callback(io_request,
2543                         io_request->context);
2544
2545                 /*
2546                  * Note that the I/O request structure CANNOT BE TOUCHED after
2547                  * returning from the I/O completion callback!
2548                  */
2549
2550                 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
2551         }
2552
2553         if (num_responses) {
2554                 queue_group->oq_ci_copy = oq_ci;
2555                 writel(oq_ci, queue_group->oq_ci);
2556         }
2557
2558         return num_responses;
2559 }
2560
2561 static inline unsigned int pqi_num_elements_free(unsigned int pi,
2562         unsigned int ci,
2563         unsigned int elements_in_queue)
2564 {
2565         unsigned int num_elements_used;
2566
2567         if (pi >= ci)
2568                 num_elements_used = pi - ci;
2569         else
2570                 num_elements_used = elements_in_queue - ci + pi;
2571
2572         return elements_in_queue - num_elements_used - 1;
2573 }
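/*
 * A minimal usage sketch (excluded from the build) of the circular-queue
 * accounting done by pqi_num_elements_free().  One element is always left
 * unused so that a full queue can be distinguished from an empty one.
 */
#if 0
static void pqi_example_queue_accounting(void)
{
        /* Wrapped queue: producer index 3, consumer index 10, 16 slots. */
        unsigned int num_free = pqi_num_elements_free(3, 10, 16);

        /* 16 - 10 + 3 = 9 slots are in use, so 16 - 9 - 1 = 6 remain free. */
        WARN_ON(num_free != 6);
}
#endif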
2574
2575 #define PQI_EVENT_ACK_TIMEOUT   30      /* seconds */
2576
2577 static void pqi_start_event_ack(struct pqi_ctrl_info *ctrl_info,
2578         struct pqi_event_acknowledge_request *iu, size_t iu_length)
2579 {
2580         pqi_index_t iq_pi;
2581         pqi_index_t iq_ci;
2582         unsigned long flags;
2583         void *next_element;
2584         unsigned long timeout;
2585         struct pqi_queue_group *queue_group;
2586
2587         queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
2588         put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
2589
2590         timeout = (PQI_EVENT_ACK_TIMEOUT * HZ) + jiffies;
2591
2592         while (1) {
2593                 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
2594
2595                 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
2596                 iq_ci = *queue_group->iq_ci[RAID_PATH];
2597
2598                 if (pqi_num_elements_free(iq_pi, iq_ci,
2599                         ctrl_info->num_elements_per_iq))
2600                         break;
2601
2602                 spin_unlock_irqrestore(
2603                         &queue_group->submit_lock[RAID_PATH], flags);
2604
2605                 if (time_after(jiffies, timeout)) {
2606                         dev_err(&ctrl_info->pci_dev->dev,
2607                                 "sending event acknowledge timed out\n");
2608                         return;
2609                 }
2610         }
2611
2612         next_element = queue_group->iq_element_array[RAID_PATH] +
2613                 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
2614
2615         memcpy(next_element, iu, iu_length);
2616
2617         iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
2618
2619         queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
2620
2621         /*
2622          * This write notifies the controller that an IU is available to be
2623          * processed.
2624          */
2625         writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
2626
2627         spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
2628
2629         /*
2630          * We have to special-case this type of request because the firmware
2631          * does not generate an interrupt when this type of request completes.
2632          * Therefore, we have to poll until we see that the firmware has
2633          * consumed the request before we move on.
2634          */
2635
2636         timeout = (PQI_EVENT_ACK_TIMEOUT * HZ) + jiffies;
2637
2638         while (1) {
2639                 if (*queue_group->iq_ci[RAID_PATH] == iq_pi)
2640                         break;
2641                 if (time_after(jiffies, timeout)) {
2642                         dev_err(&ctrl_info->pci_dev->dev,
2643                                 "completing event acknowledge timed out\n");
2644                         break;
2645                 }
2646                 usleep_range(1000, 2000);
2647         }
2648 }
2649
2650 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
2651         struct pqi_event *event)
2652 {
2653         struct pqi_event_acknowledge_request request;
2654
2655         memset(&request, 0, sizeof(request));
2656
2657         request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
2658         put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
2659                 &request.header.iu_length);
2660         request.event_type = event->event_type;
2661         request.event_id = event->event_id;
2662         request.additional_event_id = event->additional_event_id;
2663
2664         pqi_start_event_ack(ctrl_info, &request, sizeof(request));
2665 }
2666
2667 static void pqi_event_worker(struct work_struct *work)
2668 {
2669         unsigned int i;
2670         struct pqi_ctrl_info *ctrl_info;
2671         struct pqi_event *pending_event;
2672         bool got_non_heartbeat_event = false;
2673
2674         ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
2675
2676         pending_event = ctrl_info->pending_events;
2677         for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
2678                 if (pending_event->pending) {
2679                         pending_event->pending = false;
2680                         pqi_acknowledge_event(ctrl_info, pending_event);
2681                         if (i != PQI_EVENT_HEARTBEAT)
2682                                 got_non_heartbeat_event = true;
2683                 }
2684                 pending_event++;
2685         }
2686
2687         if (got_non_heartbeat_event)
2688                 pqi_schedule_rescan_worker(ctrl_info);
2689 }
2690
2691 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
2692 {
2693         unsigned int i;
2694         unsigned int path;
2695         struct pqi_queue_group *queue_group;
2696         unsigned long flags;
2697         struct pqi_io_request *io_request;
2698         struct pqi_io_request *next;
2699         struct scsi_cmnd *scmd;
2700
2701         ctrl_info->controller_online = false;
2702         dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
2703
2704         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
2705                 queue_group = &ctrl_info->queue_groups[i];
2706
2707                 for (path = 0; path < 2; path++) {
2708                         spin_lock_irqsave(
2709                                 &queue_group->submit_lock[path], flags);
2710
2711                         list_for_each_entry_safe(io_request, next,
2712                                 &queue_group->request_list[path],
2713                                 request_list_entry) {
2714
2715                                 scmd = io_request->scmd;
2716                                 if (scmd) {
2717                                         set_host_byte(scmd, DID_NO_CONNECT);
2718                                         pqi_scsi_done(scmd);
2719                                 }
2720
2721                                 list_del(&io_request->request_list_entry);
2722                         }
2723
2724                         spin_unlock_irqrestore(
2725                                 &queue_group->submit_lock[path], flags);
2726                 }
2727         }
2728 }
2729
2730 #define PQI_HEARTBEAT_TIMER_INTERVAL    (5 * HZ)
2731 #define PQI_MAX_HEARTBEAT_REQUESTS      5
2732
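/*
 * The heartbeat timer fires every PQI_HEARTBEAT_TIMER_INTERVAL (5 seconds).
 * If no interrupts arrived since the previous tick, the heartbeat event is
 * marked pending so that the event worker re-acknowledges it to the
 * firmware; once more than PQI_MAX_HEARTBEAT_REQUESTS consecutive quiet
 * intervals accumulate, the controller is taken offline.
 */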
2733 static void pqi_heartbeat_timer_handler(unsigned long data)
2734 {
2735         int num_interrupts;
2736         struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data;
2737
2738         num_interrupts = atomic_read(&ctrl_info->num_interrupts);
2739
2740         if (num_interrupts == ctrl_info->previous_num_interrupts) {
2741                 ctrl_info->num_heartbeats_requested++;
2742                 if (ctrl_info->num_heartbeats_requested >
2743                         PQI_MAX_HEARTBEAT_REQUESTS) {
2744                         pqi_take_ctrl_offline(ctrl_info);
2745                         return;
2746                 }
2747                 ctrl_info->pending_events[PQI_EVENT_HEARTBEAT].pending = true;
2748                 schedule_work(&ctrl_info->event_work);
2749         } else {
2750                 ctrl_info->num_heartbeats_requested = 0;
2751         }
2752
2753         ctrl_info->previous_num_interrupts = num_interrupts;
2754         mod_timer(&ctrl_info->heartbeat_timer,
2755                 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
2756 }
2757
2758 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2759 {
2760         ctrl_info->previous_num_interrupts =
2761                 atomic_read(&ctrl_info->num_interrupts);
2762
2763         init_timer(&ctrl_info->heartbeat_timer);
2764         ctrl_info->heartbeat_timer.expires =
2765                 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
2766         ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info;
2767         ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler;
2768         add_timer(&ctrl_info->heartbeat_timer);
2769         ctrl_info->heartbeat_timer_started = true;
2770 }
2771
2772 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2773 {
2774         if (ctrl_info->heartbeat_timer_started)
2775                 del_timer_sync(&ctrl_info->heartbeat_timer);
2776 }
2777
2778 static int pqi_event_type_to_event_index(unsigned int event_type)
2779 {
2780         int index;
2781
2782         switch (event_type) {
2783         case PQI_EVENT_TYPE_HEARTBEAT:
2784                 index = PQI_EVENT_HEARTBEAT;
2785                 break;
2786         case PQI_EVENT_TYPE_HOTPLUG:
2787                 index = PQI_EVENT_HOTPLUG;
2788                 break;
2789         case PQI_EVENT_TYPE_HARDWARE:
2790                 index = PQI_EVENT_HARDWARE;
2791                 break;
2792         case PQI_EVENT_TYPE_PHYSICAL_DEVICE:
2793                 index = PQI_EVENT_PHYSICAL_DEVICE;
2794                 break;
2795         case PQI_EVENT_TYPE_LOGICAL_DEVICE:
2796                 index = PQI_EVENT_LOGICAL_DEVICE;
2797                 break;
2798         case PQI_EVENT_TYPE_AIO_STATE_CHANGE:
2799                 index = PQI_EVENT_AIO_STATE_CHANGE;
2800                 break;
2801         case PQI_EVENT_TYPE_AIO_CONFIG_CHANGE:
2802                 index = PQI_EVENT_AIO_CONFIG_CHANGE;
2803                 break;
2804         default:
2805                 index = -1;
2806                 break;
2807         }
2808
2809         return index;
2810 }
2811
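/*
 * Drain the event queue.  Each event that requests an acknowledgement has
 * its identifiers saved in pending_events[]; non-heartbeat events are also
 * marked pending so that the event worker is scheduled to acknowledge them
 * and kick off a rescan.
 */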
2812 static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
2813 {
2814         unsigned int num_events;
2815         pqi_index_t oq_pi;
2816         pqi_index_t oq_ci;
2817         struct pqi_event_queue *event_queue;
2818         struct pqi_event_response *response;
2819         struct pqi_event *pending_event;
2820         bool need_delayed_work;
2821         int event_index;
2822
2823         event_queue = &ctrl_info->event_queue;
2824         num_events = 0;
2825         need_delayed_work = false;
2826         oq_ci = event_queue->oq_ci_copy;
2827
2828         while (1) {
2829                 oq_pi = *event_queue->oq_pi;
2830                 if (oq_pi == oq_ci)
2831                         break;
2832
2833                 num_events++;
2834                 response = event_queue->oq_element_array +
2835                         (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
2836
2837                 event_index =
2838                         pqi_event_type_to_event_index(response->event_type);
2839
2840                 if (event_index >= 0) {
2841                         if (response->request_acknowlege) {
2842                                 pending_event =
2843                                         &ctrl_info->pending_events[event_index];
2844                                 pending_event->event_type =
2845                                         response->event_type;
2846                                 pending_event->event_id = response->event_id;
2847                                 pending_event->additional_event_id =
2848                                         response->additional_event_id;
2849                                 if (event_index != PQI_EVENT_HEARTBEAT) {
2850                                         pending_event->pending = true;
2851                                         need_delayed_work = true;
2852                                 }
2853                         }
2854                 }
2855
2856                 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
2857         }
2858
2859         if (num_events) {
2860                 event_queue->oq_ci_copy = oq_ci;
2861                 writel(oq_ci, event_queue->oq_ci);
2862
2863                 if (need_delayed_work)
2864                         schedule_work(&ctrl_info->event_work);
2865         }
2866
2867         return num_events;
2868 }
2869
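/*
 * Per-queue-group MSI-X handler.  Each vector services the I/O responses
 * for its own queue group; the vector shared with the event queue (the
 * first one) also drains pending events.  Any requests queued while the
 * submission rings were full are restarted on both paths before returning.
 */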
2870 static irqreturn_t pqi_irq_handler(int irq, void *data)
2871 {
2872         struct pqi_ctrl_info *ctrl_info;
2873         struct pqi_queue_group *queue_group;
2874         unsigned int num_responses_handled;
2875
2876         queue_group = data;
2877         ctrl_info = queue_group->ctrl_info;
2878
2879         if (!ctrl_info || !queue_group->oq_ci)
2880                 return IRQ_NONE;
2881
2882         num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
2883
2884         if (irq == ctrl_info->event_irq)
2885                 num_responses_handled += pqi_process_event_intr(ctrl_info);
2886
2887         if (num_responses_handled)
2888                 atomic_inc(&ctrl_info->num_interrupts);
2889
2890         pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
2891         pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
2892
2893         return IRQ_HANDLED;
2894 }
2895
2896 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
2897 {
2898         int i;
2899         int rc;
2900
2901         ctrl_info->event_irq = ctrl_info->msix_vectors[0];
2902
2903         for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
2904                 rc = request_irq(ctrl_info->msix_vectors[i],
2905                         pqi_irq_handler, 0,
2906                         DRIVER_NAME_SHORT, ctrl_info->intr_data[i]);
2907                 if (rc) {
2908                         dev_err(&ctrl_info->pci_dev->dev,
2909                                 "irq %u init failed with error %d\n",
2910                                 ctrl_info->msix_vectors[i], rc);
2911                         return rc;
2912                 }
2913                 ctrl_info->num_msix_vectors_initialized++;
2914         }
2915
2916         return 0;
2917 }
2918
2919 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
2920 {
2921         int i;
2922
2923         for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
2924                 free_irq(ctrl_info->msix_vectors[i],
2925                         ctrl_info->intr_data[i]);
2926 }
2927
2928 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
2929 {
2930         unsigned int i;
2931         int max_vectors;
2932         int num_vectors_enabled;
2933         struct msix_entry msix_entries[PQI_MAX_MSIX_VECTORS];
2934
2935         max_vectors = ctrl_info->num_queue_groups;
2936
2937         for (i = 0; i < max_vectors; i++)
2938                 msix_entries[i].entry = i;
2939
2940         num_vectors_enabled = pci_enable_msix_range(ctrl_info->pci_dev,
2941                 msix_entries, PQI_MIN_MSIX_VECTORS, max_vectors);
2942
2943         if (num_vectors_enabled < 0) {
2944                 dev_err(&ctrl_info->pci_dev->dev,
2945                         "MSI-X init failed with error %d\n",
2946                         num_vectors_enabled);
2947                 return num_vectors_enabled;
2948         }
2949
2950         ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
2951         for (i = 0; i < num_vectors_enabled; i++) {
2952                 ctrl_info->msix_vectors[i] = msix_entries[i].vector;
2953                 ctrl_info->intr_data[i] = &ctrl_info->queue_groups[i];
2954         }
2955
2956         return 0;
2957 }
2958
2959 static void pqi_irq_set_affinity_hint(struct pqi_ctrl_info *ctrl_info)
2960 {
2961         int i;
2962         int rc;
2963         int cpu;
2964
2965         cpu = cpumask_first(cpu_online_mask);
2966         for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) {
2967                 rc = irq_set_affinity_hint(ctrl_info->msix_vectors[i],
2968                         get_cpu_mask(cpu));
2969                 if (rc)
2970                         dev_err(&ctrl_info->pci_dev->dev,
2971                                 "error %d setting affinity hint for irq vector %u\n",
2972                                 rc, ctrl_info->msix_vectors[i]);
2973                 cpu = cpumask_next(cpu, cpu_online_mask);
2974         }
2975 }
2976
2977 static void pqi_irq_unset_affinity_hint(struct pqi_ctrl_info *ctrl_info)
2978 {
2979         int i;
2980
2981         for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
2982                 irq_set_affinity_hint(ctrl_info->msix_vectors[i], NULL);
2983 }
2984
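/*
 * All operational queue memory comes from one DMA-coherent allocation:
 * two inbound element arrays (RAID + AIO) per queue group, one outbound
 * element array per group, the event queue elements, and finally the
 * iq_ci/oq_pi index words, each padded to its required alignment.  The
 * first pass below walks this layout starting from a NULL pointer purely
 * to size the allocation; the second pass carves it up for real.
 */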
2985 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
2986 {
2987         unsigned int i;
2988         size_t alloc_length;
2989         size_t element_array_length_per_iq;
2990         size_t element_array_length_per_oq;
2991         void *element_array;
2992         void *next_queue_index;
2993         void *aligned_pointer;
2994         unsigned int num_inbound_queues;
2995         unsigned int num_outbound_queues;
2996         unsigned int num_queue_indexes;
2997         struct pqi_queue_group *queue_group;
2998
2999         element_array_length_per_iq =
3000                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
3001                 ctrl_info->num_elements_per_iq;
3002         element_array_length_per_oq =
3003                 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
3004                 ctrl_info->num_elements_per_oq;
3005         num_inbound_queues = ctrl_info->num_queue_groups * 2;
3006         num_outbound_queues = ctrl_info->num_queue_groups;
3007         num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
3008
3009         aligned_pointer = NULL;
3010
3011         for (i = 0; i < num_inbound_queues; i++) {
3012                 aligned_pointer = PTR_ALIGN(aligned_pointer,
3013                         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3014                 aligned_pointer += element_array_length_per_iq;
3015         }
3016
3017         for (i = 0; i < num_outbound_queues; i++) {
3018                 aligned_pointer = PTR_ALIGN(aligned_pointer,
3019                         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3020                 aligned_pointer += element_array_length_per_oq;
3021         }
3022
3023         aligned_pointer = PTR_ALIGN(aligned_pointer,
3024                 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3025         aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3026                 PQI_EVENT_OQ_ELEMENT_LENGTH;
3027
3028         for (i = 0; i < num_queue_indexes; i++) {
3029                 aligned_pointer = PTR_ALIGN(aligned_pointer,
3030                         PQI_OPERATIONAL_INDEX_ALIGNMENT);
3031                 aligned_pointer += sizeof(pqi_index_t);
3032         }
3033
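        /*
         * Since the sizing pass started from a NULL pointer, the final
         * value of aligned_pointer is the total number of bytes needed;
         * one extra alignment's worth of slack covers aligning the start
         * of the buffer returned by the DMA allocator.
         */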
3034         alloc_length = (size_t)aligned_pointer +
3035                 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3036
3037         ctrl_info->queue_memory_base =
3038                 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3039                         alloc_length,
3040                         &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
3041
3042         if (!ctrl_info->queue_memory_base) {
3043                 dev_err(&ctrl_info->pci_dev->dev,
3044                         "failed to allocate memory for PQI operational queues\n");
3045                 return -ENOMEM;
3046         }
3047
3048         ctrl_info->queue_memory_length = alloc_length;
3049
3050         element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
3051                 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3052
3053         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3054                 queue_group = &ctrl_info->queue_groups[i];
3055                 queue_group->iq_element_array[RAID_PATH] = element_array;
3056                 queue_group->iq_element_array_bus_addr[RAID_PATH] =
3057                         ctrl_info->queue_memory_base_dma_handle +
3058                                 (element_array - ctrl_info->queue_memory_base);
3059                 element_array += element_array_length_per_iq;
3060                 element_array = PTR_ALIGN(element_array,
3061                         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3062                 queue_group->iq_element_array[AIO_PATH] = element_array;
3063                 queue_group->iq_element_array_bus_addr[AIO_PATH] =
3064                         ctrl_info->queue_memory_base_dma_handle +
3065                         (element_array - ctrl_info->queue_memory_base);
3066                 element_array += element_array_length_per_iq;
3067                 element_array = PTR_ALIGN(element_array,
3068                         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3069         }
3070
3071         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3072                 queue_group = &ctrl_info->queue_groups[i];
3073                 queue_group->oq_element_array = element_array;
3074                 queue_group->oq_element_array_bus_addr =
3075                         ctrl_info->queue_memory_base_dma_handle +
3076                         (element_array - ctrl_info->queue_memory_base);
3077                 element_array += element_array_length_per_oq;
3078                 element_array = PTR_ALIGN(element_array,
3079                         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3080         }
3081
3082         ctrl_info->event_queue.oq_element_array = element_array;
3083         ctrl_info->event_queue.oq_element_array_bus_addr =
3084                 ctrl_info->queue_memory_base_dma_handle +
3085                 (element_array - ctrl_info->queue_memory_base);
3086         element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3087                 PQI_EVENT_OQ_ELEMENT_LENGTH;
3088
3089         next_queue_index = PTR_ALIGN(element_array,
3090                 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3091
3092         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3093                 queue_group = &ctrl_info->queue_groups[i];
3094                 queue_group->iq_ci[RAID_PATH] = next_queue_index;
3095                 queue_group->iq_ci_bus_addr[RAID_PATH] =
3096                         ctrl_info->queue_memory_base_dma_handle +
3097                         (next_queue_index - ctrl_info->queue_memory_base);
3098                 next_queue_index += sizeof(pqi_index_t);
3099                 next_queue_index = PTR_ALIGN(next_queue_index,
3100                         PQI_OPERATIONAL_INDEX_ALIGNMENT);
3101                 queue_group->iq_ci[AIO_PATH] = next_queue_index;
3102                 queue_group->iq_ci_bus_addr[AIO_PATH] =
3103                         ctrl_info->queue_memory_base_dma_handle +
3104                         (next_queue_index - ctrl_info->queue_memory_base);
3105                 next_queue_index += sizeof(pqi_index_t);
3106                 next_queue_index = PTR_ALIGN(next_queue_index,
3107                         PQI_OPERATIONAL_INDEX_ALIGNMENT);
3108                 queue_group->oq_pi = next_queue_index;
3109                 queue_group->oq_pi_bus_addr =
3110                         ctrl_info->queue_memory_base_dma_handle +
3111                         (next_queue_index - ctrl_info->queue_memory_base);
3112                 next_queue_index += sizeof(pqi_index_t);
3113                 next_queue_index = PTR_ALIGN(next_queue_index,
3114                         PQI_OPERATIONAL_INDEX_ALIGNMENT);
3115         }
3116
3117         ctrl_info->event_queue.oq_pi = next_queue_index;
3118         ctrl_info->event_queue.oq_pi_bus_addr =
3119                 ctrl_info->queue_memory_base_dma_handle +
3120                 (next_queue_index - ctrl_info->queue_memory_base);
3121
3122         return 0;
3123 }
3124
3125 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
3126 {
3127         unsigned int i;
3128         u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3129         u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3130
3131         /*
3132          * Initialize the backpointers to the controller structure in
3133          * each operational queue group structure.
3134          */
3135         for (i = 0; i < ctrl_info->num_queue_groups; i++)
3136                 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
3137
3138         /*
3139          * Assign IDs to all operational queues.  Note that the IDs
3140          * assigned to operational IQs are independent of the IDs
3141          * assigned to operational OQs.
3142          */
3143         ctrl_info->event_queue.oq_id = next_oq_id++;
3144         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3145                 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
3146                 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
3147                 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
3148         }
3149
3150         /*
3151          * Assign MSI-X table entry indexes to all queues.  Note that the
3152          * interrupt for the event queue is shared with the first queue group.
3153          */
3154         ctrl_info->event_queue.int_msg_num = 0;
3155         for (i = 0; i < ctrl_info->num_queue_groups; i++)
3156                 ctrl_info->queue_groups[i].int_msg_num = i;
3157
3158         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3159                 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
3160                 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
3161                 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
3162                 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
3163         }
3164 }
3165
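/*
 * The admin queue pair (its element arrays plus the iq_ci/oq_pi index
 * words) lives in a single DMA-coherent allocation described by
 * struct pqi_admin_queues_aligned; the extra alignment bytes let the
 * structure be placed on a queue-element-array boundary.
 */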
3166 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3167 {
3168         size_t alloc_length;
3169         struct pqi_admin_queues_aligned *admin_queues_aligned;
3170         struct pqi_admin_queues *admin_queues;
3171
3172         alloc_length = sizeof(struct pqi_admin_queues_aligned) +
3173                 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3174
3175         ctrl_info->admin_queue_memory_base =
3176                 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3177                         alloc_length,
3178                         &ctrl_info->admin_queue_memory_base_dma_handle,
3179                         GFP_KERNEL);
3180
3181         if (!ctrl_info->admin_queue_memory_base)
3182                 return -ENOMEM;
3183
3184         ctrl_info->admin_queue_memory_length = alloc_length;
3185
3186         admin_queues = &ctrl_info->admin_queues;
3187         admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
3188                 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3189         admin_queues->iq_element_array =
3190                 &admin_queues_aligned->iq_element_array;
3191         admin_queues->oq_element_array =
3192                 &admin_queues_aligned->oq_element_array;
3193         admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
3194         admin_queues->oq_pi = &admin_queues_aligned->oq_pi;
3195
3196         admin_queues->iq_element_array_bus_addr =
3197                 ctrl_info->admin_queue_memory_base_dma_handle +
3198                 (admin_queues->iq_element_array -
3199                 ctrl_info->admin_queue_memory_base);
3200         admin_queues->oq_element_array_bus_addr =
3201                 ctrl_info->admin_queue_memory_base_dma_handle +
3202                 (admin_queues->oq_element_array -
3203                 ctrl_info->admin_queue_memory_base);
3204         admin_queues->iq_ci_bus_addr =
3205                 ctrl_info->admin_queue_memory_base_dma_handle +
3206                 ((void *)admin_queues->iq_ci -
3207                 ctrl_info->admin_queue_memory_base);
3208         admin_queues->oq_pi_bus_addr =
3209                 ctrl_info->admin_queue_memory_base_dma_handle +
3210                 ((void *)admin_queues->oq_pi -
3211                 ctrl_info->admin_queue_memory_base);
3212
3213         return 0;
3214 }
3215
3216 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES          HZ
3217 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS      1
3218
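/*
 * Program the admin queue registers with the bus addresses set up above,
 * issue the "create admin queue pair" function, and poll the combined
 * function-and-status register until the controller reports idle (or the
 * one-second timeout expires).  Only after that are the IQ PI / OQ CI
 * doorbell offsets valid to read.
 */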
3219 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
3220 {
3221         struct pqi_device_registers __iomem *pqi_registers;
3222         struct pqi_admin_queues *admin_queues;
3223         unsigned long timeout;
3224         u8 status;
3225         u32 reg;
3226
3227         pqi_registers = ctrl_info->pqi_registers;
3228         admin_queues = &ctrl_info->admin_queues;
3229
3230         writeq((u64)admin_queues->iq_element_array_bus_addr,
3231                 &pqi_registers->admin_iq_element_array_addr);
3232         writeq((u64)admin_queues->oq_element_array_bus_addr,
3233                 &pqi_registers->admin_oq_element_array_addr);
3234         writeq((u64)admin_queues->iq_ci_bus_addr,
3235                 &pqi_registers->admin_iq_ci_addr);
3236         writeq((u64)admin_queues->oq_pi_bus_addr,
3237                 &pqi_registers->admin_oq_pi_addr);
3238
3239         reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
3240                 (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
3241                 (admin_queues->int_msg_num << 16);
3242         writel(reg, &pqi_registers->admin_iq_num_elements);
3243         writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
3244                 &pqi_registers->function_and_status_code);
3245
3246         timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
3247         while (1) {
3248                 status = readb(&pqi_registers->function_and_status_code);
3249                 if (status == PQI_STATUS_IDLE)
3250                         break;
3251                 if (time_after(jiffies, timeout))
3252                         return -ETIMEDOUT;
3253                 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
3254         }
3255
3256         /*
3257          * The offset registers are not initialized to the correct
3258          * offsets until *after* the create admin queue pair command
3259          * completes successfully.
3260          */
3261         admin_queues->iq_pi = ctrl_info->iomem_base +
3262                 PQI_DEVICE_REGISTERS_OFFSET +
3263                 readq(&pqi_registers->admin_iq_pi_offset);
3264         admin_queues->oq_ci = ctrl_info->iomem_base +
3265                 PQI_DEVICE_REGISTERS_OFFSET +
3266                 readq(&pqi_registers->admin_oq_ci_offset);
3267
3268         return 0;
3269 }
3270
3271 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
3272         struct pqi_general_admin_request *request)
3273 {
3274         struct pqi_admin_queues *admin_queues;
3275         void *next_element;
3276         pqi_index_t iq_pi;
3277
3278         admin_queues = &ctrl_info->admin_queues;
3279         iq_pi = admin_queues->iq_pi_copy;
3280
3281         next_element = admin_queues->iq_element_array +
3282                 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
3283
3284         memcpy(next_element, request, sizeof(*request));
3285
3286         iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
3287         admin_queues->iq_pi_copy = iq_pi;
3288
3289         /*
3290          * This write notifies the controller that an IU is available to be
3291          * processed.
3292          */
3293         writel(iq_pi, admin_queues->iq_pi);
3294 }
3295
3296 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
3297         struct pqi_general_admin_response *response)
3298 {
3299         struct pqi_admin_queues *admin_queues;
3300         pqi_index_t oq_pi;
3301         pqi_index_t oq_ci;
3302         unsigned long timeout;
3303
3304         admin_queues = &ctrl_info->admin_queues;
3305         oq_ci = admin_queues->oq_ci_copy;
3306
3307         timeout = (3 * HZ) + jiffies;   /* allow up to 3 seconds for an admin response */
3308
3309         while (1) {
3310                 oq_pi = *admin_queues->oq_pi;
3311                 if (oq_pi != oq_ci)
3312                         break;
3313                 if (time_after(jiffies, timeout)) {
3314                         dev_err(&ctrl_info->pci_dev->dev,
3315                                 "timed out waiting for admin response\n");
3316                         return -ETIMEDOUT;
3317                 }
3318                 usleep_range(1000, 2000);
3319         }
3320
3321         memcpy(response, admin_queues->oq_element_array +
3322                 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
3323
3324         oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
3325         admin_queues->oq_ci_copy = oq_ci;
3326         writel(oq_ci, admin_queues->oq_ci);
3327
3328         return 0;
3329 }
3330
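/*
 * Copy as many queued IUs as will fit into the inbound queue for the given
 * path.  An IU may span several queue elements and may wrap around the end
 * of the element array, in which case it is copied in two pieces.  The
 * check against pqi_num_elements_free() keeps the host from overwriting
 * elements the controller has not yet consumed; a single PI doorbell write
 * at the end covers everything that was copied.
 */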
3331 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
3332         struct pqi_queue_group *queue_group, enum pqi_io_path path,
3333         struct pqi_io_request *io_request)
3334 {
3335         struct pqi_io_request *next;
3336         void *next_element;
3337         pqi_index_t iq_pi;
3338         pqi_index_t iq_ci;
3339         size_t iu_length;
3340         unsigned long flags;
3341         unsigned int num_elements_needed;
3342         unsigned int num_elements_to_end_of_queue;
3343         size_t copy_count;
3344         struct pqi_iu_header *request;
3345
3346         spin_lock_irqsave(&queue_group->submit_lock[path], flags);
3347
3348         if (io_request)
3349                 list_add_tail(&io_request->request_list_entry,
3350                         &queue_group->request_list[path]);
3351
3352         iq_pi = queue_group->iq_pi_copy[path];
3353
3354         list_for_each_entry_safe(io_request, next,
3355                 &queue_group->request_list[path], request_list_entry) {
3356
3357                 request = io_request->iu;
3358
3359                 iu_length = get_unaligned_le16(&request->iu_length) +
3360                         PQI_REQUEST_HEADER_LENGTH;
3361                 num_elements_needed =
3362                         DIV_ROUND_UP(iu_length,
3363                                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3364
3365                 iq_ci = *queue_group->iq_ci[path];
3366
3367                 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
3368                         ctrl_info->num_elements_per_iq))
3369                         break;
3370
3371                 put_unaligned_le16(queue_group->oq_id,
3372                         &request->response_queue_id);
3373
3374                 next_element = queue_group->iq_element_array[path] +
3375                         (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3376
3377                 num_elements_to_end_of_queue =
3378                         ctrl_info->num_elements_per_iq - iq_pi;
3379
3380                 if (num_elements_needed <= num_elements_to_end_of_queue) {
3381                         memcpy(next_element, request, iu_length);
3382                 } else {
3383                         copy_count = num_elements_to_end_of_queue *
3384                                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
3385                         memcpy(next_element, request, copy_count);
3386                         memcpy(queue_group->iq_element_array[path],
3387                                 (u8 *)request + copy_count,
3388                                 iu_length - copy_count);
3389                 }
3390
3391                 iq_pi = (iq_pi + num_elements_needed) %
3392                         ctrl_info->num_elements_per_iq;
3393
3394                 list_del(&io_request->request_list_entry);
3395         }
3396
3397         if (iq_pi != queue_group->iq_pi_copy[path]) {
3398                 queue_group->iq_pi_copy[path] = iq_pi;
3399                 /*
3400                  * This write notifies the controller that one or more IUs are
3401                  * available to be processed.
3402                  */
3403                 writel(iq_pi, queue_group->iq_pi[path]);
3404         }
3405
3406         spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
3407 }
3408
3409 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
3410         void *context)
3411 {
3412         struct completion *waiting = context;
3413
3414         complete(waiting);
3415 }
3416
3417 static int pqi_submit_raid_request_synchronous_with_io_request(
3418         struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
3419         unsigned long timeout_msecs)
3420 {
3421         int rc = 0;
3422         DECLARE_COMPLETION_ONSTACK(wait);
3423
3424         io_request->io_complete_callback = pqi_raid_synchronous_complete;
3425         io_request->context = &wait;
3426
3427         pqi_start_io(ctrl_info,
3428                 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
3429                 io_request);
3430
3431         if (timeout_msecs == NO_TIMEOUT) {
3432                 wait_for_completion_io(&wait);
3433         } else {
3434                 if (!wait_for_completion_io_timeout(&wait,
3435                         msecs_to_jiffies(timeout_msecs))) {
3436                         dev_warn(&ctrl_info->pci_dev->dev,
3437                                 "command timed out\n");
3438                         rc = -ETIMEDOUT;
3439                 }
3440         }
3441
3442         return rc;
3443 }
3444
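/*
 * Synchronous RAID-path submission.  Admission is throttled by
 * sync_request_sem; when a timeout is supplied, the time spent blocked on
 * the semaphore is subtracted from the time allowed for the command
 * itself.  On completion, controller error information is either copied
 * back to the caller or, when the caller passed no buffer, collapsed into
 * an -EIO return (with the CHECK CONDITION + data underflow case treated
 * as success).
 */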
3445 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
3446         struct pqi_iu_header *request, unsigned int flags,
3447         struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
3448 {
3449         int rc;
3450         struct pqi_io_request *io_request;
3451         unsigned long start_jiffies;
3452         unsigned long msecs_blocked;
3453         size_t iu_length;
3454
3455         /*
3456          * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
3457          * are mutually exclusive.
3458          */
3459
3460         if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
3461                 if (down_interruptible(&ctrl_info->sync_request_sem))
3462                         return -ERESTARTSYS;
3463         } else {
3464                 if (timeout_msecs == NO_TIMEOUT) {
3465                         down(&ctrl_info->sync_request_sem);
3466                 } else {
3467                         start_jiffies = jiffies;
3468                         if (down_timeout(&ctrl_info->sync_request_sem,
3469                                 msecs_to_jiffies(timeout_msecs)))
3470                                 return -ETIMEDOUT;
3471                         msecs_blocked =
3472                                 jiffies_to_msecs(jiffies - start_jiffies);
3473                         if (msecs_blocked >= timeout_msecs)
3474                                 return -ETIMEDOUT;
3475                         timeout_msecs -= msecs_blocked;
3476                 }
3477         }
3478
3479         io_request = pqi_alloc_io_request(ctrl_info);
3480
3481         put_unaligned_le16(io_request->index,
3482                 &(((struct pqi_raid_path_request *)request)->request_id));
3483
3484         if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
3485                 ((struct pqi_raid_path_request *)request)->error_index =
3486                         ((struct pqi_raid_path_request *)request)->request_id;
3487
3488         iu_length = get_unaligned_le16(&request->iu_length) +
3489                 PQI_REQUEST_HEADER_LENGTH;
3490         memcpy(io_request->iu, request, iu_length);
3491
3492         rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info,
3493                 io_request, timeout_msecs);
3494
3495         if (error_info) {
3496                 if (io_request->error_info)
3497                         memcpy(error_info, io_request->error_info,
3498                                 sizeof(*error_info));
3499                 else
3500                         memset(error_info, 0, sizeof(*error_info));
3501         } else if (rc == 0 && io_request->error_info) {
3502                 u8 scsi_status;
3503                 struct pqi_raid_error_info *raid_error_info;
3504
3505                 raid_error_info = io_request->error_info;
3506                 scsi_status = raid_error_info->status;
3507
3508                 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
3509                         raid_error_info->data_out_result ==
3510                         PQI_DATA_IN_OUT_UNDERFLOW)
3511                         scsi_status = SAM_STAT_GOOD;
3512
3513                 if (scsi_status != SAM_STAT_GOOD)
3514                         rc = -EIO;
3515         }
3516
3517         pqi_free_io_request(io_request);
3518
3519         up(&ctrl_info->sync_request_sem);
3520
3521         return rc;
3522 }
3523
3524 static int pqi_validate_admin_response(
3525         struct pqi_general_admin_response *response, u8 expected_function_code)
3526 {
3527         if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
3528                 return -EINVAL;
3529
3530         if (get_unaligned_le16(&response->header.iu_length) !=
3531                 PQI_GENERAL_ADMIN_IU_LENGTH)
3532                 return -EINVAL;
3533
3534         if (response->function_code != expected_function_code)
3535                 return -EINVAL;
3536
3537         if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
3538                 return -EINVAL;
3539
3540         return 0;
3541 }
3542
3543 static int pqi_submit_admin_request_synchronous(
3544         struct pqi_ctrl_info *ctrl_info,
3545         struct pqi_general_admin_request *request,
3546         struct pqi_general_admin_response *response)
3547 {
3548         int rc;
3549
3550         pqi_submit_admin_request(ctrl_info, request);
3551
3552         rc = pqi_poll_for_admin_response(ctrl_info, response);
3553
3554         if (rc == 0)
3555                 rc = pqi_validate_admin_response(response,
3556                         request->function_code);
3557
3558         return rc;
3559 }
3560
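/*
 * Issue the REPORT DEVICE CAPABILITY admin function and cache the limits
 * the rest of initialization depends on: queue counts, elements per queue,
 * element lengths (reported in 16-byte units, hence the multiply by 16),
 * and the SOP layer's maximum inbound IU length and spanning support.
 */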
3561 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
3562 {
3563         int rc;
3564         struct pqi_general_admin_request request;
3565         struct pqi_general_admin_response response;
3566         struct pqi_device_capability *capability;
3567         struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
3568
3569         capability = kmalloc(sizeof(*capability), GFP_KERNEL);
3570         if (!capability)
3571                 return -ENOMEM;
3572
3573         memset(&request, 0, sizeof(request));
3574
3575         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3576         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3577                 &request.header.iu_length);
3578         request.function_code =
3579                 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
3580         put_unaligned_le32(sizeof(*capability),
3581                 &request.data.report_device_capability.buffer_length);
3582
3583         rc = pqi_map_single(ctrl_info->pci_dev,
3584                 &request.data.report_device_capability.sg_descriptor,
3585                 capability, sizeof(*capability),
3586                 PCI_DMA_FROMDEVICE);
3587         if (rc)
3588                 goto out;
3589
3590         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3591                 &response);
3592
3593         pqi_pci_unmap(ctrl_info->pci_dev,
3594                 &request.data.report_device_capability.sg_descriptor, 1,
3595                 PCI_DMA_FROMDEVICE);
3596
3597         if (rc)
3598                 goto out;
3599
3600         if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
3601                 rc = -EIO;
3602                 goto out;
3603         }
3604
3605         ctrl_info->max_inbound_queues =
3606                 get_unaligned_le16(&capability->max_inbound_queues);
3607         ctrl_info->max_elements_per_iq =
3608                 get_unaligned_le16(&capability->max_elements_per_iq);
3609         ctrl_info->max_iq_element_length =
3610                 get_unaligned_le16(&capability->max_iq_element_length)
3611                 * 16;
3612         ctrl_info->max_outbound_queues =
3613                 get_unaligned_le16(&capability->max_outbound_queues);
3614         ctrl_info->max_elements_per_oq =
3615                 get_unaligned_le16(&capability->max_elements_per_oq);
3616         ctrl_info->max_oq_element_length =
3617                 get_unaligned_le16(&capability->max_oq_element_length)
3618                 * 16;
3619
3620         sop_iu_layer_descriptor =
3621                 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
3622
3623         ctrl_info->max_inbound_iu_length_per_firmware =
3624                 get_unaligned_le16(
3625                         &sop_iu_layer_descriptor->max_inbound_iu_length);
3626         ctrl_info->inbound_spanning_supported =
3627                 sop_iu_layer_descriptor->inbound_spanning_supported;
3628         ctrl_info->outbound_spanning_supported =
3629                 sop_iu_layer_descriptor->outbound_spanning_supported;
3630
3631 out:
3632         kfree(capability);
3633
3634         return rc;
3635 }
3636
3637 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
3638 {
3639         if (ctrl_info->max_iq_element_length <
3640                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3641                 dev_err(&ctrl_info->pci_dev->dev,
3642                         "max. inbound queue element length of %d is less than the required length of %d\n",
3643                         ctrl_info->max_iq_element_length,
3644                         PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3645                 return -EINVAL;
3646         }
3647
3648         if (ctrl_info->max_oq_element_length <
3649                 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
3650                 dev_err(&ctrl_info->pci_dev->dev,
3651                         "max. outbound queue element length of %d is less than the required length of %d\n",
3652                         ctrl_info->max_oq_element_length,
3653                         PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3654                 return -EINVAL;
3655         }
3656
3657         if (ctrl_info->max_inbound_iu_length_per_firmware <
3658                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3659                 dev_err(&ctrl_info->pci_dev->dev,
3660                         "max. inbound IU length of %u is less than the min. required length of %d\n",
3661                         ctrl_info->max_inbound_iu_length_per_firmware,
3662                         PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3663                 return -EINVAL;
3664         }
3665
3666         if (!ctrl_info->inbound_spanning_supported) {
3667                 dev_err(&ctrl_info->pci_dev->dev,
3668                         "the controller does not support inbound spanning\n");
3669                 return -EINVAL;
3670         }
3671
3672         if (ctrl_info->outbound_spanning_supported) {
3673                 dev_err(&ctrl_info->pci_dev->dev,
3674                         "the controller supports outbound spanning but this driver does not\n");
3675                 return -EINVAL;
3676         }
3677
3678         return 0;
3679 }
3680
3681 static int pqi_delete_operational_queue(struct pqi_ctrl_info *ctrl_info,
3682         bool inbound_queue, u16 queue_id)
3683 {
3684         struct pqi_general_admin_request request;
3685         struct pqi_general_admin_response response;
3686
3687         memset(&request, 0, sizeof(request));
3688         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3689         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3690                 &request.header.iu_length);
3691         if (inbound_queue)
3692                 request.function_code =
3693                         PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ;
3694         else
3695                 request.function_code =
3696                         PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ;
3697         put_unaligned_le16(queue_id,
3698                 &request.data.delete_operational_queue.queue_id);
3699
3700         return pqi_submit_admin_request_synchronous(ctrl_info, &request,
3701                 &response);
3702 }
3703
3704 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
3705 {
3706         int rc;
3707         struct pqi_event_queue *event_queue;
3708         struct pqi_general_admin_request request;
3709         struct pqi_general_admin_response response;
3710
3711         event_queue = &ctrl_info->event_queue;
3712
3713         /*
3714          * Create OQ (Outbound Queue - device to host queue) dedicated
3715          * to events.
3716          */
3717         memset(&request, 0, sizeof(request));
3718         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3719         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3720                 &request.header.iu_length);
3721         request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
3722         put_unaligned_le16(event_queue->oq_id,
3723                 &request.data.create_operational_oq.queue_id);
3724         put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
3725                 &request.data.create_operational_oq.element_array_addr);
3726         put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
3727                 &request.data.create_operational_oq.pi_addr);
3728         put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
3729                 &request.data.create_operational_oq.num_elements);
3730         put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
3731                 &request.data.create_operational_oq.element_length);
3732         request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
3733         put_unaligned_le16(event_queue->int_msg_num,
3734                 &request.data.create_operational_oq.int_msg_num);
3735
3736         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3737                 &response);
3738         if (rc)
3739                 return rc;
3740
3741         event_queue->oq_ci = ctrl_info->iomem_base +
3742                 PQI_DEVICE_REGISTERS_OFFSET +
3743                 get_unaligned_le64(
3744                         &response.data.create_operational_oq.oq_ci_offset);
3745
3746         return 0;
3747 }
3748
3749 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info)
3750 {
3751         unsigned int i;
3752         int rc;
3753         struct pqi_queue_group *queue_group;
3754         struct pqi_general_admin_request request;
3755         struct pqi_general_admin_response response;
3756
3757         i = ctrl_info->num_active_queue_groups;
3758         queue_group = &ctrl_info->queue_groups[i];
3759
3760         /*
3761          * Create IQ (Inbound Queue - host to device queue) for
3762          * RAID path.
3763          */
3764         memset(&request, 0, sizeof(request));
3765         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3766         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3767                 &request.header.iu_length);
3768         request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
3769         put_unaligned_le16(queue_group->iq_id[RAID_PATH],
3770                 &request.data.create_operational_iq.queue_id);
3771         put_unaligned_le64(
3772                 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
3773                 &request.data.create_operational_iq.element_array_addr);
3774         put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
3775                 &request.data.create_operational_iq.ci_addr);
3776         put_unaligned_le16(ctrl_info->num_elements_per_iq,
3777                 &request.data.create_operational_iq.num_elements);
3778         put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
3779                 &request.data.create_operational_iq.element_length);
3780         request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
3781
3782         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3783                 &response);
3784         if (rc) {
3785                 dev_err(&ctrl_info->pci_dev->dev,
3786                         "error creating inbound RAID queue\n");
3787                 return rc;
3788         }
3789
3790         queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
3791                 PQI_DEVICE_REGISTERS_OFFSET +
3792                 get_unaligned_le64(
3793                         &response.data.create_operational_iq.iq_pi_offset);
3794
3795         /*
3796          * Create IQ (Inbound Queue - host to device queue) for
3797          * Advanced I/O (AIO) path.
3798          */
3799         memset(&request, 0, sizeof(request));
3800         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3801         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3802                 &request.header.iu_length);
3803         request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
3804         put_unaligned_le16(queue_group->iq_id[AIO_PATH],
3805                 &request.data.create_operational_iq.queue_id);
3806         put_unaligned_le64((u64)queue_group->
3807                 iq_element_array_bus_addr[AIO_PATH],
3808                 &request.data.create_operational_iq.element_array_addr);
3809         put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
3810                 &request.data.create_operational_iq.ci_addr);
3811         put_unaligned_le16(ctrl_info->num_elements_per_iq,
3812                 &request.data.create_operational_iq.num_elements);
3813         put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
3814                 &request.data.create_operational_iq.element_length);
3815         request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
3816
3817         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3818                 &response);
3819         if (rc) {
3820                 dev_err(&ctrl_info->pci_dev->dev,
3821                         "error creating inbound AIO queue\n");
3822                 goto delete_inbound_queue_raid;
3823         }
3824
3825         queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
3826                 PQI_DEVICE_REGISTERS_OFFSET +
3827                 get_unaligned_le64(
3828                         &response.data.create_operational_iq.iq_pi_offset);
3829
3830         /*
3831          * Designate the 2nd IQ as the AIO path.  By default, all IQs are
3832          * assumed to be for RAID path I/O unless we change the queue's
3833          * property.
3834          */
3835         memset(&request, 0, sizeof(request));
3836         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3837         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3838                 &request.header.iu_length);
3839         request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
3840         put_unaligned_le16(queue_group->iq_id[AIO_PATH],
3841                 &request.data.change_operational_iq_properties.queue_id);
3842         put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
3843                 &request.data.change_operational_iq_properties.vendor_specific);
3844
3845         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3846                 &response);
3847         if (rc) {
3848                 dev_err(&ctrl_info->pci_dev->dev,
3849                         "error changing queue property\n");
3850                 goto delete_inbound_queue_aio;
3851         }
3852
3853         /*
3854          * Create OQ (Outbound Queue - device to host queue).
3855          */
3856         memset(&request, 0, sizeof(request));
3857         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3858         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3859                 &request.header.iu_length);
3860         request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
3861         put_unaligned_le16(queue_group->oq_id,
3862                 &request.data.create_operational_oq.queue_id);
3863         put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
3864                 &request.data.create_operational_oq.element_array_addr);
3865         put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
3866                 &request.data.create_operational_oq.pi_addr);
3867         put_unaligned_le16(ctrl_info->num_elements_per_oq,
3868                 &request.data.create_operational_oq.num_elements);
3869         put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
3870                 &request.data.create_operational_oq.element_length);
3871         request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
3872         put_unaligned_le16(queue_group->int_msg_num,
3873                 &request.data.create_operational_oq.int_msg_num);
3874
3875         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3876                 &response);
3877         if (rc) {
3878                 dev_err(&ctrl_info->pci_dev->dev,
3879                         "error creating outbound queue\n");
3880                 goto delete_inbound_queue_aio;
3881         }
3882
3883         queue_group->oq_ci = ctrl_info->iomem_base +
3884                 PQI_DEVICE_REGISTERS_OFFSET +
3885                 get_unaligned_le64(
3886                         &response.data.create_operational_oq.oq_ci_offset);
3887
3888         ctrl_info->num_active_queue_groups++;
3889
3890         return 0;
3891
3892 delete_inbound_queue_aio:
3893         pqi_delete_operational_queue(ctrl_info, true,
3894                 queue_group->iq_id[AIO_PATH]);
3895
3896 delete_inbound_queue_raid:
3897         pqi_delete_operational_queue(ctrl_info, true,
3898                 queue_group->iq_id[RAID_PATH]);
3899
3900         return rc;
3901 }
3902
3903 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
3904 {
3905         int rc;
3906         unsigned int i;
3907
3908         rc = pqi_create_event_queue(ctrl_info);
3909         if (rc) {
3910                 dev_err(&ctrl_info->pci_dev->dev,
3911                         "error creating event queue\n");
3912                 return rc;
3913         }
3914
3915         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3916                 rc = pqi_create_queue_group(ctrl_info);
3917                 if (rc) {
3918                         dev_err(&ctrl_info->pci_dev->dev,
3919                                 "error creating queue group number %u/%u\n",
3920                                 i, ctrl_info->num_queue_groups);
3921                         return rc;
3922                 }
3923         }
3924
3925         return 0;
3926 }
3927
3928 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH   \
3929         (offsetof(struct pqi_event_config, descriptors) + \
3930         (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
3931
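/*
 * Event configuration is a read-modify-write: fetch the controller's
 * current vendor event configuration, point every event descriptor at the
 * dedicated event OQ, then write the configuration back.
 */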
3932 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info)
3933 {
3934         int rc;
3935         unsigned int i;
3936         struct pqi_event_config *event_config;
3937         struct pqi_general_management_request request;
3938
3939         event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3940                 GFP_KERNEL);
3941         if (!event_config)
3942                 return -ENOMEM;
3943
3944         memset(&request, 0, sizeof(request));
3945
3946         request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
3947         put_unaligned_le16(offsetof(struct pqi_general_management_request,
3948                 data.report_event_configuration.sg_descriptors[1]) -
3949                 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
3950         put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3951                 &request.data.report_event_configuration.buffer_length);
3952
3953         rc = pqi_map_single(ctrl_info->pci_dev,
3954                 request.data.report_event_configuration.sg_descriptors,
3955                 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3956                 PCI_DMA_FROMDEVICE);
3957         if (rc)
3958                 goto out;
3959
3960         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
3961                 0, NULL, NO_TIMEOUT);
3962
3963         pqi_pci_unmap(ctrl_info->pci_dev,
3964                 request.data.report_event_configuration.sg_descriptors, 1,
3965                 PCI_DMA_FROMDEVICE);
3966
3967         if (rc)
3968                 goto out;
3969
3970         for (i = 0; i < event_config->num_event_descriptors; i++)
3971                 put_unaligned_le16(ctrl_info->event_queue.oq_id,
3972                         &event_config->descriptors[i].oq_id);
3973
3974         memset(&request, 0, sizeof(request));
3975
3976         request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
3977         put_unaligned_le16(offsetof(struct pqi_general_management_request,
3978                 data.report_event_configuration.sg_descriptors[1]) -
3979                 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
3980         put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3981                 &request.data.report_event_configuration.buffer_length);
3982
3983         rc = pqi_map_single(ctrl_info->pci_dev,
3984                 request.data.report_event_configuration.sg_descriptors,
3985                 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
3986                 PCI_DMA_TODEVICE);
3987         if (rc)
3988                 goto out;
3989
3990         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
3991                 NULL, NO_TIMEOUT);
3992
3993         pqi_pci_unmap(ctrl_info->pci_dev,
3994                 request.data.report_event_configuration.sg_descriptors, 1,
3995                 PCI_DMA_TODEVICE);
3996
3997 out:
3998         kfree(event_config);
3999
4000         return rc;
4001 }
4002
4003 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
4004 {
4005         unsigned int i;
4006         struct device *dev;
4007         size_t sg_chain_buffer_length;
4008         struct pqi_io_request *io_request;
4009
4010         if (!ctrl_info->io_request_pool)
4011                 return;
4012
4013         dev = &ctrl_info->pci_dev->dev;
4014         sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4015         io_request = ctrl_info->io_request_pool;
4016
4017         for (i = 0; i < ctrl_info->max_io_slots; i++) {
4018                 kfree(io_request->iu);
4019                 if (!io_request->sg_chain_buffer)
4020                         break;
4021                 dma_free_coherent(dev, sg_chain_buffer_length,
4022                         io_request->sg_chain_buffer,
4023                         io_request->sg_chain_buffer_dma_handle);
4024                 io_request++;
4025         }
4026
4027         kfree(ctrl_info->io_request_pool);
4028         ctrl_info->io_request_pool = NULL;
4029 }
4030
4031 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
4032 {
4033         ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
4034                 ctrl_info->error_buffer_length,
4035                 &ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
4036
4037         if (!ctrl_info->error_buffer)
4038                 return -ENOMEM;
4039
4040         return 0;
4041 }
4042
4043 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
4044 {
4045         unsigned int i;
4046         void *sg_chain_buffer;
4047         size_t sg_chain_buffer_length;
4048         dma_addr_t sg_chain_buffer_dma_handle;
4049         struct device *dev;
4050         struct pqi_io_request *io_request;
4051
4052         ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots,
4053                 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
4054
4055         if (!ctrl_info->io_request_pool) {
4056                 dev_err(&ctrl_info->pci_dev->dev,
4057                         "failed to allocate I/O request pool\n");
4058                 goto error;
4059         }
4060
4061         dev = &ctrl_info->pci_dev->dev;
4062         sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4063         io_request = ctrl_info->io_request_pool;
4064
4065         for (i = 0; i < ctrl_info->max_io_slots; i++) {
4066                 io_request->iu =
4067                         kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
4068
4069                 if (!io_request->iu) {
4070                         dev_err(&ctrl_info->pci_dev->dev,
4071                                 "failed to allocate IU buffers\n");
4072                         goto error;
4073                 }
4074
4075                 sg_chain_buffer = dma_alloc_coherent(dev,
4076                         sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
4077                         GFP_KERNEL);
4078
4079                 if (!sg_chain_buffer) {
4080                         dev_err(&ctrl_info->pci_dev->dev,
4081                                 "failed to allocate PQI scatter-gather chain buffers\n");
4082                         goto error;
4083                 }
4084
4085                 io_request->index = i;
4086                 io_request->sg_chain_buffer = sg_chain_buffer;
4087                 io_request->sg_chain_buffer_dma_handle =
4088                         sg_chain_buffer_dma_handle;
4089                 io_request++;
4090         }
4091
4092         return 0;
4093
4094 error:
4095         pqi_free_all_io_requests(ctrl_info);
4096
4097         return -ENOMEM;
4098 }
4099
4100 /*
4101  * Calculate required resources that are sized based on max. outstanding
4102  * requests and max. transfer size.
4103  */
4104
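/*
 * As a worked example (the numbers are illustrative, since the real limits
 * come from the controller and from smartpqi.h): with 4 KiB pages and an
 * effective max_transfer_size of 1 MiB, max_sg_entries starts at 256, is
 * bumped to 257 to cover an unaligned buffer, and max_transfer_size is
 * then recomputed as 256 pages, giving max_sectors = 2048.
 */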
4105 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
4106 {
4107         u32 max_transfer_size;
4108         u32 max_sg_entries;
4109
4110         ctrl_info->scsi_ml_can_queue =
4111                 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
4112         ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
4113
4114         ctrl_info->error_buffer_length =
4115                 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
4116
4117         max_transfer_size =
4118                 min(ctrl_info->max_transfer_size, PQI_MAX_TRANSFER_SIZE);
4119
4120         max_sg_entries = max_transfer_size / PAGE_SIZE;
4121
4122         /* +1 to cover when the buffer is not page-aligned. */
4123         max_sg_entries++;
4124
4125         max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
4126
4127         max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
4128
4129         ctrl_info->sg_chain_buffer_length =
4130                 max_sg_entries * sizeof(struct pqi_sg_descriptor);
4131         ctrl_info->sg_tablesize = max_sg_entries;
4132         ctrl_info->max_sectors = max_transfer_size / 512;
4133 }
4134
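/*
 * Each queue group consumes two inbound queues (RAID + AIO) and one
 * outbound queue, and one additional outbound queue is reserved for
 * events; that is why the firmware limits are divided by two and reduced
 * by one here.  The group count is further capped by the number of online
 * CPUs, the available MSI-X vectors, and PQI_MAX_QUEUE_GROUPS.
 */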
4135 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
4136 {
4137         int num_cpus;
4138         int max_queue_groups;
4139         int num_queue_groups;
4140         u16 num_elements_per_iq;
4141         u16 num_elements_per_oq;
4142
4143         max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
4144                 ctrl_info->max_outbound_queues - 1);
4145         max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
4146
4147         num_cpus = num_online_cpus();
4148         num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
4149         num_queue_groups = min(num_queue_groups, max_queue_groups);
4150
4151         ctrl_info->num_queue_groups = num_queue_groups;
4152
4153         /*
4154          * Make sure that the max. inbound IU length is an even multiple
4155          * of our inbound element length.
4156          */
4157         ctrl_info->max_inbound_iu_length =
4158                 (ctrl_info->max_inbound_iu_length_per_firmware /
4159                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
4160                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4161
4162         num_elements_per_iq =
4163                 (ctrl_info->max_inbound_iu_length /
4164                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4165
4166         /* Add one because one element in each queue is unusable. */
4167         num_elements_per_iq++;
4168
4169         num_elements_per_iq = min(num_elements_per_iq,
4170                 ctrl_info->max_elements_per_iq);
4171
4172         num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
4173         num_elements_per_oq = min(num_elements_per_oq,
4174                 ctrl_info->max_elements_per_oq);
4175
4176         ctrl_info->num_elements_per_iq = num_elements_per_iq;
4177         ctrl_info->num_elements_per_oq = num_elements_per_oq;
4178
4179         ctrl_info->max_sg_per_iu =
4180                 ((ctrl_info->max_inbound_iu_length -
4181                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
4182                 sizeof(struct pqi_sg_descriptor)) +
4183                 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
4184 }
4185
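     /*
      * Fill in one PQI SG descriptor (address, length, flags) from a
      * DMA-mapped scatterlist entry, in little-endian wire format.
      */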
4186 static inline void pqi_set_sg_descriptor(
4187         struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
4188 {
4189         u64 address = (u64)sg_dma_address(sg);
4190         unsigned int length = sg_dma_len(sg);
4191
4192         put_unaligned_le64(address, &sg_descriptor->address);
4193         put_unaligned_le32(length, &sg_descriptor->length);
4194         put_unaligned_le32(0, &sg_descriptor->flags);
4195 }
4196
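     /*
      * Build the SG list for a RAID path request.  Up to max_sg_per_iu - 1
      * descriptors are embedded in the IU; if more are needed, the last
      * embedded slot becomes a CISS_SG_CHAIN descriptor pointing at the
      * per-request chain buffer and the remaining entries are written there.
      * The final descriptor is flagged CISS_SG_LAST.  pqi_build_aio_sg_list()
      * below follows the same scheme.
      */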
4197 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
4198         struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
4199         struct pqi_io_request *io_request)
4200 {
4201         int i;
4202         u16 iu_length;
4203         int sg_count;
4204         bool chained;
4205         unsigned int num_sg_in_iu;
4206         unsigned int max_sg_per_iu;
4207         struct scatterlist *sg;
4208         struct pqi_sg_descriptor *sg_descriptor;
4209
4210         sg_count = scsi_dma_map(scmd);
4211         if (sg_count < 0)
4212                 return sg_count;
4213
4214         iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4215                 PQI_REQUEST_HEADER_LENGTH;
4216
4217         if (sg_count == 0)
4218                 goto out;
4219
4220         sg = scsi_sglist(scmd);
4221         sg_descriptor = request->sg_descriptors;
4222         max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4223         chained = false;
4224         num_sg_in_iu = 0;
4225         i = 0;
4226
4227         while (1) {
4228                 pqi_set_sg_descriptor(sg_descriptor, sg);
4229                 if (!chained)
4230                         num_sg_in_iu++;
4231                 i++;
4232                 if (i == sg_count)
4233                         break;
4234                 sg_descriptor++;
4235                 if (i == max_sg_per_iu) {
4236                         put_unaligned_le64(
4237                                 (u64)io_request->sg_chain_buffer_dma_handle,
4238                                 &sg_descriptor->address);
4239                         put_unaligned_le32((sg_count - num_sg_in_iu)
4240                                 * sizeof(*sg_descriptor),
4241                                 &sg_descriptor->length);
4242                         put_unaligned_le32(CISS_SG_CHAIN,
4243                                 &sg_descriptor->flags);
4244                         chained = true;
4245                         num_sg_in_iu++;
4246                         sg_descriptor = io_request->sg_chain_buffer;
4247                 }
4248                 sg = sg_next(sg);
4249         }
4250
4251         put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4252         request->partial = chained;
4253         iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4254
4255 out:
4256         put_unaligned_le16(iu_length, &request->header.iu_length);
4257
4258         return 0;
4259 }
4260
4261 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
4262         struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
4263         struct pqi_io_request *io_request)
4264 {
4265         int i;
4266         u16 iu_length;
4267         int sg_count;
4268         bool chained;
4269         unsigned int num_sg_in_iu;
4270         unsigned int max_sg_per_iu;
4271         struct scatterlist *sg;
4272         struct pqi_sg_descriptor *sg_descriptor;
4273
4274         sg_count = scsi_dma_map(scmd);
4275         if (sg_count < 0)
4276                 return sg_count;
4277
4278         iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
4279                 PQI_REQUEST_HEADER_LENGTH;
4280         num_sg_in_iu = 0;
4281
4282         if (sg_count == 0)
4283                 goto out;
4284
4285         sg = scsi_sglist(scmd);
4286         sg_descriptor = request->sg_descriptors;
4287         max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4288         chained = false;
4289         i = 0;
4290
4291         while (1) {
4292                 pqi_set_sg_descriptor(sg_descriptor, sg);
4293                 if (!chained)
4294                         num_sg_in_iu++;
4295                 i++;
4296                 if (i == sg_count)
4297                         break;
4298                 sg_descriptor++;
4299                 if (i == max_sg_per_iu) {
4300                         put_unaligned_le64(
4301                                 (u64)io_request->sg_chain_buffer_dma_handle,
4302                                 &sg_descriptor->address);
4303                         put_unaligned_le32((sg_count - num_sg_in_iu)
4304                                 * sizeof(*sg_descriptor),
4305                                 &sg_descriptor->length);
4306                         put_unaligned_le32(CISS_SG_CHAIN,
4307                                 &sg_descriptor->flags);
4308                         chained = true;
4309                         num_sg_in_iu++;
4310                         sg_descriptor = io_request->sg_chain_buffer;
4311                 }
4312                 sg = sg_next(sg);
4313         }
4314
4315         put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4316         request->partial = chained;
4317         iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4318
4319 out:
4320         put_unaligned_le16(iu_length, &request->header.iu_length);
4321         request->num_sg_descriptors = num_sg_in_iu;
4322
4323         return 0;
4324 }
4325
4326 static void pqi_raid_io_complete(struct pqi_io_request *io_request,
4327         void *context)
4328 {
4329         struct scsi_cmnd *scmd;
4330
4331         scmd = io_request->scmd;
4332         pqi_free_io_request(io_request);
4333         scsi_dma_unmap(scmd);
4334         pqi_scsi_done(scmd);
4335 }
4336
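     /*
      * Translate a SCSI command into a RAID path IU: copy the CDB, encode
      * its length and the data direction, attach the SG list, and start the
      * request on the chosen queue group's RAID path.
      */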
4337 static int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4338         struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4339         struct pqi_queue_group *queue_group)
4340 {
4341         int rc;
4342         size_t cdb_length;
4343         struct pqi_io_request *io_request;
4344         struct pqi_raid_path_request *request;
4345
4346         io_request = pqi_alloc_io_request(ctrl_info);
4347         io_request->io_complete_callback = pqi_raid_io_complete;
4348         io_request->scmd = scmd;
4349
4350         scmd->host_scribble = (unsigned char *)io_request;
4351
4352         request = io_request->iu;
4353         memset(request, 0,
4354                 offsetof(struct pqi_raid_path_request, sg_descriptors));
4355
4356         request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4357         put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4358         request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4359         put_unaligned_le16(io_request->index, &request->request_id);
4360         request->error_index = request->request_id;
4361         memcpy(request->lun_number, device->scsi3addr,
4362                 sizeof(request->lun_number));
4363
4364         cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
4365         memcpy(request->cdb, scmd->cmnd, cdb_length);
4366
4367         switch (cdb_length) {
4368         case 6:
4369         case 10:
4370         case 12:
4371         case 16:
4372                 /* No bytes in the Additional CDB bytes field */
4373                 request->additional_cdb_bytes_usage =
4374                         SOP_ADDITIONAL_CDB_BYTES_0;
4375                 break;
4376         case 20:
4377                 /* 4 bytes in the Additional CDB bytes field */
4378                 request->additional_cdb_bytes_usage =
4379                         SOP_ADDITIONAL_CDB_BYTES_4;
4380                 break;
4381         case 24:
4382                 /* 8 bytes in the Additional CDB bytes field */
4383                 request->additional_cdb_bytes_usage =
4384                         SOP_ADDITIONAL_CDB_BYTES_8;
4385                 break;
4386         case 28:
4387                 /* 12 bytes in the Additional CDB bytes field */
4388                 request->additional_cdb_bytes_usage =
4389                         SOP_ADDITIONAL_CDB_BYTES_12;
4390                 break;
4391         case 32:
4392         default:
4393                 /* 16 bytes in the Additional CDB bytes field */
4394                 request->additional_cdb_bytes_usage =
4395                         SOP_ADDITIONAL_CDB_BYTES_16;
4396                 break;
4397         }
4398
4399         switch (scmd->sc_data_direction) {
4400         case DMA_TO_DEVICE:
4401                 request->data_direction = SOP_READ_FLAG;
4402                 break;
4403         case DMA_FROM_DEVICE:
4404                 request->data_direction = SOP_WRITE_FLAG;
4405                 break;
4406         case DMA_NONE:
4407                 request->data_direction = SOP_NO_DIRECTION_FLAG;
4408                 break;
4409         case DMA_BIDIRECTIONAL:
4410                 request->data_direction = SOP_BIDIRECTIONAL;
4411                 break;
4412         default:
4413                 dev_err(&ctrl_info->pci_dev->dev,
4414                         "unknown data direction: %d\n",
4415                         scmd->sc_data_direction);
4416                 WARN_ON(scmd->sc_data_direction);
4417                 break;
4418         }
4419
4420         rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
4421         if (rc) {
4422                 pqi_free_io_request(io_request);
4423                 return SCSI_MLQUEUE_HOST_BUSY;
4424         }
4425
4426         pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
4427
4428         return 0;
4429 }
4430
4431 static void pqi_aio_io_complete(struct pqi_io_request *io_request,
4432         void *context)
4433 {
4434         struct scsi_cmnd *scmd;
4435
4436         scmd = io_request->scmd;
4437         scsi_dma_unmap(scmd);
4438         if (io_request->status == -EAGAIN)
4439                 set_host_byte(scmd, DID_IMM_RETRY);
4440         pqi_free_io_request(io_request);
4441         pqi_scsi_done(scmd);
4442 }
4443
4444 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4445         struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4446         struct pqi_queue_group *queue_group)
4447 {
4448         return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
4449                 scmd->cmnd, scmd->cmd_len, queue_group, NULL);
4450 }
4451
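     /*
      * Submit a command on the AIO path.  The request is addressed by the
      * firmware-supplied AIO handle (nexus ID) rather than a LUN address,
      * and optional encryption parameters are copied into the IU.
      */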
4452 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
4453         struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
4454         unsigned int cdb_length, struct pqi_queue_group *queue_group,
4455         struct pqi_encryption_info *encryption_info)
4456 {
4457         int rc;
4458         struct pqi_io_request *io_request;
4459         struct pqi_aio_path_request *request;
4460
4461         io_request = pqi_alloc_io_request(ctrl_info);
4462         io_request->io_complete_callback = pqi_aio_io_complete;
4463         io_request->scmd = scmd;
4464
4465         scmd->host_scribble = (unsigned char *)io_request;
4466
4467         request = io_request->iu;
4468         memset(request, 0,
4469                 offsetof(struct pqi_aio_path_request, sg_descriptors));
4470
4471         request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
4472         put_unaligned_le32(aio_handle, &request->nexus_id);
4473         put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4474         request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4475         put_unaligned_le16(io_request->index, &request->request_id);
4476         request->error_index = request->request_id;
4477         if (cdb_length > sizeof(request->cdb))
4478                 cdb_length = sizeof(request->cdb);
4479         request->cdb_length = cdb_length;
4480         memcpy(request->cdb, cdb, cdb_length);
4481
4482         switch (scmd->sc_data_direction) {
4483         case DMA_TO_DEVICE:
4484                 request->data_direction = SOP_READ_FLAG;
4485                 break;
4486         case DMA_FROM_DEVICE:
4487                 request->data_direction = SOP_WRITE_FLAG;
4488                 break;
4489         case DMA_NONE:
4490                 request->data_direction = SOP_NO_DIRECTION_FLAG;
4491                 break;
4492         case DMA_BIDIRECTIONAL:
4493                 request->data_direction = SOP_BIDIRECTIONAL;
4494                 break;
4495         default:
4496                 dev_err(&ctrl_info->pci_dev->dev,
4497                         "unknown data direction: %d\n",
4498                         scmd->sc_data_direction);
4499                 WARN_ON(scmd->sc_data_direction);
4500                 break;
4501         }
4502
4503         if (encryption_info) {
4504                 request->encryption_enable = true;
4505                 put_unaligned_le16(encryption_info->data_encryption_key_index,
4506                         &request->data_encryption_key_index);
4507                 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
4508                         &request->encrypt_tweak_lower);
4509                 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
4510                         &request->encrypt_tweak_upper);
4511         }
4512
4513         rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
4514         if (rc) {
4515                 pqi_free_io_request(io_request);
4516                 return SCSI_MLQUEUE_HOST_BUSY;
4517         }
4518
4519         pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
4520
4521         return 0;
4522 }
4523
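     /*
      * queuecommand entry point: fail fast if the device is being reset or
      * the controller is offline, pick the queue group from the block-layer
      * hardware queue tag, then dispatch via RAID bypass, the AIO path, or
      * the plain RAID path as appropriate for the device.
      */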
4524 static int pqi_scsi_queue_command(struct Scsi_Host *shost,
4525                                 struct scsi_cmnd *scmd)
4526 {
4527         int rc;
4528         struct pqi_ctrl_info *ctrl_info;
4529         struct pqi_scsi_dev *device;
4530         u16 hwq;
4531         struct pqi_queue_group *queue_group;
4532         bool raid_bypassed;
4533
4534         device = scmd->device->hostdata;
4535
4536         if (device->reset_in_progress) {
4537                 set_host_byte(scmd, DID_RESET);
4538                 pqi_scsi_done(scmd);
4539                 return 0;
4540         }
4541
4542         ctrl_info = shost_to_hba(shost);
4543
4544         if (pqi_ctrl_offline(ctrl_info)) {
4545                 set_host_byte(scmd, DID_NO_CONNECT);
4546                 pqi_scsi_done(scmd);
4547                 return 0;
4548         }
4549
4550         hwq = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
4551         if (hwq >= ctrl_info->num_queue_groups)
4552                 hwq = 0;
4553
4554         queue_group = &ctrl_info->queue_groups[hwq];
4555
4556         if (pqi_is_logical_device(device)) {
4557                 raid_bypassed = false;
4558                 if (device->offload_enabled &&
4559                         scmd->request->cmd_type == REQ_TYPE_FS) {
4560                         rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
4561                                 scmd, queue_group);
4562                         if (rc == 0 ||
4563                                 rc == SCSI_MLQUEUE_HOST_BUSY ||
4564                                 rc == SAM_STAT_CHECK_CONDITION ||
4565                                 rc == SAM_STAT_RESERVATION_CONFLICT)
4566                                 raid_bypassed = true;
4567                 }
4568                 if (!raid_bypassed)
4569                         rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4570                                 queue_group);
4571         } else {
4572                 if (device->aio_enabled)
4573                         rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
4574                                 queue_group);
4575                 else
4576                         rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4577                                 queue_group);
4578         }
4579
4580         return rc;
4581 }
4582
4583 static inline void pqi_complete_queued_requests_queue_group(
4584         struct pqi_queue_group *queue_group,
4585         struct pqi_scsi_dev *device_in_reset)
4586 {
4587         unsigned int path;
4588         unsigned long flags;
4589         struct pqi_io_request *io_request;
4590         struct pqi_io_request *next;
4591         struct scsi_cmnd *scmd;
4592         struct pqi_scsi_dev *device;
4593
4594         for (path = 0; path < 2; path++) {
4595                 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
4596
4597                 list_for_each_entry_safe(io_request, next,
4598                         &queue_group->request_list[path],
4599                         request_list_entry) {
4600                         scmd = io_request->scmd;
4601                         if (!scmd)
4602                                 continue;
4603                         device = scmd->device->hostdata;
4604                         if (device == device_in_reset) {
4605                                 set_host_byte(scmd, DID_RESET);
4606                                 pqi_scsi_done(scmd);
4607                                 list_del(&io_request->
4608                                         request_list_entry);
4609                         }
4610                 }
4611
4612                 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
4613         }
4614 }
4615
4616 static void pqi_complete_queued_requests(struct pqi_ctrl_info *ctrl_info,
4617         struct pqi_scsi_dev *device_in_reset)
4618 {
4619         unsigned int i;
4620         struct pqi_queue_group *queue_group;
4621
4622         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4623                 queue_group = &ctrl_info->queue_groups[i];
4624                 pqi_complete_queued_requests_queue_group(queue_group,
4625                         device_in_reset);
4626         }
4627 }
4628
4629 static void pqi_reset_lun_complete(struct pqi_io_request *io_request,
4630         void *context)
4631 {
4632         struct completion *waiting = context;
4633
4634         complete(waiting);
4635 }
4636
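     /*
      * Issue a SOP LUN reset task management request on the default queue
      * group and wait, with a timeout, for its completion.  lun_reset_sem
      * limits concurrent resets to the reserved I/O slots set aside for them.
      */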
4637 static int pqi_reset_lun(struct pqi_ctrl_info *ctrl_info,
4638         struct pqi_scsi_dev *device)
4639 {
4640         int rc;
4641         struct pqi_io_request *io_request;
4642         DECLARE_COMPLETION_ONSTACK(wait);
4643         struct pqi_task_management_request *request;
4644
4645         down(&ctrl_info->lun_reset_sem);
4646
4647         io_request = pqi_alloc_io_request(ctrl_info);
4648         io_request->io_complete_callback = pqi_reset_lun_complete;
4649         io_request->context = &wait;
4650
4651         request = io_request->iu;
4652         memset(request, 0, sizeof(*request));
4653
4654         request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
4655         put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
4656                 &request->header.iu_length);
4657         put_unaligned_le16(io_request->index, &request->request_id);
4658         memcpy(request->lun_number, device->scsi3addr,
4659                 sizeof(request->lun_number));
4660         request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
4661
4662         pqi_start_io(ctrl_info,
4663                 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
4664                 io_request);
4665
4666         if (!wait_for_completion_io_timeout(&wait,
4667                 msecs_to_jiffies(PQI_ABORT_TIMEOUT_MSECS))) {
4668                 rc = -ETIMEDOUT;
4669         } else {
4670                 rc = io_request->status;
4671         }
4672
4673         pqi_free_io_request(io_request);
4674         up(&ctrl_info->lun_reset_sem);
4675
4676         return rc;
4677 }
4678
4679 /* Performs a reset at the LUN level. */
4680
4681 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
4682         struct pqi_scsi_dev *device)
4683 {
4684         int rc;
4685
4686         pqi_check_ctrl_health(ctrl_info);
4687         if (pqi_ctrl_offline(ctrl_info))
4688                 return FAILED;
4689
4690         device->reset_in_progress = true;
4691         pqi_complete_queued_requests(ctrl_info, device);
4692         rc = pqi_reset_lun(ctrl_info, device);
4693         device->reset_in_progress = false;
4694
4695         if (rc)
4696                 return FAILED;
4697
4698         return SUCCESS;
4699 }
4700
4701 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
4702 {
4703         int rc;
4704         struct pqi_ctrl_info *ctrl_info;
4705         struct pqi_scsi_dev *device;
4706
4707         ctrl_info = shost_to_hba(scmd->device->host);
4708
4709         device = scmd->device->hostdata;
4710
4711         dev_err(&ctrl_info->pci_dev->dev,
4712                 "resetting scsi %d:%d:%d:%d\n",
4713                 ctrl_info->scsi_host->host_no,
4714                 device->bus, device->target, device->lun);
4715
4716         rc = pqi_device_reset(ctrl_info, device);
4717
4718         dev_err(&ctrl_info->pci_dev->dev,
4719                 "reset of scsi %d:%d:%d:%d: %s\n",
4720                 ctrl_info->scsi_host->host_no,
4721                 device->bus, device->target, device->lun,
4722                 rc == SUCCESS ? "SUCCESS" : "FAILED");
4723
4724         return rc;
4725 }
4726
4727 static int pqi_slave_alloc(struct scsi_device *sdev)
4728 {
4729         struct pqi_scsi_dev *device;
4730         unsigned long flags;
4731         struct pqi_ctrl_info *ctrl_info;
4732         struct scsi_target *starget;
4733         struct sas_rphy *rphy;
4734
4735         ctrl_info = shost_to_hba(sdev->host);
4736
4737         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
4738
4739         if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
4740                 starget = scsi_target(sdev);
4741                 rphy = target_to_rphy(starget);
4742                 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
4743                 if (device) {
4744                         device->target = sdev_id(sdev);
4745                         device->lun = sdev->lun;
4746                         device->target_lun_valid = true;
4747                 }
4748         } else {
4749                 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
4750                         sdev_id(sdev), sdev->lun);
4751         }
4752
4753         if (device && device->expose_device) {
4754                 sdev->hostdata = device;
4755                 device->sdev = sdev;
4756                 if (device->queue_depth) {
4757                         device->advertised_queue_depth = device->queue_depth;
4758                         scsi_change_queue_depth(sdev,
4759                                 device->advertised_queue_depth);
4760                 }
4761         }
4762
4763         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
4764
4765         return 0;
4766 }
4767
4768 static int pqi_slave_configure(struct scsi_device *sdev)
4769 {
4770         struct pqi_scsi_dev *device;
4771
4772         device = sdev->hostdata;
4773         if (!device->expose_device)
4774                 sdev->no_uld_attach = true;
4775
4776         return 0;
4777 }
4778
4779 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
4780         void __user *arg)
4781 {
4782         struct pci_dev *pci_dev;
4783         u32 subsystem_vendor;
4784         u32 subsystem_device;
4785         cciss_pci_info_struct pciinfo;
4786
4787         if (!arg)
4788                 return -EINVAL;
4789
4790         pci_dev = ctrl_info->pci_dev;
4791
4792         pciinfo.domain = pci_domain_nr(pci_dev->bus);
4793         pciinfo.bus = pci_dev->bus->number;
4794         pciinfo.dev_fn = pci_dev->devfn;
4795         subsystem_vendor = pci_dev->subsystem_vendor;
4796         subsystem_device = pci_dev->subsystem_device;
4797         pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
4798                 subsystem_vendor;
4799
4800         if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
4801                 return -EFAULT;
4802
4803         return 0;
4804 }
4805
4806 static int pqi_getdrivver_ioctl(void __user *arg)
4807 {
4808         u32 version;
4809
4810         if (!arg)
4811                 return -EINVAL;
4812
4813         version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
4814                 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
4815
4816         if (copy_to_user(arg, &version, sizeof(version)))
4817                 return -EFAULT;
4818
4819         return 0;
4820 }
4821
4822 struct ciss_error_info {
4823         u8      scsi_status;
4824         int     command_status;
4825         size_t  sense_data_length;
4826 };
4827
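     /*
      * Translate PQI error information into the legacy CISS error fields
      * (command status, SCSI status, sense data length) expected by the
      * CCISS_PASSTHRU ioctl interface.
      */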
4828 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
4829         struct ciss_error_info *ciss_error_info)
4830 {
4831         int ciss_cmd_status;
4832         size_t sense_data_length;
4833
4834         switch (pqi_error_info->data_out_result) {
4835         case PQI_DATA_IN_OUT_GOOD:
4836                 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
4837                 break;
4838         case PQI_DATA_IN_OUT_UNDERFLOW:
4839                 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
4840                 break;
4841         case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
4842                 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
4843                 break;
4844         case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
4845         case PQI_DATA_IN_OUT_BUFFER_ERROR:
4846         case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
4847         case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
4848         case PQI_DATA_IN_OUT_ERROR:
4849                 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
4850                 break;
4851         case PQI_DATA_IN_OUT_HARDWARE_ERROR:
4852         case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
4853         case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
4854         case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
4855         case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
4856         case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
4857         case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
4858         case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
4859         case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
4860         case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
4861                 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
4862                 break;
4863         case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
4864                 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
4865                 break;
4866         case PQI_DATA_IN_OUT_ABORTED:
4867                 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
4868                 break;
4869         case PQI_DATA_IN_OUT_TIMEOUT:
4870                 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
4871                 break;
4872         default:
4873                 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
4874                 break;
4875         }
4876
4877         sense_data_length =
4878                 get_unaligned_le16(&pqi_error_info->sense_data_length);
4879         if (sense_data_length == 0)
4880                 sense_data_length =
4881                         get_unaligned_le16(&pqi_error_info->response_data_length);
4882         if (sense_data_length)
4883                 if (sense_data_length > sizeof(pqi_error_info->data))
4884                         sense_data_length = sizeof(pqi_error_info->data);
4885
4886         ciss_error_info->scsi_status = pqi_error_info->status;
4887         ciss_error_info->command_status = ciss_cmd_status;
4888         ciss_error_info->sense_data_length = sense_data_length;
4889 }
4890
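     /*
      * CCISS_PASSTHRU: validate and copy in the user's command, bounce any
      * data buffer through a kernel allocation, issue the request
      * synchronously on the RAID path, then copy back CISS-format error
      * information and any read data.
      */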
4891 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
4892 {
4893         int rc;
4894         char *kernel_buffer = NULL;
4895         u16 iu_length;
4896         size_t sense_data_length;
4897         IOCTL_Command_struct iocommand;
4898         struct pqi_raid_path_request request;
4899         struct pqi_raid_error_info pqi_error_info;
4900         struct ciss_error_info ciss_error_info;
4901
4902         if (pqi_ctrl_offline(ctrl_info))
4903                 return -ENXIO;
4904         if (!arg)
4905                 return -EINVAL;
4906         if (!capable(CAP_SYS_RAWIO))
4907                 return -EPERM;
4908         if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
4909                 return -EFAULT;
4910         if (iocommand.buf_size < 1 &&
4911                 iocommand.Request.Type.Direction != XFER_NONE)
4912                 return -EINVAL;
4913         if (iocommand.Request.CDBLen > sizeof(request.cdb))
4914                 return -EINVAL;
4915         if (iocommand.Request.Type.Type != TYPE_CMD)
4916                 return -EINVAL;
4917
4918         switch (iocommand.Request.Type.Direction) {
4919         case XFER_NONE:
4920         case XFER_WRITE:
4921         case XFER_READ:
4922                 break;
4923         default:
4924                 return -EINVAL;
4925         }
4926
4927         if (iocommand.buf_size > 0) {
4928                 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
4929                 if (!kernel_buffer)
4930                         return -ENOMEM;
4931                 if (iocommand.Request.Type.Direction & XFER_WRITE) {
4932                         if (copy_from_user(kernel_buffer, iocommand.buf,
4933                                 iocommand.buf_size)) {
4934                                 rc = -EFAULT;
4935                                 goto out;
4936                         }
4937                 } else {
4938                         memset(kernel_buffer, 0, iocommand.buf_size);
4939                 }
4940         }
4941
4942         memset(&request, 0, sizeof(request));
4943
4944         request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4945         iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4946                 PQI_REQUEST_HEADER_LENGTH;
4947         memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
4948                 sizeof(request.lun_number));
4949         memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
4950         request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
4951
4952         switch (iocommand.Request.Type.Direction) {
4953         case XFER_NONE:
4954                 request.data_direction = SOP_NO_DIRECTION_FLAG;
4955                 break;
4956         case XFER_WRITE:
4957                 request.data_direction = SOP_WRITE_FLAG;
4958                 break;
4959         case XFER_READ:
4960                 request.data_direction = SOP_READ_FLAG;
4961                 break;
4962         }
4963
4964         request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4965
4966         if (iocommand.buf_size > 0) {
4967                 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
4968
4969                 rc = pqi_map_single(ctrl_info->pci_dev,
4970                         &request.sg_descriptors[0], kernel_buffer,
4971                         iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
4972                 if (rc)
4973                         goto out;
4974
4975                 iu_length += sizeof(request.sg_descriptors[0]);
4976         }
4977
4978         put_unaligned_le16(iu_length, &request.header.iu_length);
4979
4980         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
4981                 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
4982
4983         if (iocommand.buf_size > 0)
4984                 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
4985                         PCI_DMA_BIDIRECTIONAL);
4986
4987         memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
4988
4989         if (rc == 0) {
4990                 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
4991                 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
4992                 iocommand.error_info.CommandStatus =
4993                         ciss_error_info.command_status;
4994                 sense_data_length = ciss_error_info.sense_data_length;
4995                 if (sense_data_length) {
4996                         if (sense_data_length >
4997                                 sizeof(iocommand.error_info.SenseInfo))
4998                                 sense_data_length =
4999                                         sizeof(iocommand.error_info.SenseInfo);
5000                         memcpy(iocommand.error_info.SenseInfo,
5001                                 pqi_error_info.data, sense_data_length);
5002                         iocommand.error_info.SenseLen = sense_data_length;
5003                 }
5004         }
5005
5006         if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
5007                 rc = -EFAULT;
5008                 goto out;
5009         }
5010
5011         if (rc == 0 && iocommand.buf_size > 0 &&
5012                 (iocommand.Request.Type.Direction & XFER_READ)) {
5013                 if (copy_to_user(iocommand.buf, kernel_buffer,
5014                         iocommand.buf_size)) {
5015                         rc = -EFAULT;
5016                 }
5017         }
5018
5019 out:
5020         kfree(kernel_buffer);
5021
5022         return rc;
5023 }
5024
5025 static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
5026 {
5027         int rc;
5028         struct pqi_ctrl_info *ctrl_info;
5029
5030         ctrl_info = shost_to_hba(sdev->host);
5031
5032         switch (cmd) {
5033         case CCISS_DEREGDISK:
5034         case CCISS_REGNEWDISK:
5035         case CCISS_REGNEWD:
5036                 rc = pqi_scan_scsi_devices(ctrl_info);
5037                 break;
5038         case CCISS_GETPCIINFO:
5039                 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
5040                 break;
5041         case CCISS_GETDRIVVER:
5042                 rc = pqi_getdrivver_ioctl(arg);
5043                 break;
5044         case CCISS_PASSTHRU:
5045                 rc = pqi_passthru_ioctl(ctrl_info, arg);
5046                 break;
5047         default:
5048                 rc = -EINVAL;
5049                 break;
5050         }
5051
5052         return rc;
5053 }
5054
5055 static ssize_t pqi_version_show(struct device *dev,
5056         struct device_attribute *attr, char *buffer)
5057 {
5058         ssize_t count = 0;
5059         struct Scsi_Host *shost;
5060         struct pqi_ctrl_info *ctrl_info;
5061
5062         shost = class_to_shost(dev);
5063         ctrl_info = shost_to_hba(shost);
5064
5065         count += snprintf(buffer + count, PAGE_SIZE - count,
5066                 "  driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);
5067
5068         count += snprintf(buffer + count, PAGE_SIZE - count,
5069                 "firmware: %s\n", ctrl_info->firmware_version);
5070
5071         return count;
5072 }
5073
5074 static ssize_t pqi_host_rescan_store(struct device *dev,
5075         struct device_attribute *attr, const char *buffer, size_t count)
5076 {
5077         struct Scsi_Host *shost = class_to_shost(dev);
5078
5079         pqi_scan_start(shost);
5080
5081         return count;
5082 }
5083
5084 static DEVICE_ATTR(version, S_IRUGO, pqi_version_show, NULL);
5085 static DEVICE_ATTR(rescan, S_IWUSR, NULL, pqi_host_rescan_store);
5086
5087 static struct device_attribute *pqi_shost_attrs[] = {
5088         &dev_attr_version,
5089         &dev_attr_rescan,
5090         NULL
5091 };
5092
5093 static ssize_t pqi_sas_address_show(struct device *dev,
5094         struct device_attribute *attr, char *buffer)
5095 {
5096         struct pqi_ctrl_info *ctrl_info;
5097         struct scsi_device *sdev;
5098         struct pqi_scsi_dev *device;
5099         unsigned long flags;
5100         u64 sas_address;
5101
5102         sdev = to_scsi_device(dev);
5103         ctrl_info = shost_to_hba(sdev->host);
5104
5105         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5106
5107         device = sdev->hostdata;
5108         if (pqi_is_logical_device(device)) {
5109                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5110                         flags);
5111                 return -ENODEV;
5112         }
5113         sas_address = device->sas_address;
5114
5115         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5116
5117         return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
5118 }
5119
5120 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
5121         struct device_attribute *attr, char *buffer)
5122 {
5123         struct pqi_ctrl_info *ctrl_info;
5124         struct scsi_device *sdev;
5125         struct pqi_scsi_dev *device;
5126         unsigned long flags;
5127
5128         sdev = to_scsi_device(dev);
5129         ctrl_info = shost_to_hba(sdev->host);
5130
5131         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5132
5133         device = sdev->hostdata;
5134         buffer[0] = device->offload_enabled ? '1' : '0';
5135         buffer[1] = '\n';
5136         buffer[2] = '\0';
5137
5138         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5139
5140         return 2;
5141 }
5142
5143 static DEVICE_ATTR(sas_address, S_IRUGO, pqi_sas_address_show, NULL);
5144 static DEVICE_ATTR(ssd_smart_path_enabled, S_IRUGO,
5145         pqi_ssd_smart_path_enabled_show, NULL);
5146
5147 static struct device_attribute *pqi_sdev_attrs[] = {
5148         &dev_attr_sas_address,
5149         &dev_attr_ssd_smart_path_enabled,
5150         NULL
5151 };
5152
5153 static struct scsi_host_template pqi_driver_template = {
5154         .module = THIS_MODULE,
5155         .name = DRIVER_NAME_SHORT,
5156         .proc_name = DRIVER_NAME_SHORT,
5157         .queuecommand = pqi_scsi_queue_command,
5158         .scan_start = pqi_scan_start,
5159         .scan_finished = pqi_scan_finished,
5160         .this_id = -1,
5161         .use_clustering = ENABLE_CLUSTERING,
5162         .eh_device_reset_handler = pqi_eh_device_reset_handler,
5163         .ioctl = pqi_ioctl,
5164         .slave_alloc = pqi_slave_alloc,
5165         .slave_configure = pqi_slave_configure,
5166         .sdev_attrs = pqi_sdev_attrs,
5167         .shost_attrs = pqi_shost_attrs,
5168 };
5169
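     /*
      * Allocate and register the Scsi_Host, sizing its queue depth, SG
      * table, and hardware queue count from the values calculated earlier,
      * then attach the SAS transport host.
      */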
5170 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
5171 {
5172         int rc;
5173         struct Scsi_Host *shost;
5174
5175         shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
5176         if (!shost) {
5177                 dev_err(&ctrl_info->pci_dev->dev,
5178                         "scsi_host_alloc failed for controller %u\n",
5179                         ctrl_info->ctrl_id);
5180                 return -ENOMEM;
5181         }
5182
5183         shost->io_port = 0;
5184         shost->n_io_port = 0;
5185         shost->this_id = -1;
5186         shost->max_channel = PQI_MAX_BUS;
5187         shost->max_cmd_len = MAX_COMMAND_SIZE;
5188         shost->max_lun = ~0;
5189         shost->max_id = ~0;
5190         shost->max_sectors = ctrl_info->max_sectors;
5191         shost->can_queue = ctrl_info->scsi_ml_can_queue;
5192         shost->cmd_per_lun = shost->can_queue;
5193         shost->sg_tablesize = ctrl_info->sg_tablesize;
5194         shost->transportt = pqi_sas_transport_template;
5195         shost->irq = ctrl_info->msix_vectors[0];
5196         shost->unique_id = shost->irq;
5197         shost->nr_hw_queues = ctrl_info->num_queue_groups;
5198         shost->hostdata[0] = (unsigned long)ctrl_info;
5199
5200         rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
5201         if (rc) {
5202                 dev_err(&ctrl_info->pci_dev->dev,
5203                         "scsi_add_host failed for controller %u\n",
5204                         ctrl_info->ctrl_id);
5205                 goto free_host;
5206         }
5207
5208         rc = pqi_add_sas_host(shost, ctrl_info);
5209         if (rc) {
5210                 dev_err(&ctrl_info->pci_dev->dev,
5211                         "add SAS host failed for controller %u\n",
5212                         ctrl_info->ctrl_id);
5213                 goto remove_host;
5214         }
5215
5216         ctrl_info->scsi_host = shost;
5217
5218         return 0;
5219
5220 remove_host:
5221         scsi_remove_host(shost);
5222 free_host:
5223         scsi_host_put(shost);
5224
5225         return rc;
5226 }
5227
5228 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
5229 {
5230         struct Scsi_Host *shost;
5231
5232         pqi_delete_sas_host(ctrl_info);
5233
5234         shost = ctrl_info->scsi_host;
5235         if (!shost)
5236                 return;
5237
5238         scsi_remove_host(shost);
5239         scsi_host_put(shost);
5240 }
5241
5242 #define PQI_RESET_ACTION_RESET          0x1
5243
5244 #define PQI_RESET_TYPE_NO_RESET         0x0
5245 #define PQI_RESET_TYPE_SOFT_RESET       0x1
5246 #define PQI_RESET_TYPE_FIRM_RESET       0x2
5247 #define PQI_RESET_TYPE_HARD_RESET       0x3
5248
5249 static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
5250 {
5251         int rc;
5252         u32 reset_params;
5253
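             /*
              * The reset action is shifted into the upper bits of the reset
              * register and the reset type occupies the low-order bits; a
              * hard reset is requested here.
              */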
5254         reset_params = (PQI_RESET_ACTION_RESET << 5) |
5255                 PQI_RESET_TYPE_HARD_RESET;
5256
5257         writel(reset_params,
5258                 &ctrl_info->pqi_registers->device_reset);
5259
5260         rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
5261         if (rc)
5262                 dev_err(&ctrl_info->pci_dev->dev,
5263                         "PQI reset failed\n");
5264
5265         return rc;
5266 }
5267
5268 static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
5269 {
5270         int rc;
5271         struct bmic_identify_controller *identify;
5272
5273         identify = kmalloc(sizeof(*identify), GFP_KERNEL);
5274         if (!identify)
5275                 return -ENOMEM;
5276
5277         rc = pqi_identify_controller(ctrl_info, identify);
5278         if (rc)
5279                 goto out;
5280
5281         memcpy(ctrl_info->firmware_version, identify->firmware_version,
5282                 sizeof(identify->firmware_version));
5283         ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
5284         snprintf(ctrl_info->firmware_version +
5285                 strlen(ctrl_info->firmware_version),
5286                 sizeof(ctrl_info->firmware_version) - strlen(ctrl_info->firmware_version),
5287                 "-%u", get_unaligned_le16(&identify->firmware_build_number));
5288
5289 out:
5290         kfree(identify);
5291
5292         return rc;
5293 }
5294
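     /*
      * Main controller bring-up: talk to the controller over SIS until it is
      * ready, switch it into PQI mode, create the admin and operational
      * queues, wire up MSI-X interrupts, register with the SCSI midlayer,
      * and kick off the initial device scan.
      */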
5295 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
5296 {
5297         int rc;
5298
5299         /*
5300          * When the controller comes out of reset, it is always running
5301          * in legacy SIS mode.  This is so that it can be compatible
5302          * with legacy drivers shipped with OSes.  So we have to talk
5303          * to it using SIS commands at first.  Once we are satisified
5304          * to it using SIS commands at first.  Once we are satisfied
5305          * mode.
5306          */
5307
5308         /*
5309          * Wait until the controller is ready to start accepting SIS
5310          * commands.
5311          */
5312         rc = sis_wait_for_ctrl_ready(ctrl_info);
5313         if (rc) {
5314                 dev_err(&ctrl_info->pci_dev->dev,
5315                         "error initializing SIS interface\n");
5316                 return rc;
5317         }
5318
5319         /*
5320          * Get the controller properties.  This allows us to determine
5321          * whether or not it supports PQI mode.
5322          */
5323         rc = sis_get_ctrl_properties(ctrl_info);
5324         if (rc) {
5325                 dev_err(&ctrl_info->pci_dev->dev,
5326                         "error obtaining controller properties\n");
5327                 return rc;
5328         }
5329
5330         rc = sis_get_pqi_capabilities(ctrl_info);
5331         if (rc) {
5332                 dev_err(&ctrl_info->pci_dev->dev,
5333                         "error obtaining controller capabilities\n");
5334                 return rc;
5335         }
5336
5337         if (ctrl_info->max_outstanding_requests > PQI_MAX_OUTSTANDING_REQUESTS)
5338                 ctrl_info->max_outstanding_requests =
5339                         PQI_MAX_OUTSTANDING_REQUESTS;
5340
5341         pqi_calculate_io_resources(ctrl_info);
5342
5343         rc = pqi_alloc_error_buffer(ctrl_info);
5344         if (rc) {
5345                 dev_err(&ctrl_info->pci_dev->dev,
5346                         "failed to allocate PQI error buffer\n");
5347                 return rc;
5348         }
5349
5350         /*
5351          * If the function we are about to call succeeds, the
5352          * controller will transition from legacy SIS mode
5353          * into PQI mode.
5354          */
5355         rc = sis_init_base_struct_addr(ctrl_info);
5356         if (rc) {
5357                 dev_err(&ctrl_info->pci_dev->dev,
5358                         "error initializing PQI mode\n");
5359                 return rc;
5360         }
5361
5362         /* Wait for the controller to complete the SIS -> PQI transition. */
5363         rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
5364         if (rc) {
5365                 dev_err(&ctrl_info->pci_dev->dev,
5366                         "transition to PQI mode failed\n");
5367                 return rc;
5368         }
5369
5370         /* From here on, we are running in PQI mode. */
5371         ctrl_info->pqi_mode_enabled = true;
5372
5373         rc = pqi_alloc_admin_queues(ctrl_info);
5374         if (rc) {
5375                 dev_err(&ctrl_info->pci_dev->dev,
5376                         "error allocating admin queues\n");
5377                 return rc;
5378         }
5379
5380         rc = pqi_create_admin_queues(ctrl_info);
5381         if (rc) {
5382                 dev_err(&ctrl_info->pci_dev->dev,
5383                         "error creating admin queues\n");
5384                 return rc;
5385         }
5386
5387         rc = pqi_report_device_capability(ctrl_info);
5388         if (rc) {
5389                 dev_err(&ctrl_info->pci_dev->dev,
5390                         "obtaining device capability failed\n");
5391                 return rc;
5392         }
5393
5394         rc = pqi_validate_device_capability(ctrl_info);
5395         if (rc)
5396                 return rc;
5397
5398         pqi_calculate_queue_resources(ctrl_info);
5399
5400         rc = pqi_enable_msix_interrupts(ctrl_info);
5401         if (rc)
5402                 return rc;
5403
5404         if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
5405                 ctrl_info->max_msix_vectors =
5406                         ctrl_info->num_msix_vectors_enabled;
5407                 pqi_calculate_queue_resources(ctrl_info);
5408         }
5409
5410         rc = pqi_alloc_io_resources(ctrl_info);
5411         if (rc)
5412                 return rc;
5413
5414         rc = pqi_alloc_operational_queues(ctrl_info);
5415         if (rc)
5416                 return rc;
5417
5418         pqi_init_operational_queues(ctrl_info);
5419
5420         rc = pqi_request_irqs(ctrl_info);
5421         if (rc)
5422                 return rc;
5423
5424         pqi_irq_set_affinity_hint(ctrl_info);
5425
5426         rc = pqi_create_queues(ctrl_info);
5427         if (rc)
5428                 return rc;
5429
5430         sis_enable_msix(ctrl_info);
5431
5432         rc = pqi_configure_events(ctrl_info);
5433         if (rc) {
5434                 dev_err(&ctrl_info->pci_dev->dev,
5435                         "error configuring events\n");
5436                 return rc;
5437         }
5438
5439         pqi_start_heartbeat_timer(ctrl_info);
5440
5441         ctrl_info->controller_online = true;
5442
5443         /* Register with the SCSI subsystem. */
5444         rc = pqi_register_scsi(ctrl_info);
5445         if (rc)
5446                 return rc;
5447
5448         rc = pqi_get_ctrl_firmware_version(ctrl_info);
5449         if (rc) {
5450                 dev_err(&ctrl_info->pci_dev->dev,
5451                         "error obtaining firmware version\n");
5452                 return rc;
5453         }
5454
5455         rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
5456         if (rc) {
5457                 dev_err(&ctrl_info->pci_dev->dev,
5458                         "error updating host wellness\n");
5459                 return rc;
5460         }
5461
5462         pqi_schedule_update_time_worker(ctrl_info);
5463
5464         pqi_scan_scsi_devices(ctrl_info);
5465
5466         return 0;
5467 }
5468
5469 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
5470 {
5471         int rc;
5472         u64 mask;
5473
5474         rc = pci_enable_device(ctrl_info->pci_dev);
5475         if (rc) {
5476                 dev_err(&ctrl_info->pci_dev->dev,
5477                         "failed to enable PCI device\n");
5478                 return rc;
5479         }
5480
5481         if (sizeof(dma_addr_t) > 4)
5482                 mask = DMA_BIT_MASK(64);
5483         else
5484                 mask = DMA_BIT_MASK(32);
5485
5486         rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
5487         if (rc) {
5488                 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
5489                 goto disable_device;
5490         }
5491
5492         rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
5493         if (rc) {
5494                 dev_err(&ctrl_info->pci_dev->dev,
5495                         "failed to obtain PCI resources\n");
5496                 goto disable_device;
5497         }
5498
5499         ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
5500                 ctrl_info->pci_dev, 0),
5501                 sizeof(struct pqi_ctrl_registers));
5502         if (!ctrl_info->iomem_base) {
5503                 dev_err(&ctrl_info->pci_dev->dev,
5504                         "failed to map memory for controller registers\n");
5505                 rc = -ENOMEM;
5506                 goto release_regions;
5507         }
5508
5509         ctrl_info->registers = ctrl_info->iomem_base;
5510         ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
5511
5512         /* Enable bus mastering. */
5513         pci_set_master(ctrl_info->pci_dev);
5514
5515         pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
5516
5517         return 0;
5518
5519 release_regions:
5520         pci_release_regions(ctrl_info->pci_dev);
5521 disable_device:
5522         pci_disable_device(ctrl_info->pci_dev);
5523
5524         return rc;
5525 }
5526
5527 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
5528 {
5529         iounmap(ctrl_info->iomem_base);
5530         pci_release_regions(ctrl_info->pci_dev);
5531         pci_disable_device(ctrl_info->pci_dev);
5532         pci_set_drvdata(ctrl_info->pci_dev, NULL);
5533 }
5534
5535 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
5536 {
5537         struct pqi_ctrl_info *ctrl_info;
5538
5539         ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
5540                         GFP_KERNEL, numa_node);
5541         if (!ctrl_info)
5542                 return NULL;
5543
5544         mutex_init(&ctrl_info->scan_mutex);
5545
5546         INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
5547         spin_lock_init(&ctrl_info->scsi_device_list_lock);
5548
5549         INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
5550         atomic_set(&ctrl_info->num_interrupts, 0);
5551
5552         INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
5553         INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
5554
5555         sema_init(&ctrl_info->sync_request_sem,
5556                 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
5557         sema_init(&ctrl_info->lun_reset_sem, PQI_RESERVED_IO_SLOTS_LUN_RESET);
5558
5559         ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
5560         ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
5561
5562         return ctrl_info;
5563 }
5564
5565 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
5566 {
5567         kfree(ctrl_info);
5568 }
5569
5570 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
5571 {
5572         pqi_irq_unset_affinity_hint(ctrl_info);
5573         pqi_free_irqs(ctrl_info);
5574         if (ctrl_info->num_msix_vectors_enabled)
5575                 pci_disable_msix(ctrl_info->pci_dev);
5576 }
5577
5578 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
5579 {
5580         pqi_stop_heartbeat_timer(ctrl_info);
5581         pqi_free_interrupts(ctrl_info);
5582         if (ctrl_info->queue_memory_base)
5583                 dma_free_coherent(&ctrl_info->pci_dev->dev,
5584                         ctrl_info->queue_memory_length,
5585                         ctrl_info->queue_memory_base,
5586                         ctrl_info->queue_memory_base_dma_handle);
5587         if (ctrl_info->admin_queue_memory_base)
5588                 dma_free_coherent(&ctrl_info->pci_dev->dev,
5589                         ctrl_info->admin_queue_memory_length,
5590                         ctrl_info->admin_queue_memory_base,
5591                         ctrl_info->admin_queue_memory_base_dma_handle);
5592         pqi_free_all_io_requests(ctrl_info);
5593         if (ctrl_info->error_buffer)
5594                 dma_free_coherent(&ctrl_info->pci_dev->dev,
5595                         ctrl_info->error_buffer_length,
5596                         ctrl_info->error_buffer,
5597                         ctrl_info->error_buffer_dma_handle);
5598         if (ctrl_info->iomem_base)
5599                 pqi_cleanup_pci_init(ctrl_info);
5600         pqi_free_ctrl_info(ctrl_info);
5601 }
5602
5603 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
5604 {
5605         int rc;
5606
5607         if (ctrl_info->controller_online) {
5608                 cancel_delayed_work_sync(&ctrl_info->rescan_work);
5609                 cancel_delayed_work_sync(&ctrl_info->update_time_work);
5610                 pqi_remove_all_scsi_devices(ctrl_info);
5611                 pqi_unregister_scsi(ctrl_info);
5612                 ctrl_info->controller_online = false;
5613         }
5614         if (ctrl_info->pqi_mode_enabled) {
5615                 sis_disable_msix(ctrl_info);
5616                 rc = pqi_reset(ctrl_info);
5617                 if (rc == 0)
5618                         sis_reenable_sis_mode(ctrl_info);
5619         }
5620         pqi_free_ctrl_resources(ctrl_info);
5621 }
5622
5623 static void pqi_print_ctrl_info(struct pci_dev *pdev,
5624         const struct pci_device_id *id)
5625 {
5626         char *ctrl_description;
5627
5628         if (id->driver_data) {
5629                 ctrl_description = (char *)id->driver_data;
5630         } else {
5631                 switch (id->subvendor) {
5632                 case PCI_VENDOR_ID_HP:
5633                         ctrl_description = hpe_branded_controller;
5634                         break;
5635                 case PCI_VENDOR_ID_ADAPTEC2:
5636                 default:
5637                         ctrl_description = microsemi_branded_controller;
5638                         break;
5639                 }
5640         }
5641
5642         dev_info(&pdev->dev, "%s found\n", ctrl_description);
5643 }
5644
5645 static int pqi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5646 {
5647         int rc;
5648         int node;
5649         struct pqi_ctrl_info *ctrl_info;
5650
5651         pqi_print_ctrl_info(pdev, id);
5652
5653         if (pqi_disable_device_id_wildcards &&
5654                 id->subvendor == PCI_ANY_ID &&
5655                 id->subdevice == PCI_ANY_ID) {
5656                 dev_warn(&pdev->dev,
5657                         "controller not probed because device ID wildcards are disabled\n");
5658                 return -ENODEV;
5659         }
5660
5661         if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
5662                 dev_warn(&pdev->dev,
5663                         "controller device ID matched using wildcards\n");
5664
5665         node = dev_to_node(&pdev->dev);
5666         if (node == NUMA_NO_NODE)
5667                 set_dev_node(&pdev->dev, 0);
5668
5669         ctrl_info = pqi_alloc_ctrl_info(node);
5670         if (!ctrl_info) {
5671                 dev_err(&pdev->dev,
5672                         "failed to allocate controller info block\n");
5673                 return -ENOMEM;
5674         }
5675
5676         ctrl_info->pci_dev = pdev;
5677
5678         rc = pqi_pci_init(ctrl_info);
5679         if (rc)
5680                 goto error;
5681
5682         rc = pqi_ctrl_init(ctrl_info);
5683         if (rc)
5684                 goto error;
5685
5686         return 0;
5687
5688 error:
5689         pqi_remove_ctrl(ctrl_info);
5690
5691         return rc;
5692 }
5693
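/* PCI remove entry point: tear down the controller bound to this device. */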
5694 static void pqi_pci_remove(struct pci_dev *pdev)
5695 {
5696         struct pqi_ctrl_info *ctrl_info;
5697
5698         ctrl_info = pci_get_drvdata(pdev);
5699         if (!ctrl_info)
5700                 return;
5701
5702         pqi_remove_ctrl(ctrl_info);
5703 }
5704
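/*
 * PCI shutdown entry point (reboot/power-off): flush the controller's
 * write cache so no data is left only in battery-backed memory.
 */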
5705 static void pqi_shutdown(struct pci_dev *pdev)
5706 {
5707         int rc;
5708         struct pqi_ctrl_info *ctrl_info;
5709
5710         ctrl_info = pci_get_drvdata(pdev);
5711         if (!ctrl_info)
5712                 goto error;
5713
5714         /*
5715          * Write all data in the controller's battery-backed cache to
5716          * storage.
5717          */
5718         rc = pqi_flush_cache(ctrl_info);
5719         if (rc == 0)
5720                 return;
5721
5722 error:
5723         dev_warn(&pdev->dev,
5724                 "unable to flush controller cache\n");
5725 }
5726
5727 /* Define the PCI IDs for the controllers that we support. */
5728 static const struct pci_device_id pqi_pci_id_table[] = {
5729         {
5730                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5731                                PCI_VENDOR_ID_ADAPTEC2, 0x0110)
5732         },
5733         {
5734                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5735                                PCI_VENDOR_ID_HP, 0x0600)
5736         },
5737         {
5738                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5739                                PCI_VENDOR_ID_HP, 0x0601)
5740         },
5741         {
5742                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5743                                PCI_VENDOR_ID_HP, 0x0602)
5744         },
5745         {
5746                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5747                                PCI_VENDOR_ID_HP, 0x0603)
5748         },
5749         {
5750                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5751                                PCI_VENDOR_ID_HP, 0x0650)
5752         },
5753         {
5754                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5755                                PCI_VENDOR_ID_HP, 0x0651)
5756         },
5757         {
5758                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5759                                PCI_VENDOR_ID_HP, 0x0652)
5760         },
5761         {
5762                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5763                                PCI_VENDOR_ID_HP, 0x0653)
5764         },
5765         {
5766                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5767                                PCI_VENDOR_ID_HP, 0x0654)
5768         },
5769         {
5770                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5771                                PCI_VENDOR_ID_HP, 0x0655)
5772         },
5773         {
5774                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5775                                PCI_VENDOR_ID_HP, 0x0700)
5776         },
5777         {
5778                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5779                                PCI_VENDOR_ID_HP, 0x0701)
5780         },
5781         {
5782                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5783                                PCI_VENDOR_ID_ADAPTEC2, 0x0800)
5784         },
5785         {
5786                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5787                                PCI_VENDOR_ID_ADAPTEC2, 0x0801)
5788         },
5789         {
5790                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5791                                PCI_VENDOR_ID_ADAPTEC2, 0x0802)
5792         },
5793         {
5794                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5795                                PCI_VENDOR_ID_ADAPTEC2, 0x0803)
5796         },
5797         {
5798                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5799                                PCI_VENDOR_ID_ADAPTEC2, 0x0804)
5800         },
5801         {
5802                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5803                                PCI_VENDOR_ID_ADAPTEC2, 0x0805)
5804         },
5805         {
5806                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5807                                PCI_VENDOR_ID_ADAPTEC2, 0x0900)
5808         },
5809         {
5810                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5811                                PCI_VENDOR_ID_ADAPTEC2, 0x0901)
5812         },
5813         {
5814                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5815                                PCI_VENDOR_ID_ADAPTEC2, 0x0902)
5816         },
5817         {
5818                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5819                                PCI_VENDOR_ID_ADAPTEC2, 0x0903)
5820         },
5821         {
5822                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5823                                PCI_VENDOR_ID_ADAPTEC2, 0x0904)
5824         },
5825         {
5826                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5827                                PCI_VENDOR_ID_ADAPTEC2, 0x0905)
5828         },
5829         {
5830                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5831                                PCI_VENDOR_ID_ADAPTEC2, 0x0906)
5832         },
5833         {
5834                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5835                                PCI_VENDOR_ID_HP, 0x1001)
5836         },
5837         {
5838                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5839                                PCI_VENDOR_ID_HP, 0x1100)
5840         },
5841         {
5842                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5843                                PCI_VENDOR_ID_HP, 0x1101)
5844         },
5845         {
5846                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5847                                PCI_VENDOR_ID_HP, 0x1102)
5848         },
5849         {
5850                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5851                                PCI_VENDOR_ID_HP, 0x1150)
5852         },
5853         {
5854                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
5855                                PCI_ANY_ID, PCI_ANY_ID)
5856         },
5857         { 0 }
5858 };
5859
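/*
 * Export the ID table so userspace (depmod/modprobe) can autoload this
 * driver from the PCI modalias of a matching board.  The last real entry
 * above is a PCI_ANY_ID wildcard that claims any 0x028f adapter without an
 * explicit subsystem ID.  A newly supported board would get its own entry
 * ahead of that wildcard, for example (hypothetical subsystem device ID,
 * shown only for illustration):
 *
 *	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 *		       PCI_VENDOR_ID_ADAPTEC2, 0x0907)
 */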
5860 MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
5861
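/*
 * PCI driver glue: .probe/.remove run on device bind/unbind, and .shutdown
 * runs at reboot or power-off to flush the controller cache.
 */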
5862 static struct pci_driver pqi_pci_driver = {
5863         .name = DRIVER_NAME_SHORT,
5864         .id_table = pqi_pci_id_table,
5865         .probe = pqi_pci_probe,
5866         .remove = pqi_pci_remove,
5867         .shutdown = pqi_shutdown,
5868 };
5869
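/*
 * Module init: attach the SAS transport template before registering the
 * PCI driver so the template is available by the time probe runs; if
 * registration fails, the transport is released again.  Module exit
 * reverses the order.
 */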
5870 static int __init pqi_init(void)
5871 {
5872         int rc;
5873
5874         pr_info(DRIVER_NAME "\n");
5875
5876         pqi_sas_transport_template =
5877                 sas_attach_transport(&pqi_sas_transport_functions);
5878         if (!pqi_sas_transport_template)
5879                 return -ENODEV;
5880
5881         rc = pci_register_driver(&pqi_pci_driver);
5882         if (rc)
5883                 sas_release_transport(pqi_sas_transport_template);
5884
5885         return rc;
5886 }
5887
5888 static void __exit pqi_cleanup(void)
5889 {
5890         pci_unregister_driver(&pqi_pci_driver);
5891         sas_release_transport(pqi_sas_transport_template);
5892 }
5893
5894 module_init(pqi_init);
5895 module_exit(pqi_cleanup);
5896
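/*
 * Never called at runtime (hence the unused attribute); this function
 * exists only so the BUILD_BUG_ON() checks below verify, at compile time,
 * that the structures shared with controller firmware have the expected
 * field offsets and sizes.
 */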
5897 static void __attribute__((unused)) verify_structures(void)
5898 {
5899         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5900                 sis_host_to_ctrl_doorbell) != 0x20);
5901         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5902                 sis_interrupt_mask) != 0x34);
5903         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5904                 sis_ctrl_to_host_doorbell) != 0x9c);
5905         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5906                 sis_ctrl_to_host_doorbell_clear) != 0xa0);
5907         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5908                 sis_firmware_status) != 0xbc);
5909         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5910                 sis_mailbox) != 0x1000);
5911         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
5912                 pqi_registers) != 0x4000);
5913
5914         BUILD_BUG_ON(offsetof(struct pqi_iu_header,
5915                 iu_type) != 0x0);
5916         BUILD_BUG_ON(offsetof(struct pqi_iu_header,
5917                 iu_length) != 0x2);
5918         BUILD_BUG_ON(offsetof(struct pqi_iu_header,
5919                 response_queue_id) != 0x4);
5920         BUILD_BUG_ON(offsetof(struct pqi_iu_header,
5921                 work_area) != 0x6);
5922         BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
5923
5924         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5925                 status) != 0x0);
5926         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5927                 service_response) != 0x1);
5928         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5929                 data_present) != 0x2);
5930         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5931                 reserved) != 0x3);
5932         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5933                 residual_count) != 0x4);
5934         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5935                 data_length) != 0x8);
5936         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5937                 reserved1) != 0xa);
5938         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
5939                 data) != 0xc);
5940         BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
5941
5942         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5943                 data_in_result) != 0x0);
5944         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5945                 data_out_result) != 0x1);
5946         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5947                 reserved) != 0x2);
5948         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5949                 status) != 0x5);
5950         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5951                 status_qualifier) != 0x6);
5952         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5953                 sense_data_length) != 0x8);
5954         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5955                 response_data_length) != 0xa);
5956         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5957                 data_in_transferred) != 0xc);
5958         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5959                 data_out_transferred) != 0x10);
5960         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
5961                 data) != 0x14);
5962         BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
5963
5964         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5965                 signature) != 0x0);
5966         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5967                 function_and_status_code) != 0x8);
5968         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5969                 max_admin_iq_elements) != 0x10);
5970         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5971                 max_admin_oq_elements) != 0x11);
5972         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5973                 admin_iq_element_length) != 0x12);
5974         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5975                 admin_oq_element_length) != 0x13);
5976         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5977                 max_reset_timeout) != 0x14);
5978         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5979                 legacy_intx_status) != 0x18);
5980         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5981                 legacy_intx_mask_set) != 0x1c);
5982         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5983                 legacy_intx_mask_clear) != 0x20);
5984         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5985                 device_status) != 0x40);
5986         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5987                 admin_iq_pi_offset) != 0x48);
5988         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5989                 admin_oq_ci_offset) != 0x50);
5990         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5991                 admin_iq_element_array_addr) != 0x58);
5992         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5993                 admin_oq_element_array_addr) != 0x60);
5994         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5995                 admin_iq_ci_addr) != 0x68);
5996         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5997                 admin_oq_pi_addr) != 0x70);
5998         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
5999                 admin_iq_num_elements) != 0x78);
6000         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6001                 admin_oq_num_elements) != 0x79);
6002         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6003                 admin_queue_int_msg_num) != 0x7a);
6004         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6005                 device_error) != 0x80);
6006         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6007                 error_details) != 0x88);
6008         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6009                 device_reset) != 0x90);
6010         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6011                 power_action) != 0x94);
6012         BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
6013
6014         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6015                 header.iu_type) != 0);
6016         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6017                 header.iu_length) != 2);
6018         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6019                 header.work_area) != 6);
6020         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6021                 request_id) != 8);
6022         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6023                 function_code) != 10);
6024         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6025                 data.report_device_capability.buffer_length) != 44);
6026         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6027                 data.report_device_capability.sg_descriptor) != 48);
6028         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6029                 data.create_operational_iq.queue_id) != 12);
6030         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6031                 data.create_operational_iq.element_array_addr) != 16);
6032         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6033                 data.create_operational_iq.ci_addr) != 24);
6034         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6035                 data.create_operational_iq.num_elements) != 32);
6036         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6037                 data.create_operational_iq.element_length) != 34);
6038         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6039                 data.create_operational_iq.queue_protocol) != 36);
6040         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6041                 data.create_operational_oq.queue_id) != 12);
6042         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6043                 data.create_operational_oq.element_array_addr) != 16);
6044         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6045                 data.create_operational_oq.pi_addr) != 24);
6046         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6047                 data.create_operational_oq.num_elements) != 32);
6048         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6049                 data.create_operational_oq.element_length) != 34);
6050         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6051                 data.create_operational_oq.queue_protocol) != 36);
6052         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6053                 data.create_operational_oq.int_msg_num) != 40);
6054         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6055                 data.create_operational_oq.coalescing_count) != 42);
6056         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6057                 data.create_operational_oq.min_coalescing_time) != 44);
6058         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6059                 data.create_operational_oq.max_coalescing_time) != 48);
6060         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6061                 data.delete_operational_queue.queue_id) != 12);
6062         BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
6063         BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6064                 data.create_operational_iq) != 64 - 11);
6065         BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6066                 data.create_operational_oq) != 64 - 11);
6067         BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6068                 data.delete_operational_queue) != 64 - 11);
6069
6070         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6071                 header.iu_type) != 0);
6072         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6073                 header.iu_length) != 2);
6074         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6075                 header.work_area) != 6);
6076         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6077                 request_id) != 8);
6078         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6079                 function_code) != 10);
6080         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6081                 status) != 11);
6082         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6083                 data.create_operational_iq.status_descriptor) != 12);
6084         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6085                 data.create_operational_iq.iq_pi_offset) != 16);
6086         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6087                 data.create_operational_oq.status_descriptor) != 12);
6088         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6089                 data.create_operational_oq.oq_ci_offset) != 16);
6090         BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
6091
6092         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6093                 header.iu_type) != 0);
6094         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6095                 header.iu_length) != 2);
6096         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6097                 header.response_queue_id) != 4);
6098         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6099                 header.work_area) != 6);
6100         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6101                 request_id) != 8);
6102         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6103                 nexus_id) != 10);
6104         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6105                 buffer_length) != 12);
6106         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6107                 lun_number) != 16);
6108         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6109                 protocol_specific) != 24);
6110         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6111                 error_index) != 27);
6112         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6113                 cdb) != 32);
6114         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6115                 sg_descriptors) != 64);
6116         BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
6117                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
6118
6119         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6120                 header.iu_type) != 0);
6121         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6122                 header.iu_length) != 2);
6123         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6124                 header.response_queue_id) != 4);
6125         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6126                 header.work_area) != 6);
6127         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6128                 request_id) != 8);
6129         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6130                 nexus_id) != 12);
6131         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6132                 buffer_length) != 16);
6133         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6134                 data_encryption_key_index) != 22);
6135         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6136                 encrypt_tweak_lower) != 24);
6137         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6138                 encrypt_tweak_upper) != 28);
6139         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6140                 cdb) != 32);
6141         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6142                 error_index) != 48);
6143         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6144                 num_sg_descriptors) != 50);
6145         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6146                 cdb_length) != 51);
6147         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6148                 lun_number) != 52);
6149         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6150                 sg_descriptors) != 64);
6151         BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
6152                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
6153
6154         BUILD_BUG_ON(offsetof(struct pqi_io_response,
6155                 header.iu_type) != 0);
6156         BUILD_BUG_ON(offsetof(struct pqi_io_response,
6157                 header.iu_length) != 2);
6158         BUILD_BUG_ON(offsetof(struct pqi_io_response,
6159                 request_id) != 8);
6160         BUILD_BUG_ON(offsetof(struct pqi_io_response,
6161                 error_index) != 10);
6162
6163         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6164                 header.iu_type) != 0);
6165         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6166                 header.iu_length) != 2);
6167         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6168                 header.response_queue_id) != 4);
6169         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6170                 request_id) != 8);
6171         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6172                 data.report_event_configuration.buffer_length) != 12);
6173         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6174                 data.report_event_configuration.sg_descriptors) != 16);
6175         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6176                 data.set_event_configuration.global_event_oq_id) != 10);
6177         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6178                 data.set_event_configuration.buffer_length) != 12);
6179         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6180                 data.set_event_configuration.sg_descriptors) != 16);
6181
6182         BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
6183                 max_inbound_iu_length) != 6);
6184         BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
6185                 max_outbound_iu_length) != 14);
6186         BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
6187
6188         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6189                 data_length) != 0);
6190         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6191                 iq_arbitration_priority_support_bitmask) != 8);
6192         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6193                 maximum_aw_a) != 9);
6194         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6195                 maximum_aw_b) != 10);
6196         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6197                 maximum_aw_c) != 11);
6198         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6199                 max_inbound_queues) != 16);
6200         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6201                 max_elements_per_iq) != 18);
6202         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6203                 max_iq_element_length) != 24);
6204         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6205                 min_iq_element_length) != 26);
6206         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6207                 max_outbound_queues) != 30);
6208         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6209                 max_elements_per_oq) != 32);
6210         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6211                 intr_coalescing_time_granularity) != 34);
6212         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6213                 max_oq_element_length) != 36);
6214         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6215                 min_oq_element_length) != 38);
6216         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6217                 iu_layer_descriptors) != 64);
6218         BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
6219
6220         BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
6221                 event_type) != 0);
6222         BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
6223                 oq_id) != 2);
6224         BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
6225
6226         BUILD_BUG_ON(offsetof(struct pqi_event_config,
6227                 num_event_descriptors) != 2);
6228         BUILD_BUG_ON(offsetof(struct pqi_event_config,
6229                 descriptors) != 4);
6230
6231         BUILD_BUG_ON(offsetof(struct pqi_event_response,
6232                 header.iu_type) != 0);
6233         BUILD_BUG_ON(offsetof(struct pqi_event_response,
6234                 header.iu_length) != 2);
6235         BUILD_BUG_ON(offsetof(struct pqi_event_response,
6236                 event_type) != 8);
6237         BUILD_BUG_ON(offsetof(struct pqi_event_response,
6238                 event_id) != 10);
6239         BUILD_BUG_ON(offsetof(struct pqi_event_response,
6240                 additional_event_id) != 12);
6241         BUILD_BUG_ON(offsetof(struct pqi_event_response,
6242                 data) != 16);
6243         BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
6244
6245         BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6246                 header.iu_type) != 0);
6247         BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6248                 header.iu_length) != 2);
6249         BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6250                 event_type) != 8);
6251         BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6252                 event_id) != 10);
6253         BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6254                 additional_event_id) != 12);
6255         BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
6256
6257         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6258                 header.iu_type) != 0);
6259         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6260                 header.iu_length) != 2);
6261         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6262                 request_id) != 8);
6263         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6264                 nexus_id) != 10);
6265         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6266                 lun_number) != 16);
6267         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6268                 protocol_specific) != 24);
6269         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6270                 outbound_queue_id_to_manage) != 26);
6271         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6272                 request_id_to_manage) != 28);
6273         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6274                 task_management_function) != 30);
6275         BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
6276
6277         BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6278                 header.iu_type) != 0);
6279         BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6280                 header.iu_length) != 2);
6281         BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6282                 request_id) != 8);
6283         BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6284                 nexus_id) != 10);
6285         BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6286                 additional_response_info) != 12);
6287         BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6288                 response_code) != 15);
6289         BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
6290
6291         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6292                 configured_logical_drive_count) != 0);
6293         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6294                 configuration_signature) != 1);
6295         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6296                 firmware_version) != 5);
6297         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6298                 extended_logical_unit_count) != 154);
6299         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6300                 firmware_build_number) != 190);
6301         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6302                 controller_mode) != 292);
6303
6304         BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
6305         BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
6306         BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
6307                 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6308         BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
6309                 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6310         BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
6311         BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
6312                 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6313         BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
6314         BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
6315                 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6316
6317         BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
6318 }