1 /*
2 *******************************************************************************
3 **        O.S   : Linux
4 **   FILE NAME  : arcmsr_hba.c
5 **        BY    : Nick Cheng, C.L. Huang
6 **   Description: SCSI RAID Device Driver for Areca RAID Controller
7 *******************************************************************************
8 ** Copyright (C) 2002 - 2014, Areca Technology Corporation All rights reserved
9 **
10 **     Web site: www.areca.com.tw
11 **       E-mail: support@areca.com.tw
12 **
13 ** This program is free software; you can redistribute it and/or modify
14 ** it under the terms of the GNU General Public License version 2 as
15 ** published by the Free Software Foundation.
16 ** This program is distributed in the hope that it will be useful,
17 ** but WITHOUT ANY WARRANTY; without even the implied warranty of
18 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19 ** GNU General Public License for more details.
20 *******************************************************************************
21 ** Redistribution and use in source and binary forms, with or without
22 ** modification, are permitted provided that the following conditions
23 ** are met:
24 ** 1. Redistributions of source code must retain the above copyright
25 **    notice, this list of conditions and the following disclaimer.
26 ** 2. Redistributions in binary form must reproduce the above copyright
27 **    notice, this list of conditions and the following disclaimer in the
28 **    documentation and/or other materials provided with the distribution.
29 ** 3. The name of the author may not be used to endorse or promote products
30 **    derived from this software without specific prior written permission.
31 **
32 ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
33 ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
34 ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
35 ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
36 ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT
37 ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
38 ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
39 ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
40 ** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
41 ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42 *******************************************************************************
43 ** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
44 **     Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
45 *******************************************************************************
46 */
47 #include <linux/module.h>
48 #include <linux/reboot.h>
49 #include <linux/spinlock.h>
50 #include <linux/pci_ids.h>
51 #include <linux/interrupt.h>
52 #include <linux/moduleparam.h>
53 #include <linux/errno.h>
54 #include <linux/types.h>
55 #include <linux/delay.h>
56 #include <linux/dma-mapping.h>
57 #include <linux/timer.h>
58 #include <linux/slab.h>
59 #include <linux/pci.h>
60 #include <linux/aer.h>
61 #include <linux/circ_buf.h>
62 #include <asm/dma.h>
63 #include <asm/io.h>
64 #include <asm/uaccess.h>
65 #include <scsi/scsi_host.h>
66 #include <scsi/scsi.h>
67 #include <scsi/scsi_cmnd.h>
68 #include <scsi/scsi_tcq.h>
69 #include <scsi/scsi_device.h>
70 #include <scsi/scsi_transport.h>
71 #include <scsi/scsicam.h>
72 #include "arcmsr.h"
73 MODULE_AUTHOR("Nick Cheng, C.L. Huang <support@areca.com.tw>");
74 MODULE_DESCRIPTION("Areca ARC11xx/12xx/16xx/188x SAS/SATA RAID Controller Driver");
75 MODULE_LICENSE("Dual BSD/GPL");
76 MODULE_VERSION(ARCMSR_DRIVER_VERSION);
77
78 #define ARCMSR_SLEEPTIME        10
79 #define ARCMSR_RETRYCOUNT       12
80
81 static wait_queue_head_t wait_q;
82 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
83                                         struct scsi_cmnd *cmd);
84 static int arcmsr_iop_confirm(struct AdapterControlBlock *acb);
85 static int arcmsr_abort(struct scsi_cmnd *);
86 static int arcmsr_bus_reset(struct scsi_cmnd *);
87 static int arcmsr_bios_param(struct scsi_device *sdev,
88                 struct block_device *bdev, sector_t capacity, int *info);
89 static int arcmsr_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
90 static int arcmsr_probe(struct pci_dev *pdev,
91                                 const struct pci_device_id *id);
92 static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state);
93 static int arcmsr_resume(struct pci_dev *pdev);
94 static void arcmsr_remove(struct pci_dev *pdev);
95 static void arcmsr_shutdown(struct pci_dev *pdev);
96 static void arcmsr_iop_init(struct AdapterControlBlock *acb);
97 static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
98 static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb);
99 static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
100         u32 intmask_org);
101 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
102 static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb);
103 static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb);
104 static void arcmsr_request_device_map(unsigned long pacb);
105 static void arcmsr_hbaA_request_device_map(struct AdapterControlBlock *acb);
106 static void arcmsr_hbaB_request_device_map(struct AdapterControlBlock *acb);
107 static void arcmsr_hbaC_request_device_map(struct AdapterControlBlock *acb);
108 static void arcmsr_message_isr_bh_fn(struct work_struct *work);
109 static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb);
110 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
111 static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *pACB);
112 static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb);
113 static void arcmsr_hardware_reset(struct AdapterControlBlock *acb);
114 static const char *arcmsr_info(struct Scsi_Host *);
115 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
116 static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *);
117 static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev,
118                                           int queue_depth, int reason)
119 {
120         if (reason != SCSI_QDEPTH_DEFAULT)
121                 return -EOPNOTSUPP;
122
123         if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
124                 queue_depth = ARCMSR_MAX_CMD_PERLUN;
125         scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
126         return queue_depth;
127 }
128
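/*
 * SCSI host template.  The sg_tablesize and max_sectors values below are
 * only compile-time defaults; arcmsr_alloc_ccb_pool() rescales both at
 * probe time from the firmware-reported configuration.
 */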
129 static struct scsi_host_template arcmsr_scsi_host_template = {
130         .module                 = THIS_MODULE,
131         .name                   = "Areca SAS/SATA RAID driver",
132         .info                   = arcmsr_info,
133         .queuecommand           = arcmsr_queue_command,
134         .eh_abort_handler               = arcmsr_abort,
135         .eh_bus_reset_handler   = arcmsr_bus_reset,
136         .bios_param             = arcmsr_bios_param,
137         .change_queue_depth     = arcmsr_adjust_disk_queue_depth,
138         .can_queue              = ARCMSR_MAX_OUTSTANDING_CMD,
139         .this_id                        = ARCMSR_SCSI_INITIATOR_ID,
140         .sg_tablesize                   = ARCMSR_DEFAULT_SG_ENTRIES, 
141         .max_sectors                    = ARCMSR_MAX_XFER_SECTORS_C, 
142         .cmd_per_lun            = ARCMSR_MAX_CMD_PERLUN,
143         .use_clustering         = ENABLE_CLUSTERING,
144         .shost_attrs            = arcmsr_host_attrs,
145         .no_write_same          = 1,
146 };
147
148 static struct pci_device_id arcmsr_device_id_table[] = {
149         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110),
150                 .driver_data = ACB_ADAPTER_TYPE_A},
151         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120),
152                 .driver_data = ACB_ADAPTER_TYPE_A},
153         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130),
154                 .driver_data = ACB_ADAPTER_TYPE_A},
155         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160),
156                 .driver_data = ACB_ADAPTER_TYPE_A},
157         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170),
158                 .driver_data = ACB_ADAPTER_TYPE_A},
159         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200),
160                 .driver_data = ACB_ADAPTER_TYPE_B},
161         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201),
162                 .driver_data = ACB_ADAPTER_TYPE_B},
163         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202),
164                 .driver_data = ACB_ADAPTER_TYPE_B},
165         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210),
166                 .driver_data = ACB_ADAPTER_TYPE_A},
167         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1214),
168                 .driver_data = ACB_ADAPTER_TYPE_D},
169         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220),
170                 .driver_data = ACB_ADAPTER_TYPE_A},
171         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230),
172                 .driver_data = ACB_ADAPTER_TYPE_A},
173         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260),
174                 .driver_data = ACB_ADAPTER_TYPE_A},
175         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270),
176                 .driver_data = ACB_ADAPTER_TYPE_A},
177         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280),
178                 .driver_data = ACB_ADAPTER_TYPE_A},
179         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380),
180                 .driver_data = ACB_ADAPTER_TYPE_A},
181         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381),
182                 .driver_data = ACB_ADAPTER_TYPE_A},
183         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680),
184                 .driver_data = ACB_ADAPTER_TYPE_A},
185         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681),
186                 .driver_data = ACB_ADAPTER_TYPE_A},
187         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880),
188                 .driver_data = ACB_ADAPTER_TYPE_C},
189         {0, 0}, /* Terminating entry */
190 };
191 MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
192
193 static struct pci_driver arcmsr_pci_driver = {
194         .name                   = "arcmsr",
195         .id_table                       = arcmsr_device_id_table,
196         .probe                  = arcmsr_probe,
197         .remove                 = arcmsr_remove,
198         .suspend                = arcmsr_suspend,
199         .resume                 = arcmsr_resume,
200         .shutdown               = arcmsr_shutdown,
201 };
202 /*
203 ****************************************************************************
204 ****************************************************************************
205 */
206
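/*
 * Only type B and type D adapters allocate a second coherent DMA area
 * for their message unit; the other types map the message unit straight
 * from PCI BAR space, so there is nothing extra to free for them.
 */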
207 static void arcmsr_free_mu(struct AdapterControlBlock *acb)
208 {
209         switch (acb->adapter_type) {
210         case ACB_ADAPTER_TYPE_B:
211         case ACB_ADAPTER_TYPE_D: {
212                 dma_free_coherent(&acb->pdev->dev, acb->roundup_ccbsize,
213                         acb->dma_coherent2, acb->dma_coherent_handle2);
214                 break;
215         }
216         }
217 }
218
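/*
 * Map the controller registers.  Which BAR holds the message unit
 * depends on the adapter type: type A and type D use BAR0, type B
 * splits the unit across BAR0 and BAR2, and type C lives in BAR1.
 */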
219 static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
220 {
221         struct pci_dev *pdev = acb->pdev;
222         switch (acb->adapter_type){
223         case ACB_ADAPTER_TYPE_A:{
224                 acb->pmuA = ioremap(pci_resource_start(pdev,0), pci_resource_len(pdev,0));
225                 if (!acb->pmuA) {
226                         printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
227                         return false;
228                 }
229                 break;
230         }
231         case ACB_ADAPTER_TYPE_B:{
232                 void __iomem *mem_base0, *mem_base1;
233                 mem_base0 = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
234                 if (!mem_base0) {
235                         printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
236                         return false;
237                 }
238                 mem_base1 = ioremap(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2));
239                 if (!mem_base1) {
240                         iounmap(mem_base0);
241                         printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
242                         return false;
243                 }
244                 acb->mem_base0 = mem_base0;
245                 acb->mem_base1 = mem_base1;
246                 break;
247         }
248         case ACB_ADAPTER_TYPE_C:{
249                 acb->pmuC = ioremap_nocache(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
250                 if (!acb->pmuC) {
251                         printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
252                         return false;
253                 }
254                 if (readl(&acb->pmuC->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
255                         writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &acb->pmuC->outbound_doorbell_clear);/*clear interrupt*/
256                         return true;
257                 }
258                 break;
259         }
260         case ACB_ADAPTER_TYPE_D: {
261                 void __iomem *mem_base0;
262                 unsigned long addr, range, flags;
263
264                 addr = (unsigned long)pci_resource_start(pdev, 0);
265                 range = pci_resource_len(pdev, 0);
266                 flags = pci_resource_flags(pdev, 0);
267                 if (flags & IORESOURCE_CACHEABLE)
268                         mem_base0 = ioremap(addr, range);
269                 else
270                         mem_base0 = ioremap_nocache(addr, range);
271                 if (!mem_base0) {
272                         pr_notice("arcmsr%d: memory mapping region fail\n",
273                                 acb->host->host_no);
274                         return false;
275                 }
276                 acb->mem_base0 = mem_base0;
277                 break;
278                 }
279         }
280         return true;
281 }
282
283 static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb)
284 {
285         switch (acb->adapter_type) {
286         case ACB_ADAPTER_TYPE_A:{
287                 iounmap(acb->pmuA);
288         }
289         break;
290         case ACB_ADAPTER_TYPE_B:{
291                 iounmap(acb->mem_base0);
292                 iounmap(acb->mem_base1);
293         }
294
295         break;
296         case ACB_ADAPTER_TYPE_C:{
297                 iounmap(acb->pmuC);
298         }
299         break;
300         case ACB_ADAPTER_TYPE_D:
301                 iounmap(acb->mem_base0);
302                 break;
303         }
304 }
305
306 static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
307 {
308         irqreturn_t handle_state;
309         struct AdapterControlBlock *acb = dev_id;
310
311         handle_state = arcmsr_interrupt(acb);
312         return handle_state;
313 }
314
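/*
 * BIOS geometry: prefer the geometry recorded in the on-disk partition
 * table; otherwise fall back to 64 heads/32 sectors, or 255/63 once the
 * cylinder count would exceed 1024.
 */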
315 static int arcmsr_bios_param(struct scsi_device *sdev,
316                 struct block_device *bdev, sector_t capacity, int *geom)
317 {
318         int ret, heads, sectors, cylinders, total_capacity;
319         unsigned char *buffer;/* return copy of block device's partition table */
320
321         buffer = scsi_bios_ptable(bdev);
322         if (buffer) {
323                 ret = scsi_partsize(buffer, capacity, &geom[2], &geom[0], &geom[1]);
324                 kfree(buffer);
325                 if (ret != -1)
326                         return ret;
327         }
328         total_capacity = capacity;
329         heads = 64;
330         sectors = 32;
331         cylinders = total_capacity / (heads * sectors);
332         if (cylinders > 1024) {
333                 heads = 255;
334                 sectors = 63;
335                 cylinders = total_capacity / (heads * sectors);
336         }
337         geom[0] = heads;
338         geom[1] = sectors;
339         geom[2] = cylinders;
340         return 0;
341 }
342
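/*
 * The wait_msgint_ready helpers below poll the adapter's doorbell or
 * interrupt-status register every 10 ms, up to 2000 times (about 20
 * seconds), for the "message command done" acknowledgement.
 */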
343 static uint8_t arcmsr_hbaA_wait_msgint_ready(struct AdapterControlBlock *acb)
344 {
345         struct MessageUnit_A __iomem *reg = acb->pmuA;
346         int i;
347
348         for (i = 0; i < 2000; i++) {
349                 if (readl(&reg->outbound_intstatus) &
350                                 ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
351                         writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT,
352                                 &reg->outbound_intstatus);
353                         return true;
354                 }
355                 msleep(10);
356         } /* max 20 seconds */
357
358         return false;
359 }
360
361 static uint8_t arcmsr_hbaB_wait_msgint_ready(struct AdapterControlBlock *acb)
362 {
363         struct MessageUnit_B *reg = acb->pmuB;
364         int i;
365
366         for (i = 0; i < 2000; i++) {
367                 if (readl(reg->iop2drv_doorbell)
368                         & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
369                         writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN,
370                                         reg->iop2drv_doorbell);
371                         writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT,
372                                         reg->drv2iop_doorbell);
373                         return true;
374                 }
375                 msleep(10);
376         } /* max 20 seconds */
377
378         return false;
379 }
380
381 static uint8_t arcmsr_hbaC_wait_msgint_ready(struct AdapterControlBlock *pACB)
382 {
383         struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
384         int i;
385
386         for (i = 0; i < 2000; i++) {
387                 if (readl(&phbcmu->outbound_doorbell)
388                                 & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
389                         writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR,
390                                 &phbcmu->outbound_doorbell_clear); /*clear interrupt*/
391                         return true;
392                 }
393                 msleep(10);
394         } /* max 20 seconds */
395
396         return false;
397 }
398
399 static bool arcmsr_hbaD_wait_msgint_ready(struct AdapterControlBlock *pACB)
400 {
401         struct MessageUnit_D *reg = pACB->pmuD;
402         int i;
403
404         for (i = 0; i < 2000; i++) {
405                 if (readl(reg->outbound_doorbell)
406                         & ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
407                         writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
408                                 reg->outbound_doorbell);
409                         return true;
410                 }
411                 msleep(10);
412         } /* max 20 seconds */
413         return false;
414 }
415
416 static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb)
417 {
418         struct MessageUnit_A __iomem *reg = acb->pmuA;
419         int retry_count = 30;
420         writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
421         do {
422                 if (arcmsr_hbaA_wait_msgint_ready(acb))
423                         break;
424                 else {
425                         retry_count--;
426                         printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' "
427                                 "timeout, retry count down = %d\n", acb->host->host_no, retry_count);
428                 }
429         } while (retry_count != 0);
430 }
431
432 static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb)
433 {
434         struct MessageUnit_B *reg = acb->pmuB;
435         int retry_count = 30;
436         writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell);
437         do {
438                 if (arcmsr_hbaB_wait_msgint_ready(acb))
439                         break;
440                 else {
441                         retry_count--;
442                         printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' "
443                                 "timeout, retry count down = %d\n", acb->host->host_no, retry_count);
444                 }
445         } while (retry_count != 0);
446 }
447
448 static void arcmsr_hbaC_flush_cache(struct AdapterControlBlock *pACB)
449 {
450         struct MessageUnit_C __iomem *reg = pACB->pmuC;
451         int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */
452         writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
453         writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
454         do {
455                 if (arcmsr_hbaC_wait_msgint_ready(pACB)) {
456                         break;
457                 } else {
458                         retry_count--;
459                         printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' "
460                                 "timeout, retry count down = %d\n", pACB->host->host_no, retry_count);
461                 }
462         } while (retry_count != 0);
463         return;
464 }
465
466 static void arcmsr_hbaD_flush_cache(struct AdapterControlBlock *pACB)
467 {
468         int retry_count = 15;
469         struct MessageUnit_D *reg = pACB->pmuD;
470
471         writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, reg->inbound_msgaddr0);
472         do {
473                 if (arcmsr_hbaD_wait_msgint_ready(pACB))
474                         break;
475
476                 retry_count--;
477                 pr_notice("arcmsr%d: wait 'flush adapter "
478                         "cache' timeout, retry count down = %d\n",
479                         pACB->host->host_no, retry_count);
480         } while (retry_count != 0);
481 }
482
483 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
484 {
485         switch (acb->adapter_type) {
486
487         case ACB_ADAPTER_TYPE_A: {
488                 arcmsr_hbaA_flush_cache(acb);
489                 }
490                 break;
491
492         case ACB_ADAPTER_TYPE_B: {
493                 arcmsr_hbaB_flush_cache(acb);
494                 }
495                 break;
496         case ACB_ADAPTER_TYPE_C: {
497                 arcmsr_hbaC_flush_cache(acb);
498                 }
499                 break;
500         case ACB_ADAPTER_TYPE_D:
501                 arcmsr_hbaD_flush_cache(acb);
502                 break;
503         }
504 }
505
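/*
 * Allocate the command control block (CCB) pool as one coherent DMA
 * region.  Each CCB is rounded up to a 32-byte boundary; type A/B
 * adapters are handed the CDB address pre-shifted right by 5 bits,
 * while type C/D take the full address.  The scatter-gather limit and
 * max_sectors are also rescaled here from the firmware config version.
 */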
506 static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
507 {
508         struct pci_dev *pdev = acb->pdev;
509         void *dma_coherent;
510         dma_addr_t dma_coherent_handle;
511         struct CommandControlBlock *ccb_tmp;
512         int i = 0, j = 0;
513         dma_addr_t cdb_phyaddr;
514         unsigned long roundup_ccbsize;
515         unsigned long max_xfer_len;
516         unsigned long max_sg_entrys;
517         uint32_t  firm_config_version;
518
519         for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
520                 for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
521                         acb->devstate[i][j] = ARECA_RAID_GONE;
522
523         max_xfer_len = ARCMSR_MAX_XFER_LEN;
524         max_sg_entrys = ARCMSR_DEFAULT_SG_ENTRIES;
525         firm_config_version = acb->firm_cfg_version;
526         if((firm_config_version & 0xFF) >= 3){
527                 max_xfer_len = (ARCMSR_CDB_SG_PAGE_LENGTH << ((firm_config_version >> 8) & 0xFF)) * 1024;/* max 4M byte */
528                 max_sg_entrys = (max_xfer_len/4096);
529         }
530         acb->host->max_sectors = max_xfer_len/512;
531         acb->host->sg_tablesize = max_sg_entrys;
532         roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32);
533         acb->uncache_size = roundup_ccbsize * ARCMSR_MAX_FREECCB_NUM;
534         dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL);
535         if(!dma_coherent){
536                 printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no);
537                 return -ENOMEM;
538         }
539         acb->dma_coherent = dma_coherent;
540         acb->dma_coherent_handle = dma_coherent_handle;
541         memset(dma_coherent, 0, acb->uncache_size);
542         ccb_tmp = dma_coherent;
543         acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle;
544         for(i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++){
545                 cdb_phyaddr = dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb);
546                 switch (acb->adapter_type) {
547                 case ACB_ADAPTER_TYPE_A:
548                 case ACB_ADAPTER_TYPE_B:
549                         ccb_tmp->cdb_phyaddr = cdb_phyaddr >> 5;
550                         break;
551                 case ACB_ADAPTER_TYPE_C:
552                 case ACB_ADAPTER_TYPE_D:
553                         ccb_tmp->cdb_phyaddr = cdb_phyaddr;
554                         break;
555                 }
556                 acb->pccb_pool[i] = ccb_tmp;
557                 ccb_tmp->acb = acb;
558                 INIT_LIST_HEAD(&ccb_tmp->list);
559                 list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
560                 ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize);
561                 dma_coherent_handle = dma_coherent_handle + roundup_ccbsize;
562         }
563         return 0;
564 }
565
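/*
 * Bottom half for the "get config" message interrupt: compare the
 * device map reported by the firmware with the cached copy and hot
 * add/remove SCSI devices for every target/LUN bit that changed.
 */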
566 static void arcmsr_message_isr_bh_fn(struct work_struct *work) 
567 {
568         struct AdapterControlBlock *acb = container_of(work,
569                 struct AdapterControlBlock, arcmsr_do_message_isr_bh);
570         char *acb_dev_map = (char *)acb->device_map;
571         uint32_t __iomem *signature = NULL;
572         char __iomem *devicemap = NULL;
573         int target, lun;
574         struct scsi_device *psdev;
575         char diff, temp;
576
577         switch (acb->adapter_type) {
578         case ACB_ADAPTER_TYPE_A: {
579                 struct MessageUnit_A __iomem *reg  = acb->pmuA;
580
581                 signature = (uint32_t __iomem *)(&reg->message_rwbuffer[0]);
582                 devicemap = (char __iomem *)(&reg->message_rwbuffer[21]);
583                 break;
584         }
585         case ACB_ADAPTER_TYPE_B: {
586                 struct MessageUnit_B *reg  = acb->pmuB;
587
588                 signature = (uint32_t __iomem *)(&reg->message_rwbuffer[0]);
589                 devicemap = (char __iomem *)(&reg->message_rwbuffer[21]);
590                 break;
591         }
592         case ACB_ADAPTER_TYPE_C: {
593                 struct MessageUnit_C __iomem *reg  = acb->pmuC;
594
595                 signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
596                 devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
597                 break;
598         }
599         case ACB_ADAPTER_TYPE_D: {
600                 struct MessageUnit_D *reg  = acb->pmuD;
601
602                 signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
603                 devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
604                 break;
605         }
606         }
607         atomic_inc(&acb->rq_map_token);
608         if (readl(signature) != ARCMSR_SIGNATURE_GET_CONFIG)
609                 return;
610         for (target = 0; target < ARCMSR_MAX_TARGETID - 1;
611                 target++) {
612                 temp = readb(devicemap);
613                 diff = (*acb_dev_map) ^ temp;
614                 if (diff != 0) {
615                         *acb_dev_map = temp;
616                         for (lun = 0; lun < ARCMSR_MAX_TARGETLUN;
617                                 lun++) {
618                                 if ((diff & 0x01) == 1 &&
619                                         (temp & 0x01) == 1) {
620                                         scsi_add_device(acb->host,
621                                                 0, target, lun);
622                                 } else if ((diff & 0x01) == 1
623                                         && (temp & 0x01) == 0) {
624                                         psdev = scsi_device_lookup(acb->host,
625                                                 0, target, lun);
626                                         if (psdev != NULL) {
627                                                 scsi_remove_device(psdev);
628                                                 scsi_device_put(psdev);
629                                         }
630                                 }
631                                 temp >>= 1;
632                                 diff >>= 1;
633                         }
634                 }
635                 devicemap++;
636                 acb_dev_map++;
637         }
638 }
639
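/*
 * Interrupt setup falls back in three stages: try MSI-X first, then a
 * single MSI vector, and finally the legacy shared INTx line.
 */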
640 static int
641 arcmsr_request_irq(struct pci_dev *pdev, struct AdapterControlBlock *acb)
642 {
643         int     i, j, r;
644         struct msix_entry entries[ARCMST_NUM_MSIX_VECTORS];
645
646         for (i = 0; i < ARCMST_NUM_MSIX_VECTORS; i++)
647                 entries[i].entry = i;
648         r = pci_enable_msix_range(pdev, entries, 1, ARCMST_NUM_MSIX_VECTORS);
649         if (r < 0)
650                 goto msi_int;
651         acb->msix_vector_count = r;
652         for (i = 0; i < r; i++) {
653                 if (request_irq(entries[i].vector,
654                         arcmsr_do_interrupt, 0, "arcmsr", acb)) {
655                         pr_warn("arcmsr%d: request_irq =%d failed!\n",
656                                 acb->host->host_no, entries[i].vector);
657                         for (j = 0 ; j < i ; j++)
658                                 free_irq(entries[j].vector, acb);
659                         pci_disable_msix(pdev);
660                         goto msi_int;
661                 }
662                 acb->entries[i] = entries[i];
663         }
664         acb->acb_flags |= ACB_F_MSIX_ENABLED;
665         pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no);
666         return SUCCESS;
667 msi_int:
668         if (pci_enable_msi_exact(pdev, 1) < 0)
669                 goto legacy_int;
670         if (request_irq(pdev->irq, arcmsr_do_interrupt,
671                 IRQF_SHARED, "arcmsr", acb)) {
672                 pr_warn("arcmsr%d: request_irq =%d failed!\n",
673                         acb->host->host_no, pdev->irq);
674                 pci_disable_msi(pdev);
675                 goto legacy_int;
676         }
677         acb->acb_flags |= ACB_F_MSI_ENABLED;
678         pr_info("arcmsr%d: msi enabled\n", acb->host->host_no);
679         return SUCCESS;
680 legacy_int:
681         if (request_irq(pdev->irq, arcmsr_do_interrupt,
682                 IRQF_SHARED, "arcmsr", acb)) {
683                 pr_warn("arcmsr%d: request_irq = %d failed!\n",
684                         acb->host->host_no, pdev->irq);
685                 return FAILED;
686         }
687         return SUCCESS;
688 }
689
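/*
 * Probe path: enable the PCI device, allocate the Scsi_Host, set the
 * DMA mask, map the registers, fetch the firmware spec, build the CCB
 * pool, register the host, hook up interrupts, start the adapter and
 * kick off the periodic device-map timer before scanning the bus.
 */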
690 static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
691 {
692         struct Scsi_Host *host;
693         struct AdapterControlBlock *acb;
694         uint8_t bus,dev_fun;
695         int error;
696         error = pci_enable_device(pdev);
697         if(error){
698                 return -ENODEV;
699         }
700         host = scsi_host_alloc(&arcmsr_scsi_host_template, sizeof(struct AdapterControlBlock));
701         if(!host){
702                 goto pci_disable_dev;
703         }
704         error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
705         if(error){
706                 error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
707                 if(error){
708                         printk(KERN_WARNING
709                                "scsi%d: No suitable DMA mask available\n",
710                                host->host_no);
711                         goto scsi_host_release;
712                 }
713         }
714         init_waitqueue_head(&wait_q);
715         bus = pdev->bus->number;
716         dev_fun = pdev->devfn;
717         acb = (struct AdapterControlBlock *) host->hostdata;
718         memset(acb,0,sizeof(struct AdapterControlBlock));
719         acb->pdev = pdev;
720         acb->host = host;
721         host->max_lun = ARCMSR_MAX_TARGETLUN;
722         host->max_id = ARCMSR_MAX_TARGETID;             /*16:8*/
723         host->max_cmd_len = 16;                         /*this is issue of 64bit LBA ,over 2T byte*/
724         host->can_queue = ARCMSR_MAX_OUTSTANDING_CMD;
725         host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;          
726         host->this_id = ARCMSR_SCSI_INITIATOR_ID;
727         host->unique_id = (bus << 8) | dev_fun;
728         pci_set_drvdata(pdev, host);
729         pci_set_master(pdev);
730         error = pci_request_regions(pdev, "arcmsr");
731         if(error){
732                 goto scsi_host_release;
733         }
734         spin_lock_init(&acb->eh_lock);
735         spin_lock_init(&acb->ccblist_lock);
736         spin_lock_init(&acb->postq_lock);
737         spin_lock_init(&acb->doneq_lock);
738         spin_lock_init(&acb->rqbuffer_lock);
739         spin_lock_init(&acb->wqbuffer_lock);
740         acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
741                         ACB_F_MESSAGE_RQBUFFER_CLEARED |
742                         ACB_F_MESSAGE_WQBUFFER_READED);
743         acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
744         INIT_LIST_HEAD(&acb->ccb_free_list);
745         acb->adapter_type = id->driver_data;
746         error = arcmsr_remap_pciregion(acb);
747         if(!error){
748                 goto pci_release_regs;
749         }
750         error = arcmsr_get_firmware_spec(acb);
751         if(!error){
752                 goto unmap_pci_region;
753         }
754         error = arcmsr_alloc_ccb_pool(acb);
755         if(error){
756                 goto free_hbb_mu;
757         }
758         error = scsi_add_host(host, &pdev->dev);
759         if(error){
760                 goto free_ccb_pool;
761         }
762         if (arcmsr_request_irq(pdev, acb) == FAILED)
763                 goto scsi_host_remove;
764         arcmsr_iop_init(acb);
765         INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
766         atomic_set(&acb->rq_map_token, 16);
767         atomic_set(&acb->ante_token_value, 16);
768         acb->fw_flag = FW_NORMAL;
769         init_timer(&acb->eternal_timer);
770         acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
771         acb->eternal_timer.data = (unsigned long) acb;
772         acb->eternal_timer.function = &arcmsr_request_device_map;
773         add_timer(&acb->eternal_timer);
774         if(arcmsr_alloc_sysfs_attr(acb))
775                 goto out_free_sysfs;
776         scsi_scan_host(host);
777         return 0;
778 out_free_sysfs:
779         del_timer_sync(&acb->eternal_timer);
780         flush_work(&acb->arcmsr_do_message_isr_bh);
781         arcmsr_stop_adapter_bgrb(acb);
782         arcmsr_flush_adapter_cache(acb);
783         arcmsr_free_irq(pdev, acb);
784 scsi_host_remove:
785         scsi_remove_host(host);
786 free_ccb_pool:
787         arcmsr_free_ccb_pool(acb);
788 free_hbb_mu:
789         arcmsr_free_mu(acb);
790 unmap_pci_region:
791         arcmsr_unmap_pciregion(acb);
792 pci_release_regs:
793         pci_release_regions(pdev);
794 scsi_host_release:
795         scsi_host_put(host);
796 pci_disable_dev:
797         pci_disable_device(pdev);
798         return -ENODEV;
799 }
800
801 static void arcmsr_free_irq(struct pci_dev *pdev,
802                 struct AdapterControlBlock *acb)
803 {
804         int i;
805
806         if (acb->acb_flags & ACB_F_MSI_ENABLED) {
807                 free_irq(pdev->irq, acb);
808                 pci_disable_msi(pdev);
809         } else if (acb->acb_flags & ACB_F_MSIX_ENABLED) {
810                 for (i = 0; i < acb->msix_vector_count; i++)
811                         free_irq(acb->entries[i].vector, acb);
812                 pci_disable_msix(pdev);
813         } else
814                 free_irq(pdev->irq, acb);
815 }
816
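/*
 * Legacy PCI power-management hooks: suspend quiesces the adapter
 * (interrupts off, background rebuild stopped, cache flushed) before
 * powering down; resume redoes most of the probe-time initialisation.
 */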
817 static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state)
818 {
819         uint32_t intmask_org;
820         struct Scsi_Host *host = pci_get_drvdata(pdev);
821         struct AdapterControlBlock *acb =
822                 (struct AdapterControlBlock *)host->hostdata;
823
824         intmask_org = arcmsr_disable_outbound_ints(acb);
825         arcmsr_free_irq(pdev, acb);
826         del_timer_sync(&acb->eternal_timer);
827         flush_work(&acb->arcmsr_do_message_isr_bh);
828         arcmsr_stop_adapter_bgrb(acb);
829         arcmsr_flush_adapter_cache(acb);
830         pci_set_drvdata(pdev, host);
831         pci_save_state(pdev);
832         pci_disable_device(pdev);
833         pci_set_power_state(pdev, pci_choose_state(pdev, state));
834         return 0;
835 }
836
837 static int arcmsr_resume(struct pci_dev *pdev)
838 {
839         int error;
840         struct Scsi_Host *host = pci_get_drvdata(pdev);
841         struct AdapterControlBlock *acb =
842                 (struct AdapterControlBlock *)host->hostdata;
843
844         pci_set_power_state(pdev, PCI_D0);
845         pci_enable_wake(pdev, PCI_D0, 0);
846         pci_restore_state(pdev);
847         if (pci_enable_device(pdev)) {
848                 pr_warn("%s: pci_enable_device error\n", __func__);
849                 return -ENODEV;
850         }
851         error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
852         if (error) {
853                 error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
854                 if (error) {
855                         pr_warn("scsi%d: No suitable DMA mask available\n",
856                                host->host_no);
857                         goto controller_unregister;
858                 }
859         }
860         pci_set_master(pdev);
861         if (arcmsr_request_irq(pdev, acb) == FAILED)
862                 goto controller_stop;
863         arcmsr_iop_init(acb);
864         INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
865         atomic_set(&acb->rq_map_token, 16);
866         atomic_set(&acb->ante_token_value, 16);
867         acb->fw_flag = FW_NORMAL;
868         init_timer(&acb->eternal_timer);
869         acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
870         acb->eternal_timer.data = (unsigned long) acb;
871         acb->eternal_timer.function = &arcmsr_request_device_map;
872         add_timer(&acb->eternal_timer);
873         return 0;
874 controller_stop:
875         arcmsr_stop_adapter_bgrb(acb);
876         arcmsr_flush_adapter_cache(acb);
877 controller_unregister:
878         scsi_remove_host(host);
879         arcmsr_free_ccb_pool(acb);
880         arcmsr_unmap_pciregion(acb);
881         pci_release_regions(pdev);
882         scsi_host_put(host);
883         pci_disable_device(pdev);
884         return -ENODEV;
885 }
886
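/*
 * The abort_allcmd helpers post the "abort all outstanding commands"
 * message to the adapter and wait for the message-done acknowledgement.
 */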
887 static uint8_t arcmsr_hbaA_abort_allcmd(struct AdapterControlBlock *acb)
888 {
889         struct MessageUnit_A __iomem *reg = acb->pmuA;
890         writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
891         if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
892                 printk(KERN_NOTICE
893                         "arcmsr%d: wait 'abort all outstanding command' timeout\n"
894                         , acb->host->host_no);
895                 return false;
896         }
897         return true;
898 }
899
900 static uint8_t arcmsr_hbaB_abort_allcmd(struct AdapterControlBlock *acb)
901 {
902         struct MessageUnit_B *reg = acb->pmuB;
903
904         writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell);
905         if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
906                 printk(KERN_NOTICE
907                         "arcmsr%d: wait 'abort all outstanding command' timeout\n"
908                         , acb->host->host_no);
909                 return false;
910         }
911         return true;
912 }
913 static uint8_t arcmsr_hbaC_abort_allcmd(struct AdapterControlBlock *pACB)
914 {
915         struct MessageUnit_C __iomem *reg = pACB->pmuC;
916         writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
917         writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
918         if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
919                 printk(KERN_NOTICE
920                         "arcmsr%d: wait 'abort all outstanding command' timeout\n"
921                         , pACB->host->host_no);
922                 return false;
923         }
924         return true;
925 }
926
927 static uint8_t arcmsr_hbaD_abort_allcmd(struct AdapterControlBlock *pACB)
928 {
929         struct MessageUnit_D *reg = pACB->pmuD;
930
931         writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, reg->inbound_msgaddr0);
932         if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
933                 pr_notice("arcmsr%d: wait 'abort all outstanding "
934                         "command' timeout\n", pACB->host->host_no);
935                 return false;
936         }
937         return true;
938 }
939
940 static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
941 {
942         uint8_t rtnval = 0;
943         switch (acb->adapter_type) {
944         case ACB_ADAPTER_TYPE_A: {
945                 rtnval = arcmsr_hbaA_abort_allcmd(acb);
946                 }
947                 break;
948
949         case ACB_ADAPTER_TYPE_B: {
950                 rtnval = arcmsr_hbaB_abort_allcmd(acb);
951                 }
952                 break;
953
954         case ACB_ADAPTER_TYPE_C: {
955                 rtnval = arcmsr_hbaC_abort_allcmd(acb);
956                 }
957                 break;
958
959         case ACB_ADAPTER_TYPE_D:
960                 rtnval = arcmsr_hbaD_abort_allcmd(acb);
961                 break;
962         }
963         return rtnval;
964 }
965
966 static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
967 {
968         struct scsi_cmnd *pcmd = ccb->pcmd;
969
970         scsi_dma_unmap(pcmd);
971 }
972
973 static void arcmsr_ccb_complete(struct CommandControlBlock *ccb)
974 {
975         struct AdapterControlBlock *acb = ccb->acb;
976         struct scsi_cmnd *pcmd = ccb->pcmd;
977         unsigned long flags;
978         atomic_dec(&acb->ccboutstandingcount);
979         arcmsr_pci_unmap_dma(ccb);
980         ccb->startdone = ARCMSR_CCB_DONE;
981         spin_lock_irqsave(&acb->ccblist_lock, flags);
982         list_add_tail(&ccb->list, &acb->ccb_free_list);
983         spin_unlock_irqrestore(&acb->ccblist_lock, flags);
984         pcmd->scsi_done(pcmd);
985 }
986
987 static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
988 {
989
990         struct scsi_cmnd *pcmd = ccb->pcmd;
991         struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
992         pcmd->result = DID_OK << 16;
993         if (sensebuffer) {
994                 int sense_data_length =
995                         sizeof(struct SENSE_DATA) < SCSI_SENSE_BUFFERSIZE
996                         ? sizeof(struct SENSE_DATA) : SCSI_SENSE_BUFFERSIZE;
997                 memset(sensebuffer, 0, SCSI_SENSE_BUFFERSIZE);
998                 memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
999                 sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
1000                 sensebuffer->Valid = 1;
1001         }
1002 }
1003
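/*
 * Mask all outbound interrupts and return the previous mask so that
 * arcmsr_enable_outbound_ints() can restore it later.
 */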
1004 static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
1005 {
1006         u32 orig_mask = 0;
1007         switch (acb->adapter_type) {    
1008         case ACB_ADAPTER_TYPE_A : {
1009                 struct MessageUnit_A __iomem *reg = acb->pmuA;
1010                 orig_mask = readl(&reg->outbound_intmask);
1011                 writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \
1012                                                 &reg->outbound_intmask);
1013                 }
1014                 break;
1015         case ACB_ADAPTER_TYPE_B : {
1016                 struct MessageUnit_B *reg = acb->pmuB;
1017                 orig_mask = readl(reg->iop2drv_doorbell_mask);
1018                 writel(0, reg->iop2drv_doorbell_mask);
1019                 }
1020                 break;
1021         case ACB_ADAPTER_TYPE_C:{
1022                 struct MessageUnit_C __iomem *reg = acb->pmuC;
1023                 /* disable all outbound interrupt */
1024                 orig_mask = readl(&reg->host_int_mask); /* disable outbound message0 int */
1025                 writel(orig_mask|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
1026                 }
1027                 break;
1028         case ACB_ADAPTER_TYPE_D: {
1029                 struct MessageUnit_D *reg = acb->pmuD;
1030                 /* disable all outbound interrupt */
1031                 writel(ARCMSR_ARC1214_ALL_INT_DISABLE, reg->pcief0_int_enable);
1032                 }
1033                 break;
1034         }
1035         return orig_mask;
1036 }
1037
1038 static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb, 
1039                         struct CommandControlBlock *ccb, bool error)
1040 {
1041         uint8_t id, lun;
1042         id = ccb->pcmd->device->id;
1043         lun = ccb->pcmd->device->lun;
1044         if (!error) {
1045                 if (acb->devstate[id][lun] == ARECA_RAID_GONE)
1046                         acb->devstate[id][lun] = ARECA_RAID_GOOD;
1047                 ccb->pcmd->result = DID_OK << 16;
1048                 arcmsr_ccb_complete(ccb);
1049         }else{
1050                 switch (ccb->arcmsr_cdb.DeviceStatus) {
1051                 case ARCMSR_DEV_SELECT_TIMEOUT: {
1052                         acb->devstate[id][lun] = ARECA_RAID_GONE;
1053                         ccb->pcmd->result = DID_NO_CONNECT << 16;
1054                         arcmsr_ccb_complete(ccb);
1055                         }
1056                         break;
1057
1058                 case ARCMSR_DEV_ABORTED:
1059
1060                 case ARCMSR_DEV_INIT_FAIL: {
1061                         acb->devstate[id][lun] = ARECA_RAID_GONE;
1062                         ccb->pcmd->result = DID_BAD_TARGET << 16;
1063                         arcmsr_ccb_complete(ccb);
1064                         }
1065                         break;
1066
1067                 case ARCMSR_DEV_CHECK_CONDITION: {
1068                         acb->devstate[id][lun] = ARECA_RAID_GOOD;
1069                         arcmsr_report_sense_info(ccb);
1070                         arcmsr_ccb_complete(ccb);
1071                         }
1072                         break;
1073
1074                 default:
1075                         printk(KERN_NOTICE
1076                                 "arcmsr%d: scsi id = %d lun = %d isr got command error done, "
1077                                 "but got unknown DeviceStatus = 0x%x\n"
1078                                 , acb->host->host_no
1079                                 , id
1080                                 , lun
1081                                 , ccb->arcmsr_cdb.DeviceStatus);
1082                         acb->devstate[id][lun] = ARECA_RAID_GONE;
1083                         ccb->pcmd->result = DID_NO_CONNECT << 16;
1084                         arcmsr_ccb_complete(ccb);
1085                         break;
1086                 }
1087         }
1088 }
1089
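/*
 * Complete one CCB taken off the done queue: normally hand it to
 * arcmsr_report_ccb_state().  CCBs already marked aborted are finished
 * with DID_ABORT, and anything else that does not belong to this
 * adapter or was never started is only logged.
 */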
1090 static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error)
1091 {
1092         int id, lun;
1093         if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
1094                 if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
1095                         struct scsi_cmnd *abortcmd = pCCB->pcmd;
1096                         if (abortcmd) {
1097                                 id = abortcmd->device->id;
1098                                 lun = abortcmd->device->lun;                            
1099                                 abortcmd->result |= DID_ABORT << 16;
1100                                 arcmsr_ccb_complete(pCCB);
1101                                 printk(KERN_NOTICE "arcmsr%d: pCCB = '0x%p' isr got aborted command\n",
1102                                         acb->host->host_no, pCCB);
1103                         }
1104                         return;
1105                 }
1106                 printk(KERN_NOTICE "arcmsr%d: isr got an illegal ccb command "
1107                                 "done acb = '0x%p' "
1108                                 "ccb = '0x%p' ccbacb = '0x%p' startdone = 0x%x"
1109                                 " ccboutstandingcount = %d\n"
1110                                 , acb->host->host_no
1111                                 , acb
1112                                 , pCCB
1113                                 , pCCB->acb
1114                                 , pCCB->startdone
1115                                 , atomic_read(&acb->ccboutstandingcount));
1116                 return;
1117         }
1118         arcmsr_report_ccb_state(acb, pCCB, error);
1119 }
1120
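/*
 * Drain whatever the adapter still has sitting in its outbound post
 * queue after an abort/reset, completing each CCB found there.  The
 * queue layout is adapter-type specific.
 */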
1121 static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
1122 {
1123         int i = 0;
1124         uint32_t flag_ccb, ccb_cdb_phy;
1125         struct ARCMSR_CDB *pARCMSR_CDB;
1126         bool error;
1127         struct CommandControlBlock *pCCB;
1128         switch (acb->adapter_type) {
1129
1130         case ACB_ADAPTER_TYPE_A: {
1131                 struct MessageUnit_A __iomem *reg = acb->pmuA;
1132                 uint32_t outbound_intstatus;
1133                 outbound_intstatus = readl(&reg->outbound_intstatus) &
1134                                         acb->outbound_int_enable;
1135                 /*clear and abort all outbound posted Q*/
1136                 writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
1137                 while(((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF)
1138                                 && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
1139                         pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
1140                         pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
1141                         error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
1142                         arcmsr_drain_donequeue(acb, pCCB, error);
1143                 }
1144                 }
1145                 break;
1146
1147         case ACB_ADAPTER_TYPE_B: {
1148                 struct MessageUnit_B *reg = acb->pmuB;
1149                 /*clear all outbound posted Q*/
1150                 writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); /* clear doorbell interrupt */
1151                 for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
1152                         flag_ccb = reg->done_qbuffer[i];
1153                         if (flag_ccb != 0) {
1154                                 reg->done_qbuffer[i] = 0;
1155                                 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+(flag_ccb << 5));/*frame must be 32 bytes aligned*/
1156                                 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
1157                                 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
1158                                 arcmsr_drain_donequeue(acb, pCCB, error);
1159                         }
1160                         reg->post_qbuffer[i] = 0;
1161                 }
1162                 reg->doneq_index = 0;
1163                 reg->postq_index = 0;
1164                 }
1165                 break;
1166         case ACB_ADAPTER_TYPE_C: {
1167                 struct MessageUnit_C __iomem *reg = acb->pmuC;
1168                 while ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
1169                         /*need to do*/
1170                         flag_ccb = readl(&reg->outbound_queueport_low);
1171                         ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
1172                         pARCMSR_CDB = (struct  ARCMSR_CDB *)(acb->vir2phy_offset+ccb_cdb_phy);/*frame must be 32 bytes aligned*/
1173                         pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
1174                         error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
1175                         arcmsr_drain_donequeue(acb, pCCB, error);
1176                 }
1177                 }
1178                 break;
1179         case ACB_ADAPTER_TYPE_D: {
1180                 struct MessageUnit_D  *pmu = acb->pmuD;
1181                 uint32_t outbound_write_pointer;
1182                 uint32_t doneq_index, index_stripped, addressLow, residual, toggle;
1183                 unsigned long flags;
1184
1185                 residual = atomic_read(&acb->ccboutstandingcount);
1186                 for (i = 0; i < residual; i++) {
1187                         spin_lock_irqsave(&acb->doneq_lock, flags);
1188                         outbound_write_pointer =
1189                                 pmu->done_qbuffer[0].addressLow + 1;
1190                         doneq_index = pmu->doneq_index;
1191                         if ((doneq_index & 0xFFF) !=
1192                                 (outbound_write_pointer & 0xFFF)) {
1193                                 toggle = doneq_index & 0x4000;
1194                                 index_stripped = (doneq_index & 0xFFF) + 1;
1195                                 index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
1196                                 pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
1197                                         ((toggle ^ 0x4000) + 1);
1198                                 doneq_index = pmu->doneq_index;
1199                                 spin_unlock_irqrestore(&acb->doneq_lock, flags);
1200                                 addressLow = pmu->done_qbuffer[doneq_index &
1201                                         0xFFF].addressLow;
1202                                 ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
1203                                 pARCMSR_CDB = (struct  ARCMSR_CDB *)
1204                                         (acb->vir2phy_offset + ccb_cdb_phy);
1205                                 pCCB = container_of(pARCMSR_CDB,
1206                                         struct CommandControlBlock, arcmsr_cdb);
1207                                 error = (addressLow &
1208                                         ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ?
1209                                         true : false;
1210                                 arcmsr_drain_donequeue(acb, pCCB, error);
1211                                 writel(doneq_index,
1212                                         pmu->outboundlist_read_pointer);
1213                         } else {
1214                                 spin_unlock_irqrestore(&acb->doneq_lock, flags);
1215                                 mdelay(10);
1216                         }
1217                 }
1218                 pmu->postq_index = 0;
1219                 pmu->doneq_index = 0x40FF;
1220                 }
1221                 break;
1222         }
1223 }
1224
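/*
 * Device removal: stop the adapter and poll up to
 * ARCMSR_MAX_OUTSTANDING_CMD rounds (25 ms each) for outstanding CCBs
 * to drain before force-completing the remainder with DID_ABORT.
 */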
1225 static void arcmsr_remove(struct pci_dev *pdev)
1226 {
1227         struct Scsi_Host *host = pci_get_drvdata(pdev);
1228         struct AdapterControlBlock *acb =
1229                 (struct AdapterControlBlock *) host->hostdata;
1230         int poll_count = 0;
1231         arcmsr_free_sysfs_attr(acb);
1232         scsi_remove_host(host);
1233         flush_work(&acb->arcmsr_do_message_isr_bh);
1234         del_timer_sync(&acb->eternal_timer);
1235         arcmsr_disable_outbound_ints(acb);
1236         arcmsr_stop_adapter_bgrb(acb);
1237         arcmsr_flush_adapter_cache(acb);        
1238         acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
1239         acb->acb_flags &= ~ACB_F_IOP_INITED;
1240
1241         for (poll_count = 0; poll_count < ARCMSR_MAX_OUTSTANDING_CMD; poll_count++){
1242                 if (!atomic_read(&acb->ccboutstandingcount))
1243                         break;
1244                 arcmsr_interrupt(acb); /* FIXME: need spinlock */
1245                 msleep(25);
1246         }
1247
1248         if (atomic_read(&acb->ccboutstandingcount)) {
1249                 int i;
1250
1251                 arcmsr_abort_allcmd(acb);
1252                 arcmsr_done4abort_postqueue(acb);
1253                 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
1254                         struct CommandControlBlock *ccb = acb->pccb_pool[i];
1255                         if (ccb->startdone == ARCMSR_CCB_START) {
1256                                 ccb->startdone = ARCMSR_CCB_ABORTED;
1257                                 ccb->pcmd->result = DID_ABORT << 16;
1258                                 arcmsr_ccb_complete(ccb);
1259                         }
1260                 }
1261         }
1262         arcmsr_free_irq(pdev, acb);
1263         arcmsr_free_ccb_pool(acb);
1264         arcmsr_free_mu(acb);
1265         arcmsr_unmap_pciregion(acb);
1266         pci_release_regions(pdev);
1267         scsi_host_put(host);
1268         pci_disable_device(pdev);
1269 }
1270
1271 static void arcmsr_shutdown(struct pci_dev *pdev)
1272 {
1273         struct Scsi_Host *host = pci_get_drvdata(pdev);
1274         struct AdapterControlBlock *acb =
1275                 (struct AdapterControlBlock *)host->hostdata;
1276         del_timer_sync(&acb->eternal_timer);
1277         arcmsr_disable_outbound_ints(acb);
1278         arcmsr_free_irq(pdev, acb);
1279         flush_work(&acb->arcmsr_do_message_isr_bh);
1280         arcmsr_stop_adapter_bgrb(acb);
1281         arcmsr_flush_adapter_cache(acb);
1282 }
1283
1284 static int arcmsr_module_init(void)
1285 {
1286         int error = 0;
1287         error = pci_register_driver(&arcmsr_pci_driver);
1288         return error;
1289 }
1290
1291 static void arcmsr_module_exit(void)
1292 {
1293         pci_unregister_driver(&arcmsr_pci_driver);
1294 }
1295 module_init(arcmsr_module_init);
1296 module_exit(arcmsr_module_exit);
1297
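/*
** Re-enable outbound interrupts from a previously saved mask.  Type A and
** type C controllers use mask registers (the doorbell, post-queue and
** message bits are cleared to unmask them); type B sets the enable bits in
** iop2drv_doorbell_mask, and type D writes the ARC1214 all-interrupt
** enable bits into pcief0_int_enable.
*/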
1298 static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
1299                                                 u32 intmask_org)
1300 {
1301         u32 mask;
1302         switch (acb->adapter_type) {
1303
1304         case ACB_ADAPTER_TYPE_A: {
1305                 struct MessageUnit_A __iomem *reg = acb->pmuA;
1306                 mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
1307                              ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|
1308                              ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
1309                 writel(mask, &reg->outbound_intmask);
1310                 acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
1311                 }
1312                 break;
1313
1314         case ACB_ADAPTER_TYPE_B: {
1315                 struct MessageUnit_B *reg = acb->pmuB;
1316                 mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK |
1317                         ARCMSR_IOP2DRV_DATA_READ_OK |
1318                         ARCMSR_IOP2DRV_CDB_DONE |
1319                         ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
1320                 writel(mask, reg->iop2drv_doorbell_mask);
1321                 acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
1322                 }
1323                 break;
1324         case ACB_ADAPTER_TYPE_C: {
1325                 struct MessageUnit_C __iomem *reg = acb->pmuC;
1326                 mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK|ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
1327                 writel(intmask_org & mask, &reg->host_int_mask);
1328                 acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
1329                 }
1330                 break;
1331         case ACB_ADAPTER_TYPE_D: {
1332                 struct MessageUnit_D *reg = acb->pmuD;
1333
1334                 mask = ARCMSR_ARC1214_ALL_INT_ENABLE;
1335                 writel(intmask_org | mask, reg->pcief0_int_enable);
1336                 break;
1337                 }
1338         }
1339 }
1340
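/*
** Translate a scsi_cmnd into an Areca CDB: copy target/LUN and the SCSI
** CDB bytes, then map the scatter-gather list.  Segments whose DMA address
** fits in 32 bits use SG32ENTRY, otherwise SG64ENTRY with IS_SG64_ADDR set
** in the length word.  msgPages is the resulting CDB size rounded up to
** 256-byte units, and CDBs larger than 256 bytes are flagged with
** ARCMSR_CDB_FLAG_SGL_BSIZE.
*/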
1341 static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
1342         struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
1343 {
1344         struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
1345         int8_t *psge = (int8_t *)&arcmsr_cdb->u;
1346         __le32 address_lo, address_hi;
1347         int arccdbsize = 0x30;
1348         __le32 length = 0;
1349         int i;
1350         struct scatterlist *sg;
1351         int nseg;
1352         ccb->pcmd = pcmd;
1353         memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
1354         arcmsr_cdb->TargetID = pcmd->device->id;
1355         arcmsr_cdb->LUN = pcmd->device->lun;
1356         arcmsr_cdb->Function = 1;
1357         arcmsr_cdb->msgContext = 0;
1358         memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);
1359
1360         nseg = scsi_dma_map(pcmd);
1361         if (unlikely(nseg > acb->host->sg_tablesize || nseg < 0))
1362                 return FAILED;
1363         scsi_for_each_sg(pcmd, sg, nseg, i) {
1364                 /* Get the physical address of the current data pointer */
1365                 length = cpu_to_le32(sg_dma_len(sg));
1366                 address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg)));
1367                 address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg)));
1368                 if (address_hi == 0) {
1369                         struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
1370
1371                         pdma_sg->address = address_lo;
1372                         pdma_sg->length = length;
1373                         psge += sizeof (struct SG32ENTRY);
1374                         arccdbsize += sizeof (struct SG32ENTRY);
1375                 } else {
1376                         struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;
1377
1378                         pdma_sg->addresshigh = address_hi;
1379                         pdma_sg->address = address_lo;
1380                         pdma_sg->length = length|cpu_to_le32(IS_SG64_ADDR);
1381                         psge += sizeof (struct SG64ENTRY);
1382                         arccdbsize += sizeof (struct SG64ENTRY);
1383                 }
1384         }
1385         arcmsr_cdb->sgcount = (uint8_t)nseg;
1386         arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
1387         arcmsr_cdb->msgPages = arccdbsize/0x100 + (arccdbsize % 0x100 ? 1 : 0);
1388         if (arccdbsize > 256)
1389                 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
1390         if (pcmd->sc_data_direction == DMA_TO_DEVICE)
1391                 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
1392         ccb->arc_cdb_size = arccdbsize;
1393         return SUCCESS;
1394 }
1395
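/*
** Hand a built CCB to the IOP.  Type A writes the CDB physical address
** straight into inbound_queueport; type B stores it in the circular
** post_qbuffer and rings the DRV2IOP_CDB_POSTED doorbell; type C encodes
** the (capped) CDB size into the low bits of the address written to
** inbound_queueport_low; type D fills an InBound_SRB slot and advances
** postq_index, carrying the 0x4000 toggle bit across wrap-around.
*/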
1396 static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
1397 {
1398         uint32_t cdb_phyaddr = ccb->cdb_phyaddr;
1399         struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
1400         atomic_inc(&acb->ccboutstandingcount);
1401         ccb->startdone = ARCMSR_CCB_START;
1402         switch (acb->adapter_type) {
1403         case ACB_ADAPTER_TYPE_A: {
1404                 struct MessageUnit_A __iomem *reg = acb->pmuA;
1405
1406                 if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
1407                         writel(cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
1408                         &reg->inbound_queueport);
1409                 else
1410                         writel(cdb_phyaddr, &reg->inbound_queueport);
1411                 break;
1412         }
1413
1414         case ACB_ADAPTER_TYPE_B: {
1415                 struct MessageUnit_B *reg = acb->pmuB;
1416                 uint32_t ending_index, index = reg->postq_index;
1417
1418                 ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
1419                 reg->post_qbuffer[ending_index] = 0;
1420                 if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
1421                         reg->post_qbuffer[index] =
1422                                 cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE;
1423                 } else {
1424                         reg->post_qbuffer[index] = cdb_phyaddr;
1425                 }
1426                 index++;
1427                 index %= ARCMSR_MAX_HBB_POSTQUEUE; /* wrap back to 0 past the last index */
1428                 reg->postq_index = index;
1429                 writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell);
1430                 }
1431                 break;
1432         case ACB_ADAPTER_TYPE_C: {
1433                 struct MessageUnit_C __iomem *phbcmu = acb->pmuC;
1434                 uint32_t ccb_post_stamp, arc_cdb_size;
1435
1436                 arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
1437                 ccb_post_stamp = (cdb_phyaddr | ((arc_cdb_size - 1) >> 6) | 1);
1438                 if (acb->cdb_phyaddr_hi32) {
1439                         writel(acb->cdb_phyaddr_hi32, &phbcmu->inbound_queueport_high);
1440                         writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
1441                 } else {
1442                         writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
1443                 }
1444                 }
1445                 break;
1446         case ACB_ADAPTER_TYPE_D: {
1447                 struct MessageUnit_D  *pmu = acb->pmuD;
1448                 u16 index_stripped;
1449                 u16 postq_index, toggle;
1450                 unsigned long flags;
1451                 struct InBound_SRB *pinbound_srb;
1452
1453                 spin_lock_irqsave(&acb->postq_lock, flags);
1454                 postq_index = pmu->postq_index;
1455                 pinbound_srb = (struct InBound_SRB *)&(pmu->post_qbuffer[postq_index & 0xFF]);
1456                 pinbound_srb->addressHigh = dma_addr_hi32(cdb_phyaddr);
1457                 pinbound_srb->addressLow = dma_addr_lo32(cdb_phyaddr);
1458                 pinbound_srb->length = ccb->arc_cdb_size >> 2;
1459                 arcmsr_cdb->msgContext = dma_addr_lo32(cdb_phyaddr);
1460                 toggle = postq_index & 0x4000;
1461                 index_stripped = postq_index + 1;
1462                 index_stripped &= (ARCMSR_MAX_ARC1214_POSTQUEUE - 1);
1463                 pmu->postq_index = index_stripped ? (index_stripped | toggle) :
1464                         (toggle ^ 0x4000);
1465                 writel(postq_index, pmu->inboundlist_write_pointer);
1466                 spin_unlock_irqrestore(&acb->postq_lock, flags);
1467                 break;
1468                 }
1469         }
1470 }
1471
1472 static void arcmsr_hbaA_stop_bgrb(struct AdapterControlBlock *acb)
1473 {
1474         struct MessageUnit_A __iomem *reg = acb->pmuA;
1475         acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1476         writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
1477         if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
1478                 printk(KERN_NOTICE
1479                         "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
1480                         , acb->host->host_no);
1481         }
1482 }
1483
1484 static void arcmsr_hbaB_stop_bgrb(struct AdapterControlBlock *acb)
1485 {
1486         struct MessageUnit_B *reg = acb->pmuB;
1487         acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1488         writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell);
1489
1490         if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
1491                 printk(KERN_NOTICE
1492                         "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
1493                         , acb->host->host_no);
1494         }
1495 }
1496
1497 static void arcmsr_hbaC_stop_bgrb(struct AdapterControlBlock *pACB)
1498 {
1499         struct MessageUnit_C __iomem *reg = pACB->pmuC;
1500         pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
1501         writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
1502         writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
1503         if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
1504                 printk(KERN_NOTICE
1505                         "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
1506                         , pACB->host->host_no);
1507         }
1508         return;
1509 }
1510
1511 static void arcmsr_hbaD_stop_bgrb(struct AdapterControlBlock *pACB)
1512 {
1513         struct MessageUnit_D *reg = pACB->pmuD;
1514
1515         pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
1516         writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, reg->inbound_msgaddr0);
1517         if (!arcmsr_hbaD_wait_msgint_ready(pACB))
1518                 pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
1519                         "timeout\n", pACB->host->host_no);
1520 }
1521
1522 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
1523 {
1524         switch (acb->adapter_type) {
1525         case ACB_ADAPTER_TYPE_A: {
1526                 arcmsr_hbaA_stop_bgrb(acb);
1527                 }
1528                 break;
1529
1530         case ACB_ADAPTER_TYPE_B: {
1531                 arcmsr_hbaB_stop_bgrb(acb);
1532                 }
1533                 break;
1534         case ACB_ADAPTER_TYPE_C: {
1535                 arcmsr_hbaC_stop_bgrb(acb);
1536                 }
1537                 break;
1538         case ACB_ADAPTER_TYPE_D:
1539                 arcmsr_hbaD_stop_bgrb(acb);
1540                 break;
1541         }
1542 }
1543
1544 static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
1545 {
1546         dma_free_coherent(&acb->pdev->dev, acb->uncache_size, acb->dma_coherent, acb->dma_coherent_handle);
1547 }
1548
1549 static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
1550 {
1551         switch (acb->adapter_type) {
1552         case ACB_ADAPTER_TYPE_A: {
1553                 struct MessageUnit_A __iomem *reg = acb->pmuA;
1554                 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
1555                 }
1556                 break;
1557
1558         case ACB_ADAPTER_TYPE_B: {
1559                 struct MessageUnit_B *reg = acb->pmuB;
1560                 writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
1561                 }
1562                 break;
1563         case ACB_ADAPTER_TYPE_C: {
1564                 struct MessageUnit_C __iomem *reg = acb->pmuC;
1565
1566                 writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
1567                 }
1568                 break;
1569         case ACB_ADAPTER_TYPE_D: {
1570                 struct MessageUnit_D *reg = acb->pmuD;
1571                 writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
1572                         reg->inbound_doorbell);
1573                 }
1574                 break;
1575         }
1576 }
1577
1578 static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
1579 {
1580         switch (acb->adapter_type) {
1581         case ACB_ADAPTER_TYPE_A: {
1582                 struct MessageUnit_A __iomem *reg = acb->pmuA;
1583                 /*
1584                 ** ring the inbound doorbell to tell the IOP the driver's data write
1585                 ** is done; wait for the reply interrupt before posting the next Qbuffer
1586                 */
1587                 writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK, &reg->inbound_doorbell);
1588                 }
1589                 break;
1590
1591         case ACB_ADAPTER_TYPE_B: {
1592                 struct MessageUnit_B *reg = acb->pmuB;
1593                 /*
1594                 ** ring the inbound doorbell to tell the IOP the driver's data write
1595                 ** is done; wait for the reply interrupt before posting the next Qbuffer
1596                 */
1597                 writel(ARCMSR_DRV2IOP_DATA_WRITE_OK, reg->drv2iop_doorbell);
1598                 }
1599                 break;
1600         case ACB_ADAPTER_TYPE_C: {
1601                 struct MessageUnit_C __iomem *reg = acb->pmuC;
1602                 /*
1603                 ** ring the inbound doorbell to tell the IOP the driver's data write
1604                 ** is done; wait for the reply interrupt before posting the next Qbuffer
1605                 */
1606                 writel(ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK, &reg->inbound_doorbell);
1607                 }
1608                 break;
1609         case ACB_ADAPTER_TYPE_D: {
1610                 struct MessageUnit_D *reg = acb->pmuD;
1611                 writel(ARCMSR_ARC1214_DRV2IOP_DATA_IN_READY,
1612                         reg->inbound_doorbell);
1613                 }
1614                 break;
1615         }
1616 }
1617
1618 struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
1619 {
1620         struct QBUFFER __iomem *qbuffer = NULL;
1621         switch (acb->adapter_type) {
1622
1623         case ACB_ADAPTER_TYPE_A: {
1624                 struct MessageUnit_A __iomem *reg = acb->pmuA;
1625                 qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer;
1626                 }
1627                 break;
1628
1629         case ACB_ADAPTER_TYPE_B: {
1630                 struct MessageUnit_B *reg = acb->pmuB;
1631                 qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
1632                 }
1633                 break;
1634         case ACB_ADAPTER_TYPE_C: {
1635                 struct MessageUnit_C __iomem *phbcmu = acb->pmuC;
1636                 qbuffer = (struct QBUFFER __iomem *)&phbcmu->message_rbuffer;
1637                 }
1638                 break;
1639         case ACB_ADAPTER_TYPE_D: {
1640                 struct MessageUnit_D *reg = acb->pmuD;
1641                 qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
1642                 }
1643                 break;
1644         }
1645         return qbuffer;
1646 }
1647
1648 static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb)
1649 {
1650         struct QBUFFER __iomem *pqbuffer = NULL;
1651         switch (acb->adapter_type) {
1652
1653         case ACB_ADAPTER_TYPE_A: {
1654                 struct MessageUnit_A __iomem *reg = acb->pmuA;
1655                 pqbuffer = (struct QBUFFER __iomem *) &reg->message_wbuffer;
1656                 }
1657                 break;
1658
1659         case ACB_ADAPTER_TYPE_B: {
1660                 struct MessageUnit_B  *reg = acb->pmuB;
1661                 pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
1662                 }
1663                 break;
1664         case ACB_ADAPTER_TYPE_C: {
1665                 struct MessageUnit_C __iomem *reg = acb->pmuC;
1666                 pqbuffer = (struct QBUFFER __iomem *)&reg->message_wbuffer;
1667                 }
1668                 break;
1669         case ACB_ADAPTER_TYPE_D: {
1670                 struct MessageUnit_D *reg = acb->pmuD;
1671                 pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
1672                 }
1673                 break;
1674         }
1675         return pqbuffer;
1676 }
1677
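/*
** Copy the IOP's request Qbuffer into acb->rqbuffer using 32-bit reads.
** Used for type C and type D adapters, whose message buffers are read a
** DWORD at a time into a small bounce buffer and then pushed byte by byte
** into the circular rqbuffer at rqbuf_putIndex.
*/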
1678 static uint32_t
1679 arcmsr_Read_iop_rqbuffer_in_DWORD(struct AdapterControlBlock *acb,
1680                 struct QBUFFER __iomem *prbuffer)
1681 {
1682         uint8_t *pQbuffer;
1683         uint8_t *buf1 = NULL;
1684         uint32_t __iomem *iop_data;
1685         uint32_t iop_len, data_len, *buf2 = NULL;
1686
1687         iop_data = (uint32_t __iomem *)prbuffer->data;
1688         iop_len = readl(&prbuffer->data_len);
1689         if (iop_len > 0) {
1690                 buf1 = kmalloc(128, GFP_ATOMIC);
1691                 buf2 = (uint32_t *)buf1;
1692                 if (buf1 == NULL)
1693                         return 0;
1694                 data_len = iop_len;
1695                 while (data_len >= 4) {
1696                         *buf2++ = readl(iop_data);
1697                         iop_data++;
1698                         data_len -= 4;
1699                 }
1700                 if (data_len)
1701                         *buf2 = readl(iop_data);
1702                 buf2 = (uint32_t *)buf1;
1703         }
1704         while (iop_len > 0) {
1705                 pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex];
1706                 *pQbuffer = *buf1;
1707                 acb->rqbuf_putIndex++;
1708                         /* wrap back to 0 past the last index */
1709                 acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
1710                 buf1++;
1711                 iop_len--;
1712         }
1713         kfree(buf2);
1714         /* let IOP know data has been read */
1715         arcmsr_iop_message_read(acb);
1716         return 1;
1717 }
1718
1719 uint32_t
1720 arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb,
1721         struct QBUFFER __iomem *prbuffer) {
1722
1723         uint8_t *pQbuffer;
1724         uint8_t __iomem *iop_data;
1725         uint32_t iop_len;
1726
1727         if (acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D))
1728                 return arcmsr_Read_iop_rqbuffer_in_DWORD(acb, prbuffer);
1729         iop_data = (uint8_t __iomem *)prbuffer->data;
1730         iop_len = readl(&prbuffer->data_len);
1731         while (iop_len > 0) {
1732                 pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex];
1733                 *pQbuffer = readb(iop_data);
1734                 acb->rqbuf_putIndex++;
1735                 acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
1736                 iop_data++;
1737                 iop_len--;
1738         }
1739         arcmsr_iop_message_read(acb);
1740         return 1;
1741 }
1742
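/*
** IOP has written data for the driver: under rqbuffer_lock, compute the
** free space left in the circular rqbuffer as
** (putIndex - getIndex - 1) & (ARCMSR_MAX_QBUFFER - 1) (the buffer size is
** a power of two, so the mask acts as a modulo) and fetch the IOP data
** only if it fits; otherwise set ACB_F_IOPDATA_OVERFLOW so the read is
** retried later.
*/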
1743 static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
1744 {
1745         unsigned long flags;
1746         struct QBUFFER __iomem  *prbuffer;
1747         int32_t buf_empty_len;
1748
1749         spin_lock_irqsave(&acb->rqbuffer_lock, flags);
1750         prbuffer = arcmsr_get_iop_rqbuffer(acb);
1751         buf_empty_len = (acb->rqbuf_putIndex - acb->rqbuf_getIndex - 1) &
1752                 (ARCMSR_MAX_QBUFFER - 1);
1753         if (buf_empty_len >= readl(&prbuffer->data_len)) {
1754                 if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
1755                         acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
1756         } else
1757                 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
1758         spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
1759 }
1760
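/*
** Mirror of the DWORD read path for writes: drain up to 124 bytes from the
** circular wqbuffer into a bounce buffer, push them to the IOP write
** Qbuffer as 32-bit words, record the length and ring the "data wrote"
** doorbell.  Only runs once the IOP has acknowledged the previous write
** (ACB_F_MESSAGE_WQBUFFER_READED).
*/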
1761 static void arcmsr_write_ioctldata2iop_in_DWORD(struct AdapterControlBlock *acb)
1762 {
1763         uint8_t *pQbuffer;
1764         struct QBUFFER __iomem *pwbuffer;
1765         uint8_t *buf1 = NULL;
1766         uint32_t __iomem *iop_data;
1767         uint32_t allxfer_len = 0, data_len, *buf2 = NULL, data;
1768
1769         if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
1770                 buf1 = kmalloc(128, GFP_ATOMIC);
1771                 buf2 = (uint32_t *)buf1;
1772                 if (buf1 == NULL)
1773                         return;
1774
1775                 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
1776                 pwbuffer = arcmsr_get_iop_wqbuffer(acb);
1777                 iop_data = (uint32_t __iomem *)pwbuffer->data;
1778                 while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex)
1779                         && (allxfer_len < 124)) {
1780                         pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex];
1781                         *buf1 = *pQbuffer;
1782                         acb->wqbuf_getIndex++;
1783                         acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER;
1784                         buf1++;
1785                         allxfer_len++;
1786                 }
1787                 data_len = allxfer_len;
1788                 buf1 = (uint8_t *)buf2;
1789                 while (data_len >= 4) {
1790                         data = *buf2++;
1791                         writel(data, iop_data);
1792                         iop_data++;
1793                         data_len -= 4;
1794                 }
1795                 if (data_len) {
1796                         data = *buf2;
1797                         writel(data, iop_data);
1798                 }
1799                 writel(allxfer_len, &pwbuffer->data_len);
1800                 kfree(buf1);
1801                 arcmsr_iop_message_wrote(acb);
1802         }
1803 }
1804
1805 void
1806 arcmsr_write_ioctldata2iop(struct AdapterControlBlock *acb)
1807 {
1808         uint8_t *pQbuffer;
1809         struct QBUFFER __iomem *pwbuffer;
1810         uint8_t __iomem *iop_data;
1811         int32_t allxfer_len = 0;
1812
1813         if (acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D)) {
1814                 arcmsr_write_ioctldata2iop_in_DWORD(acb);
1815                 return;
1816         }
1817         if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
1818                 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
1819                 pwbuffer = arcmsr_get_iop_wqbuffer(acb);
1820                 iop_data = (uint8_t __iomem *)pwbuffer->data;
1821                 while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex)
1822                         && (allxfer_len < 124)) {
1823                         pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex];
1824                         writeb(*pQbuffer, iop_data);
1825                         acb->wqbuf_getIndex++;
1826                         acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER;
1827                         iop_data++;
1828                         allxfer_len++;
1829                 }
1830                 writel(allxfer_len, &pwbuffer->data_len);
1831                 arcmsr_iop_message_wrote(acb);
1832         }
1833 }
1834
1835 static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
1836 {
1837         unsigned long flags;
1838
1839         spin_lock_irqsave(&acb->wqbuffer_lock, flags);
1840         acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
1841         if (acb->wqbuf_getIndex != acb->wqbuf_putIndex)
1842                 arcmsr_write_ioctldata2iop(acb);
1843         if (acb->wqbuf_getIndex == acb->wqbuf_putIndex)
1844                 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
1845         spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
1846 }
1847
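/*
** Type A doorbell interrupt: acknowledge the doorbell bits by writing them
** back, dispatch the "data wrote"/"data read" handlers, and loop until the
** outbound doorbell register reads back clear.
*/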
1848 static void arcmsr_hbaA_doorbell_isr(struct AdapterControlBlock *acb)
1849 {
1850         uint32_t outbound_doorbell;
1851         struct MessageUnit_A __iomem *reg = acb->pmuA;
1852         outbound_doorbell = readl(&reg->outbound_doorbell);
1853         do {
1854                 writel(outbound_doorbell, &reg->outbound_doorbell);
1855                 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK)
1856                         arcmsr_iop2drv_data_wrote_handle(acb);
1857                 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK)
1858                         arcmsr_iop2drv_data_read_handle(acb);
1859                 outbound_doorbell = readl(&reg->outbound_doorbell);
1860         } while (outbound_doorbell & (ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK
1861                 | ARCMSR_OUTBOUND_IOP331_DATA_READ_OK));
1862 }
1863 static void arcmsr_hbaC_doorbell_isr(struct AdapterControlBlock *pACB)
1864 {
1865         uint32_t outbound_doorbell;
1866         struct MessageUnit_C __iomem *reg = pACB->pmuC;
1867         /*
1868         *******************************************************************
1869         **  Note: it may be necessary to check whether wrqbuffer_lock is held here.
1870         **  DOORBELL: ding! dong!
1871         **  Check whether the firmware has any data that needs to be fetched.
1872         *******************************************************************
1873         */
1874         outbound_doorbell = readl(&reg->outbound_doorbell);
1875         do {
1876                 writel(outbound_doorbell, &reg->outbound_doorbell_clear);
1877                 readl(&reg->outbound_doorbell_clear);
1878                 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK)
1879                         arcmsr_iop2drv_data_wrote_handle(pACB);
1880                 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK)
1881                         arcmsr_iop2drv_data_read_handle(pACB);
1882                 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE)
1883                         arcmsr_hbaC_message_isr(pACB);
1884                 outbound_doorbell = readl(&reg->outbound_doorbell);
1885         } while (outbound_doorbell & (ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK
1886                 | ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK
1887                 | ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE));
1888 }
1889
1890 static void arcmsr_hbaD_doorbell_isr(struct AdapterControlBlock *pACB)
1891 {
1892         uint32_t outbound_doorbell;
1893         struct MessageUnit_D  *pmu = pACB->pmuD;
1894
1895         outbound_doorbell = readl(pmu->outbound_doorbell);
1896         do {
1897                 writel(outbound_doorbell, pmu->outbound_doorbell);
1898                 if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE)
1899                         arcmsr_hbaD_message_isr(pACB);
1900                 if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK)
1901                         arcmsr_iop2drv_data_wrote_handle(pACB);
1902                 if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK)
1903                         arcmsr_iop2drv_data_read_handle(pACB);
1904                 outbound_doorbell = readl(pmu->outbound_doorbell);
1905         } while (outbound_doorbell & (ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK
1906                 | ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK
1907                 | ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE));
1908 }
1909
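/*
** Type A command completion: each word popped from outbound_queueport
** carries the CDB physical address divided by 32 (frames are 32-byte
** aligned) together with an error flag; shifting it left by 5 and adding
** vir2phy_offset recovers the driver's CommandControlBlock.
*/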
1910 static void arcmsr_hbaA_postqueue_isr(struct AdapterControlBlock *acb)
1911 {
1912         uint32_t flag_ccb;
1913         struct MessageUnit_A __iomem *reg = acb->pmuA;
1914         struct ARCMSR_CDB *pARCMSR_CDB;
1915         struct CommandControlBlock *pCCB;
1916         bool error;
1917         while ((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) {
1918                 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
1919                 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
1920                 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
1921                 arcmsr_drain_donequeue(acb, pCCB, error);
1922         }
1923 }
1924 static void arcmsr_hbaB_postqueue_isr(struct AdapterControlBlock *acb)
1925 {
1926         uint32_t index;
1927         uint32_t flag_ccb;
1928         struct MessageUnit_B *reg = acb->pmuB;
1929         struct ARCMSR_CDB *pARCMSR_CDB;
1930         struct CommandControlBlock *pCCB;
1931         bool error;
1932         index = reg->doneq_index;
1933         while ((flag_ccb = reg->done_qbuffer[index]) != 0) {
1934                 reg->done_qbuffer[index] = 0;
1935                 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+(flag_ccb << 5));/*frame must be 32 bytes aligned*/
1936                 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
1937                 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
1938                 arcmsr_drain_donequeue(acb, pCCB, error);
1939                 index++;
1940                 index %= ARCMSR_MAX_HBB_POSTQUEUE;
1941                 reg->doneq_index = index;
1942         }
1943 }
1944
1945 static void arcmsr_hbaC_postqueue_isr(struct AdapterControlBlock *acb)
1946 {
1947         struct MessageUnit_C __iomem *phbcmu;
1948         struct ARCMSR_CDB *arcmsr_cdb;
1949         struct CommandControlBlock *ccb;
1950         uint32_t flag_ccb, ccb_cdb_phy, throttling = 0;
1951         int error;
1952
1953         phbcmu = acb->pmuC;
1954         /* areca cdb command done */
1955         /* Use correct offset and size for syncing */
1956
1957         while ((flag_ccb = readl(&phbcmu->outbound_queueport_low)) !=
1958                         0xFFFFFFFF) {
1959                 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
1960                 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset
1961                         + ccb_cdb_phy);
1962                 ccb = container_of(arcmsr_cdb, struct CommandControlBlock,
1963                         arcmsr_cdb);
1964                 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
1965                         ? true : false;
1966                 /* check if command done with no error */
1967                 arcmsr_drain_donequeue(acb, ccb, error);
1968                 throttling++;
1969                 if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
1970                         writel(ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING,
1971                                 &phbcmu->inbound_doorbell);
1972                         throttling = 0;
1973                 }
1974         }
1975 }
1976
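/*
** Type D (ARC1214) completion queue: the IOP's write pointer is derived
** from slot 0 of done_qbuffer, and doneq_index keeps a 12-bit ring index
** plus a 0x4000 toggle bit that flips on wrap-around.  Each consumed
** entry's addressLow carries the CDB physical address (low nibble masked
** off) and the MODE1 error flag; the read pointer is written back to the
** IOP after every entry.
*/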
1977 static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb)
1978 {
1979         u32 outbound_write_pointer, doneq_index, index_stripped, toggle;
1980         uint32_t addressLow, ccb_cdb_phy;
1981         int error;
1982         struct MessageUnit_D  *pmu;
1983         struct ARCMSR_CDB *arcmsr_cdb;
1984         struct CommandControlBlock *ccb;
1985         unsigned long flags;
1986
1987         spin_lock_irqsave(&acb->doneq_lock, flags);
1988         pmu = acb->pmuD;
1989         outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1;
1990         doneq_index = pmu->doneq_index;
1991         if ((doneq_index & 0xFFF) != (outbound_write_pointer & 0xFFF)) {
1992                 do {
1993                         toggle = doneq_index & 0x4000;
1994                         index_stripped = (doneq_index & 0xFFF) + 1;
1995                         index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
1996                         pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
1997                                 ((toggle ^ 0x4000) + 1);
1998                         doneq_index = pmu->doneq_index;
1999                         addressLow = pmu->done_qbuffer[doneq_index &
2000                                 0xFFF].addressLow;
2001                         ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
2002                         arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset
2003                                 + ccb_cdb_phy);
2004                         ccb = container_of(arcmsr_cdb,
2005                                 struct CommandControlBlock, arcmsr_cdb);
2006                         error = (addressLow & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
2007                                 ? true : false;
2008                         arcmsr_drain_donequeue(acb, ccb, error);
2009                         writel(doneq_index, pmu->outboundlist_read_pointer);
2010                 } while ((doneq_index & 0xFFF) !=
2011                         (outbound_write_pointer & 0xFFF));
2012         }
2013         writel(ARCMSR_ARC1214_OUTBOUND_LIST_INTERRUPT_CLEAR,
2014                 pmu->outboundlist_interrupt_cause);
2015         readl(pmu->outboundlist_interrupt_cause);
2016         spin_unlock_irqrestore(&acb->doneq_lock, flags);
2017 }
2018
2019 /*
2020 **********************************************************************************
2021 ** Handle a message interrupt
2022 **
2023 ** The only message interrupt we expect is in response to a query for the current adapter config.  
2024 ** We want this in order to compare the drivemap so that we can detect newly-attached drives.
2025 **********************************************************************************
2026 */
2027 static void arcmsr_hbaA_message_isr(struct AdapterControlBlock *acb)
2028 {
2029         struct MessageUnit_A __iomem *reg  = acb->pmuA;
2030         /*clear interrupt and message state*/
2031         writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, &reg->outbound_intstatus);
2032         schedule_work(&acb->arcmsr_do_message_isr_bh);
2033 }
2034 static void arcmsr_hbaB_message_isr(struct AdapterControlBlock *acb)
2035 {
2036         struct MessageUnit_B *reg  = acb->pmuB;
2037
2038         /*clear interrupt and message state*/
2039         writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
2040         schedule_work(&acb->arcmsr_do_message_isr_bh);
2041 }
2042 /*
2043 **********************************************************************************
2044 ** Handle a message interrupt
2045 **
2046 ** The only message interrupt we expect is in response to a query for the
2047 ** current adapter config.
2048 ** We want this in order to compare the drivemap so that we can detect newly-attached drives.
2049 **********************************************************************************
2050 */
2051 static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *acb)
2052 {
2053         struct MessageUnit_C __iomem *reg  = acb->pmuC;
2054         /*clear interrupt and message state*/
2055         writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear);
2056         schedule_work(&acb->arcmsr_do_message_isr_bh);
2057 }
2058
2059 static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb)
2060 {
2061         struct MessageUnit_D *reg  = acb->pmuD;
2062
2063         writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE, reg->outbound_doorbell);
2064         readl(reg->outbound_doorbell);
2065         schedule_work(&acb->arcmsr_do_message_isr_bh);
2066 }
2067
2068 static int arcmsr_hbaA_handle_isr(struct AdapterControlBlock *acb)
2069 {
2070         uint32_t outbound_intstatus;
2071         struct MessageUnit_A __iomem *reg = acb->pmuA;
2072         outbound_intstatus = readl(&reg->outbound_intstatus) &
2073                 acb->outbound_int_enable;
2074         if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT))
2075                 return IRQ_NONE;
2076         do {
2077                 writel(outbound_intstatus, &reg->outbound_intstatus);
2078                 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT)
2079                         arcmsr_hbaA_doorbell_isr(acb);
2080                 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT)
2081                         arcmsr_hbaA_postqueue_isr(acb);
2082                 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT)
2083                         arcmsr_hbaA_message_isr(acb);
2084                 outbound_intstatus = readl(&reg->outbound_intstatus) &
2085                         acb->outbound_int_enable;
2086         } while (outbound_intstatus & (ARCMSR_MU_OUTBOUND_DOORBELL_INT
2087                 | ARCMSR_MU_OUTBOUND_POSTQUEUE_INT
2088                 | ARCMSR_MU_OUTBOUND_MESSAGE0_INT));
2089         return IRQ_HANDLED;
2090 }
2091
2092 static int arcmsr_hbaB_handle_isr(struct AdapterControlBlock *acb)
2093 {
2094         uint32_t outbound_doorbell;
2095         struct MessageUnit_B *reg = acb->pmuB;
2096         outbound_doorbell = readl(reg->iop2drv_doorbell) &
2097                                 acb->outbound_int_enable;
2098         if (!outbound_doorbell)
2099                 return IRQ_NONE;
2100         do {
2101                 writel(~outbound_doorbell, reg->iop2drv_doorbell);
2102                 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
2103                 if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK)
2104                         arcmsr_iop2drv_data_wrote_handle(acb);
2105                 if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK)
2106                         arcmsr_iop2drv_data_read_handle(acb);
2107                 if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE)
2108                         arcmsr_hbaB_postqueue_isr(acb);
2109                 if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE)
2110                         arcmsr_hbaB_message_isr(acb);
2111                 outbound_doorbell = readl(reg->iop2drv_doorbell) &
2112                         acb->outbound_int_enable;
2113         } while (outbound_doorbell & (ARCMSR_IOP2DRV_DATA_WRITE_OK
2114                 | ARCMSR_IOP2DRV_DATA_READ_OK
2115                 | ARCMSR_IOP2DRV_CDB_DONE
2116                 | ARCMSR_IOP2DRV_MESSAGE_CMD_DONE));
2117         return IRQ_HANDLED;
2118 }
2119
2120 static int arcmsr_hbaC_handle_isr(struct AdapterControlBlock *pACB)
2121 {
2122         uint32_t host_interrupt_status;
2123         struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
2124         /*
2125         *********************************************
2126         **   check outbound intstatus
2127         *********************************************
2128         */
2129         host_interrupt_status = readl(&phbcmu->host_int_status) &
2130                 (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
2131                 ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR);
2132         if (!host_interrupt_status)
2133                 return IRQ_NONE;
2134         do {
2135                 if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR)
2136                         arcmsr_hbaC_doorbell_isr(pACB);
2137                 /* MU post queue interrupts*/
2138                 if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)
2139                         arcmsr_hbaC_postqueue_isr(pACB);
2140                 host_interrupt_status = readl(&phbcmu->host_int_status);
2141         } while (host_interrupt_status & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
2142                 ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR));
2143         return IRQ_HANDLED;
2144 }
2145
2146 static irqreturn_t arcmsr_hbaD_handle_isr(struct AdapterControlBlock *pACB)
2147 {
2148         u32 host_interrupt_status;
2149         struct MessageUnit_D  *pmu = pACB->pmuD;
2150
2151         host_interrupt_status = readl(pmu->host_int_status) &
2152                 (ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR |
2153                 ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR);
2154         if (!host_interrupt_status)
2155                 return IRQ_NONE;
2156         do {
2157                 /* MU post queue interrupts*/
2158                 if (host_interrupt_status &
2159                         ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR)
2160                         arcmsr_hbaD_postqueue_isr(pACB);
2161                 if (host_interrupt_status &
2162                         ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR)
2163                         arcmsr_hbaD_doorbell_isr(pACB);
2164                 host_interrupt_status = readl(pmu->host_int_status);
2165         } while (host_interrupt_status &
2166                 (ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR |
2167                 ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR));
2168         return IRQ_HANDLED;
2169 }
2170
2171 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
2172 {
2173         switch (acb->adapter_type) {
2174         case ACB_ADAPTER_TYPE_A:
2175                 return arcmsr_hbaA_handle_isr(acb);
2176                 break;
2177         case ACB_ADAPTER_TYPE_B:
2178                 return arcmsr_hbaB_handle_isr(acb);
2179                 break;
2180         case ACB_ADAPTER_TYPE_C:
2181                 return arcmsr_hbaC_handle_isr(acb);
2182         case ACB_ADAPTER_TYPE_D:
2183                 return arcmsr_hbaD_handle_isr(acb);
2184         default:
2185                 return IRQ_NONE;
2186         }
2187 }
2188
2189 static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
2190 {
2191         if (acb) {
2192                 /* stop adapter background rebuild */
2193                 if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
2194                         uint32_t intmask_org;
2195                         acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
2196                         intmask_org = arcmsr_disable_outbound_ints(acb);
2197                         arcmsr_stop_adapter_bgrb(acb);
2198                         arcmsr_flush_adapter_cache(acb);
2199                         arcmsr_enable_outbound_ints(acb, intmask_org);
2200                 }
2201         }
2202 }
2203
2204
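/*
** Drain any pending IOP-to-driver data before clearing the read Qbuffer:
** while ACB_F_IOPDATA_OVERFLOW is set (or stale data remains), reset the
** ring indexes, ask the IOP to post again and wait 30 ms, retrying at most
** 15 times.
*/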
2205 void arcmsr_clear_iop2drv_rqueue_buffer(struct AdapterControlBlock *acb)
2206 {
2207         uint32_t        i;
2208
2209         if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2210                 for (i = 0; i < 15; i++) {
2211                         if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2212                                 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2213                                 acb->rqbuf_getIndex = 0;
2214                                 acb->rqbuf_putIndex = 0;
2215                                 arcmsr_iop_message_read(acb);
2216                                 mdelay(30);
2217                         } else if (acb->rqbuf_getIndex !=
2218                                    acb->rqbuf_putIndex) {
2219                                 acb->rqbuf_getIndex = 0;
2220                                 acb->rqbuf_putIndex = 0;
2221                                 mdelay(30);
2222                         } else
2223                                 break;
2224                 }
2225         }
2226 }
2227
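/*
** In-band ioctl path: the control code is taken from CDB bytes 5..8 of the
** pass-through command, which must use a single scatter-gather segment no
** larger than struct CMD_MESSAGE_FIELD.  The codes cover reading, writing
** and clearing the driver's rqbuffer/wqbuffer rings plus a few adapter
** controls (say hello/goodbye, flush cache); every reply reports
** ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON while the firmware is flagged
** FW_DEADLOCK.
*/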
2228 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
2229                 struct scsi_cmnd *cmd)
2230 {
2231         char *buffer;
2232         unsigned short use_sg;
2233         int retvalue = 0, transfer_len = 0;
2234         unsigned long flags;
2235         struct CMD_MESSAGE_FIELD *pcmdmessagefld;
2236         uint32_t controlcode = (uint32_t)cmd->cmnd[5] << 24 |
2237                 (uint32_t)cmd->cmnd[6] << 16 |
2238                 (uint32_t)cmd->cmnd[7] << 8 |
2239                 (uint32_t)cmd->cmnd[8];
2240         struct scatterlist *sg;
2241
2242         use_sg = scsi_sg_count(cmd);
2243         sg = scsi_sglist(cmd);
2244         buffer = kmap_atomic(sg_page(sg)) + sg->offset;
2245         if (use_sg > 1) {
2246                 retvalue = ARCMSR_MESSAGE_FAIL;
2247                 goto message_out;
2248         }
2249         transfer_len += sg->length;
2250         if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
2251                 retvalue = ARCMSR_MESSAGE_FAIL;
2252                 pr_info("%s: ARCMSR_MESSAGE_FAIL!\n", __func__);
2253                 goto message_out;
2254         }
2255         pcmdmessagefld = (struct CMD_MESSAGE_FIELD *)buffer;
2256         switch (controlcode) {
2257         case ARCMSR_MESSAGE_READ_RQBUFFER: {
2258                 unsigned char *ver_addr;
2259                 uint8_t *ptmpQbuffer;
2260                 uint32_t allxfer_len = 0;
2261                 ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
2262                 if (!ver_addr) {
2263                         retvalue = ARCMSR_MESSAGE_FAIL;
2264                         pr_info("%s: memory not enough!\n", __func__);
2265                         goto message_out;
2266                 }
2267                 ptmpQbuffer = ver_addr;
2268                 spin_lock_irqsave(&acb->rqbuffer_lock, flags);
2269                 if (acb->rqbuf_getIndex != acb->rqbuf_putIndex) {
2270                         unsigned int tail = acb->rqbuf_getIndex;
2271                         unsigned int head = acb->rqbuf_putIndex;
2272                         unsigned int cnt_to_end = CIRC_CNT_TO_END(head, tail, ARCMSR_MAX_QBUFFER);
2273
2274                         allxfer_len = CIRC_CNT(head, tail, ARCMSR_MAX_QBUFFER);
2275                         if (allxfer_len > ARCMSR_API_DATA_BUFLEN)
2276                                 allxfer_len = ARCMSR_API_DATA_BUFLEN;
2277
2278                         if (allxfer_len <= cnt_to_end)
2279                                 memcpy(ptmpQbuffer, acb->rqbuffer + tail, allxfer_len);
2280                         else {
2281                                 memcpy(ptmpQbuffer, acb->rqbuffer + tail, cnt_to_end);
2282                                 memcpy(ptmpQbuffer + cnt_to_end, acb->rqbuffer, allxfer_len - cnt_to_end);
2283                         }
2284                         acb->rqbuf_getIndex = (acb->rqbuf_getIndex + allxfer_len) % ARCMSR_MAX_QBUFFER;
2285                 }
2286                 memcpy(pcmdmessagefld->messagedatabuffer, ver_addr,
2287                         allxfer_len);
2288                 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2289                         struct QBUFFER __iomem *prbuffer;
2290                         acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2291                         prbuffer = arcmsr_get_iop_rqbuffer(acb);
2292                         if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
2293                                 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
2294                 }
2295                 spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
2296                 kfree(ver_addr);
2297                 pcmdmessagefld->cmdmessage.Length = allxfer_len;
2298                 if (acb->fw_flag == FW_DEADLOCK)
2299                         pcmdmessagefld->cmdmessage.ReturnCode =
2300                                 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
2301                 else
2302                         pcmdmessagefld->cmdmessage.ReturnCode =
2303                                 ARCMSR_MESSAGE_RETURNCODE_OK;
2304                 break;
2305         }
2306         case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
2307                 unsigned char *ver_addr;
2308                 int32_t user_len, cnt2end;
2309                 uint8_t *pQbuffer, *ptmpuserbuffer;
2310                 ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
2311                 if (!ver_addr) {
2312                         retvalue = ARCMSR_MESSAGE_FAIL;
2313                         goto message_out;
2314                 }
2315                 ptmpuserbuffer = ver_addr;
2316                 user_len = pcmdmessagefld->cmdmessage.Length;
2317                 memcpy(ptmpuserbuffer,
2318                         pcmdmessagefld->messagedatabuffer, user_len);
2319                 spin_lock_irqsave(&acb->wqbuffer_lock, flags);
2320                 if (acb->wqbuf_putIndex != acb->wqbuf_getIndex) {
2321                         struct SENSE_DATA *sensebuffer =
2322                                 (struct SENSE_DATA *)cmd->sense_buffer;
2323                         arcmsr_write_ioctldata2iop(acb);
2324                         /* write queue still busy: report error via sense data */
2325                         sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
2326                         sensebuffer->SenseKey = ILLEGAL_REQUEST;
2327                         sensebuffer->AdditionalSenseLength = 0x0A;
2328                         sensebuffer->AdditionalSenseCode = 0x20;
2329                         sensebuffer->Valid = 1;
2330                         retvalue = ARCMSR_MESSAGE_FAIL;
2331                 } else {
2332                         pQbuffer = &acb->wqbuffer[acb->wqbuf_putIndex];
2333                         cnt2end = ARCMSR_MAX_QBUFFER - acb->wqbuf_putIndex;
2334                         if (user_len > cnt2end) {
2335                                 memcpy(pQbuffer, ptmpuserbuffer, cnt2end);
2336                                 ptmpuserbuffer += cnt2end;
2337                                 user_len -= cnt2end;
2338                                 acb->wqbuf_putIndex = 0;
2339                                 pQbuffer = acb->wqbuffer;
2340                         }
2341                         memcpy(pQbuffer, ptmpuserbuffer, user_len);
2342                         acb->wqbuf_putIndex += user_len;
2343                         acb->wqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
2344                         if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
2345                                 acb->acb_flags &=
2346                                                 ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
2347                                 arcmsr_write_ioctldata2iop(acb);
2348                         }
2349                 }
2350                 spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
2351                 kfree(ver_addr);
2352                 if (acb->fw_flag == FW_DEADLOCK)
2353                         pcmdmessagefld->cmdmessage.ReturnCode =
2354                                 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
2355                 else
2356                         pcmdmessagefld->cmdmessage.ReturnCode =
2357                                 ARCMSR_MESSAGE_RETURNCODE_OK;
2358                 break;
2359         }
2360         case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
2361                 uint8_t *pQbuffer = acb->rqbuffer;
2362
2363                 arcmsr_clear_iop2drv_rqueue_buffer(acb);
2364                 spin_lock_irqsave(&acb->rqbuffer_lock, flags);
2365                 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
2366                 acb->rqbuf_getIndex = 0;
2367                 acb->rqbuf_putIndex = 0;
2368                 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2369                 spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
2370                 if (acb->fw_flag == FW_DEADLOCK)
2371                         pcmdmessagefld->cmdmessage.ReturnCode =
2372                                 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
2373                 else
2374                         pcmdmessagefld->cmdmessage.ReturnCode =
2375                                 ARCMSR_MESSAGE_RETURNCODE_OK;
2376                 break;
2377         }
2378         case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
2379                 uint8_t *pQbuffer = acb->wqbuffer;
2380                 spin_lock_irqsave(&acb->wqbuffer_lock, flags);
2381                 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
2382                         ACB_F_MESSAGE_WQBUFFER_READED);
2383                 acb->wqbuf_getIndex = 0;
2384                 acb->wqbuf_putIndex = 0;
2385                 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2386                 spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
2387                 if (acb->fw_flag == FW_DEADLOCK)
2388                         pcmdmessagefld->cmdmessage.ReturnCode =
2389                                 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
2390                 else
2391                         pcmdmessagefld->cmdmessage.ReturnCode =
2392                                 ARCMSR_MESSAGE_RETURNCODE_OK;
2393                 break;
2394         }
2395         case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
2396                 uint8_t *pQbuffer;
2397                 arcmsr_clear_iop2drv_rqueue_buffer(acb);
2398                 spin_lock_irqsave(&acb->rqbuffer_lock, flags);
2399                 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
2400                 acb->rqbuf_getIndex = 0;
2401                 acb->rqbuf_putIndex = 0;
2402                 pQbuffer = acb->rqbuffer;
2403                 memset(pQbuffer, 0, sizeof(struct QBUFFER));
2404                 spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
2405                 spin_lock_irqsave(&acb->wqbuffer_lock, flags);
2406                 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
2407                         ACB_F_MESSAGE_WQBUFFER_READED);
2408                 acb->wqbuf_getIndex = 0;
2409                 acb->wqbuf_putIndex = 0;
2410                 pQbuffer = acb->wqbuffer;
2411                 memset(pQbuffer, 0, sizeof(struct QBUFFER));
2412                 spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
2413                 if (acb->fw_flag == FW_DEADLOCK)
2414                         pcmdmessagefld->cmdmessage.ReturnCode =
2415                                 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
2416                 else
2417                         pcmdmessagefld->cmdmessage.ReturnCode =
2418                                 ARCMSR_MESSAGE_RETURNCODE_OK;
2419                 break;
2420         }
2421         case ARCMSR_MESSAGE_RETURN_CODE_3F: {
2422                 if (acb->fw_flag == FW_DEADLOCK)
2423                         pcmdmessagefld->cmdmessage.ReturnCode =
2424                                 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
2425                 else
2426                         pcmdmessagefld->cmdmessage.ReturnCode =
2427                                 ARCMSR_MESSAGE_RETURNCODE_3F;
2428                 break;
2429         }
2430         case ARCMSR_MESSAGE_SAY_HELLO: {
2431                 static const char hello_string[] = "Hello! I am ARCMSR";
2432                 if (acb->fw_flag == FW_DEADLOCK)
2433                         pcmdmessagefld->cmdmessage.ReturnCode =
2434                                 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
2435                 else
2436                         pcmdmessagefld->cmdmessage.ReturnCode =
2437                                 ARCMSR_MESSAGE_RETURNCODE_OK;
2438                 memcpy(pcmdmessagefld->messagedatabuffer,
2439                         hello_string, (int16_t)strlen(hello_string));
2440                 break;
2441         }
2442         case ARCMSR_MESSAGE_SAY_GOODBYE: {
2443                 if (acb->fw_flag == FW_DEADLOCK)
2444                         pcmdmessagefld->cmdmessage.ReturnCode =
2445                                 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
2446                 else
2447                         pcmdmessagefld->cmdmessage.ReturnCode =
2448                                 ARCMSR_MESSAGE_RETURNCODE_OK;
2449                 arcmsr_iop_parking(acb);
2450                 break;
2451         }
2452         case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: {
2453                 if (acb->fw_flag == FW_DEADLOCK)
2454                         pcmdmessagefld->cmdmessage.ReturnCode =
2455                                 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
2456                 else
2457                         pcmdmessagefld->cmdmessage.ReturnCode =
2458                                 ARCMSR_MESSAGE_RETURNCODE_OK;
2459                 arcmsr_flush_adapter_cache(acb);
2460                 break;
2461         }
2462         default:
2463                 retvalue = ARCMSR_MESSAGE_FAIL;
2464                 pr_info("%s: unknown control code\n", __func__);
2465         }
2466 message_out:
2467         if (use_sg) {
2468                 struct scatterlist *sg = scsi_sglist(cmd);
2469                 kunmap_atomic(buffer - sg->offset);
2470         }
2471         return retvalue;
2472 }
2473
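/*
 * Take one CommandControlBlock off acb->ccb_free_list under ccblist_lock;
 * a NULL return means every CCB is in flight and the caller should report
 * the host as busy.
 */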
2474 static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb)
2475 {
2476         struct list_head *head = &acb->ccb_free_list;
2477         struct CommandControlBlock *ccb = NULL;
2478         unsigned long flags;
2479         spin_lock_irqsave(&acb->ccblist_lock, flags);
2480         if (!list_empty(head)) {
2481                 ccb = list_entry(head->next, struct CommandControlBlock, list);
2482                 list_del_init(&ccb->list);
2483         } else {
2484                 spin_unlock_irqrestore(&acb->ccblist_lock, flags);
2485                 return NULL;
2486         }
2487         spin_unlock_irqrestore(&acb->ccblist_lock, flags);
2488         return ccb;
2489 }
2490
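/*
 * Commands addressed to the virtual device (target 16) never reach a disk:
 * INQUIRY is answered locally as a processor-type device (LUN 0 only),
 * WRITE_BUFFER/READ_BUFFER carry the in-band message interface through
 * arcmsr_iop_message_xfer(), and anything else is completed immediately.
 */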
2491 static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
2492                 struct scsi_cmnd *cmd)
2493 {
2494         switch (cmd->cmnd[0]) {
2495         case INQUIRY: {
2496                 unsigned char inqdata[36];
2497                 char *buffer;
2498                 struct scatterlist *sg;
2499
2500                 if (cmd->device->lun) {
2501                         cmd->result = (DID_TIME_OUT << 16);
2502                         cmd->scsi_done(cmd);
2503                         return;
2504                 }
2505                 inqdata[0] = TYPE_PROCESSOR;
2506                 /* Periph Qualifier & Periph Dev Type */
2507                 inqdata[1] = 0;
2508                 /* rem media bit & Dev Type Modifier */
2509                 inqdata[2] = 0;
2510                 /* ISO, ECMA, & ANSI versions */
2511                 inqdata[4] = 31;
2512                 /* length of additional data */
2513                 strncpy(&inqdata[8], "Areca   ", 8);
2514                 /* Vendor Identification */
2515                 strncpy(&inqdata[16], "RAID controller ", 16);
2516                 /* Product Identification */
2517                 strncpy(&inqdata[32], "R001", 4); /* Product Revision */
2518
2519                 sg = scsi_sglist(cmd);
2520                 buffer = kmap_atomic(sg_page(sg)) + sg->offset;
2521
2522                 memcpy(buffer, inqdata, sizeof(inqdata));
2523                 sg = scsi_sglist(cmd);
2524                 kunmap_atomic(buffer - sg->offset);
2525
2526                 cmd->scsi_done(cmd);
2527         }
2528         break;
2529         case WRITE_BUFFER:
2530         case READ_BUFFER: {
2531                 if (arcmsr_iop_message_xfer(acb, cmd))
2532                         cmd->result = (DID_ERROR << 16);
2533                 cmd->scsi_done(cmd);
2534         }
2535         break;
2536         default:
2537                 cmd->scsi_done(cmd);
2538         }
2539 }
2540
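/*
 * queuecommand entry point (locked variant, wrapped by DEF_SCSI_QCMD below):
 * SYNCHRONIZE_CACHE and SEND_DIAGNOSTIC are completed locally, target 16 is
 * routed to the virtual message device, and everything else gets a free CCB,
 * is built into an ARCMSR_CDB and posted to the IOP; when no free CCB is
 * available the command is returned with SCSI_MLQUEUE_HOST_BUSY.
 */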
2541 static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
2542         void (* done)(struct scsi_cmnd *))
2543 {
2544         struct Scsi_Host *host = cmd->device->host;
2545         struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
2546         struct CommandControlBlock *ccb;
2547         int target = cmd->device->id;
2548         int lun = cmd->device->lun;
2549         uint8_t scsicmd = cmd->cmnd[0];
2550         cmd->scsi_done = done;
2551         cmd->host_scribble = NULL;
2552         cmd->result = 0;
2553         if ((scsicmd == SYNCHRONIZE_CACHE) || (scsicmd == SEND_DIAGNOSTIC)) {
2554                 if (acb->devstate[target][lun] == ARECA_RAID_GONE) {
2555                         cmd->result = (DID_NO_CONNECT << 16);
2556                 }
2557                 cmd->scsi_done(cmd);
2558                 return 0;
2559         }
2560         if (target == 16) {
2561                 /* virtual device for iop message transfer */
2562                 arcmsr_handle_virtual_command(acb, cmd);
2563                 return 0;
2564         }
2565         ccb = arcmsr_get_freeccb(acb);
2566         if (!ccb)
2567                 return SCSI_MLQUEUE_HOST_BUSY;
2568         if (arcmsr_build_ccb(acb, ccb, cmd) == FAILED) {
2569                 cmd->result = (DID_ERROR << 16) | (RESERVATION_CONFLICT << 1);
2570                 cmd->scsi_done(cmd);
2571                 return 0;
2572         }
2573         arcmsr_post_ccb(acb, ccb);
2574         return 0;
2575 }
2576
2577 static DEF_SCSI_QCMD(arcmsr_queue_command)
2578
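/*
 * Type A: post INBOUND_MESG0_GET_CONFIG and copy the firmware model,
 * version, device map and queue/SDRAM parameters out of message_rwbuffer.
 */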
2579 static bool arcmsr_hbaA_get_config(struct AdapterControlBlock *acb)
2580 {
2581         struct MessageUnit_A __iomem *reg = acb->pmuA;
2582         char *acb_firm_model = acb->firm_model;
2583         char *acb_firm_version = acb->firm_version;
2584         char *acb_device_map = acb->device_map;
2585         char __iomem *iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]);
2586         char __iomem *iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]);
2587         char __iomem *iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]);
2588         int count;
2589         writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
2590         if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
2591         printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware"
2592                 " miscellaneous data' timeout\n", acb->host->host_no);
2593                 return false;
2594         }
2595         count = 8;
2596         while (count) {
2597                 *acb_firm_model = readb(iop_firm_model);
2598                 acb_firm_model++;
2599                 iop_firm_model++;
2600                 count--;
2601         }
2602
2603         count = 16;
2604         while (count) {
2605                 *acb_firm_version = readb(iop_firm_version);
2606                 acb_firm_version++;
2607                 iop_firm_version++;
2608                 count--;
2609         }
2610
2611         count = 16;
2612         while (count) {
2613                 *acb_device_map = readb(iop_device_map);
2614                 acb_device_map++;
2615                 iop_device_map++;
2616                 count--;
2617         }
2618         pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
2619                 acb->host->host_no,
2620                 acb->firm_model,
2621                 acb->firm_version);
2622         acb->signature = readl(&reg->message_rwbuffer[0]);
2623         acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
2624         acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
2625         acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
2626         acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
2627         acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]);  /*firm_cfg_version,25,100-103*/
2628         return true;
2629 }
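
/*
 * Type B: the MessageUnit_B descriptor lives in driver-allocated DMA-coherent
 * memory; allocate it, point its doorbell/message fields into the mapped
 * BARs, then fetch the same firmware data as the type A path.
 */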
2630 static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
2631 {
2632         struct MessageUnit_B *reg = acb->pmuB;
2633         struct pci_dev *pdev = acb->pdev;
2634         void *dma_coherent;
2635         dma_addr_t dma_coherent_handle;
2636         char *acb_firm_model = acb->firm_model;
2637         char *acb_firm_version = acb->firm_version;
2638         char *acb_device_map = acb->device_map;
2639         char __iomem *iop_firm_model;
2640         /*firm_model,15,60-67*/
2641         char __iomem *iop_firm_version;
2642         /*firm_version,17,68-83*/
2643         char __iomem *iop_device_map;
2644         /*firm_version,21,84-99*/
2645         int count;
2646
2647         acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_B), 32);
2648         dma_coherent = dma_alloc_coherent(&pdev->dev, acb->roundup_ccbsize,
2649                         &dma_coherent_handle, GFP_KERNEL);
2650         if (!dma_coherent) {
2651                 printk(KERN_NOTICE
2652                         "arcmsr%d: dma_alloc_coherent got error for hbb mu\n",
2653                         acb->host->host_no);
2654                 return false;
2655         }
2656         acb->dma_coherent_handle2 = dma_coherent_handle;
2657         acb->dma_coherent2 = dma_coherent;
2658         reg = (struct MessageUnit_B *)dma_coherent;
2659         acb->pmuB = reg;
2660         reg->drv2iop_doorbell = (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_DRV2IOP_DOORBELL);
2661         reg->drv2iop_doorbell_mask = (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_DRV2IOP_DOORBELL_MASK);
2662         reg->iop2drv_doorbell = (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_IOP2DRV_DOORBELL);
2663         reg->iop2drv_doorbell_mask = (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_IOP2DRV_DOORBELL_MASK);
2664         reg->message_wbuffer = (uint32_t __iomem *)((unsigned long)acb->mem_base1 + ARCMSR_MESSAGE_WBUFFER);
2665         reg->message_rbuffer =  (uint32_t __iomem *)((unsigned long)acb->mem_base1 + ARCMSR_MESSAGE_RBUFFER);
2666         reg->message_rwbuffer = (uint32_t __iomem *)((unsigned long)acb->mem_base1 + ARCMSR_MESSAGE_RWBUFFER);
2667         iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]);  /*firm_model,15,60-67*/
2668         iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]);        /*firm_version,17,68-83*/
2669         iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]);  /*firm_version,21,84-99*/
2670
2671         writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
2672         if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
2673                 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware"
2674                         " miscellaneous data' timeout\n", acb->host->host_no);
2675                 return false;
2676         }
2677         count = 8;
2678         while (count) {
2679                 *acb_firm_model = readb(iop_firm_model);
2680                 acb_firm_model++;
2681                 iop_firm_model++;
2682                 count--;
2683         }
2684         count = 16;
2685         while (count) {
2686                 *acb_firm_version = readb(iop_firm_version);
2687                 acb_firm_version++;
2688                 iop_firm_version++;
2689                 count--;
2690         }
2691
2692         count = 16;
2693         while (count) {
2694                 *acb_device_map = readb(iop_device_map);
2695                 acb_device_map++;
2696                 iop_device_map++;
2697                 count--;
2698         }
2699
2700         pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
2701                 acb->host->host_no,
2702                 acb->firm_model,
2703                 acb->firm_version);
2704
2705         acb->signature = readl(&reg->message_rwbuffer[1]);
2706         /*firm_signature,1,00-03*/
2707         acb->firm_request_len = readl(&reg->message_rwbuffer[2]);
2708         /*firm_request_len,1,04-07*/
2709         acb->firm_numbers_queue = readl(&reg->message_rwbuffer[3]);
2710         /*firm_numbers_queue,2,08-11*/
2711         acb->firm_sdram_size = readl(&reg->message_rwbuffer[4]);
2712         /*firm_sdram_size,3,12-15*/
2713         acb->firm_hd_channels = readl(&reg->message_rwbuffer[5]);
2714         /*firm_ide_channels,4,16-19*/
2715         acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]);  /*firm_cfg_version,25,100-103*/
2717         return true;
2718 }
2719
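/*
 * Type C: mask outbound interrupts, wait for the firmware-ok flag, post
 * GET_CONFIG and poll the outbound doorbell for the completion before
 * copying the model/version strings and queue parameters.
 */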
2720 static bool arcmsr_hbaC_get_config(struct AdapterControlBlock *pACB)
2721 {
2722         uint32_t intmask_org, Index, firmware_state = 0;
2723         struct MessageUnit_C __iomem *reg = pACB->pmuC;
2724         char *acb_firm_model = pACB->firm_model;
2725         char *acb_firm_version = pACB->firm_version;
2726         char __iomem *iop_firm_model = (char __iomem *)(&reg->msgcode_rwbuffer[15]);    /*firm_model,15,60-67*/
2727         char __iomem *iop_firm_version = (char __iomem *)(&reg->msgcode_rwbuffer[17]);  /*firm_version,17,68-83*/
2728         int count;
2729         /* disable all outbound interrupt */
2730         intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
2731         writel(intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
2732         /* wait firmware ready */
2733         do {
2734                 firmware_state = readl(&reg->outbound_msgaddr1);
2735         } while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
2736         /* post "get config" instruction */
2737         writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
2738         writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
2739         /* wait message ready */
2740         for (Index = 0; Index < 2000; Index++) {
2741                 if (readl(&reg->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
2742                         writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear);/*clear interrupt*/
2743                         break;
2744                 }
2745                 udelay(10);
2746         } /* up to 2000 iterations of udelay(10) */
2747         if (Index >= 2000) {
2748                 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware"
2749                         " miscellaneous data' timeout\n", pACB->host->host_no);
2750                 return false;
2751         }
2752         count = 8;
2753         while (count) {
2754                 *acb_firm_model = readb(iop_firm_model);
2755                 acb_firm_model++;
2756                 iop_firm_model++;
2757                 count--;
2758         }
2759         count = 16;
2760         while (count) {
2761                 *acb_firm_version = readb(iop_firm_version);
2762                 acb_firm_version++;
2763                 iop_firm_version++;
2764                 count--;
2765         }
2766         pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
2767                 pACB->host->host_no,
2768                 pACB->firm_model,
2769                 pACB->firm_version);
2770         pACB->firm_request_len = readl(&reg->msgcode_rwbuffer[1]);   /*firm_request_len,1,04-07*/
2771         pACB->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[2]); /*firm_numbers_queue,2,08-11*/
2772         pACB->firm_sdram_size = readl(&reg->msgcode_rwbuffer[3]);    /*firm_sdram_size,3,12-15*/
2773         pACB->firm_hd_channels = readl(&reg->msgcode_rwbuffer[4]);  /*firm_ide_channels,4,16-19*/
2774         pACB->firm_cfg_version = readl(&reg->msgcode_rwbuffer[25]);  /*firm_cfg_version,25,100-103*/
2775         /*all interrupt service will be enable at arcmsr_iop_init*/
2776         return true;
2777 }
2778
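/*
 * Type D (ARC1214): allocate the MessageUnit_D block, wire its register
 * pointers into mem_base0, then post GET_CONFIG and copy back the firmware
 * identification and queue parameters; the DMA block is freed on timeout.
 */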
2779 static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb)
2780 {
2781         char *acb_firm_model = acb->firm_model;
2782         char *acb_firm_version = acb->firm_version;
2783         char *acb_device_map = acb->device_map;
2784         char __iomem *iop_firm_model;
2785         char __iomem *iop_firm_version;
2786         char __iomem *iop_device_map;
2787         u32 count;
2788         struct MessageUnit_D *reg;
2789         void *dma_coherent2;
2790         dma_addr_t dma_coherent_handle2;
2791         struct pci_dev *pdev = acb->pdev;
2792
2793         acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_D), 32);
2794         dma_coherent2 = dma_alloc_coherent(&pdev->dev, acb->roundup_ccbsize,
2795                 &dma_coherent_handle2, GFP_KERNEL);
2796         if (!dma_coherent2) {
2797                 pr_notice("DMA allocation failed...\n");
2798                 return false;
2799         }
2800         memset(dma_coherent2, 0, acb->roundup_ccbsize);
2801         acb->dma_coherent_handle2 = dma_coherent_handle2;
2802         acb->dma_coherent2 = dma_coherent2;
2803         reg = (struct MessageUnit_D *)dma_coherent2;
2804         acb->pmuD = reg;
2805         reg->chip_id = acb->mem_base0 + ARCMSR_ARC1214_CHIP_ID;
2806         reg->cpu_mem_config = acb->mem_base0 +
2807                 ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION;
2808         reg->i2o_host_interrupt_mask = acb->mem_base0 +
2809                 ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK;
2810         reg->sample_at_reset = acb->mem_base0 + ARCMSR_ARC1214_SAMPLE_RESET;
2811         reg->reset_request = acb->mem_base0 + ARCMSR_ARC1214_RESET_REQUEST;
2812         reg->host_int_status = acb->mem_base0 +
2813                 ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS;
2814         reg->pcief0_int_enable = acb->mem_base0 +
2815                 ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE;
2816         reg->inbound_msgaddr0 = acb->mem_base0 +
2817                 ARCMSR_ARC1214_INBOUND_MESSAGE0;
2818         reg->inbound_msgaddr1 = acb->mem_base0 +
2819                 ARCMSR_ARC1214_INBOUND_MESSAGE1;
2820         reg->outbound_msgaddr0 = acb->mem_base0 +
2821                 ARCMSR_ARC1214_OUTBOUND_MESSAGE0;
2822         reg->outbound_msgaddr1 = acb->mem_base0 +
2823                 ARCMSR_ARC1214_OUTBOUND_MESSAGE1;
2824         reg->inbound_doorbell = acb->mem_base0 +
2825                 ARCMSR_ARC1214_INBOUND_DOORBELL;
2826         reg->outbound_doorbell = acb->mem_base0 +
2827                 ARCMSR_ARC1214_OUTBOUND_DOORBELL;
2828         reg->outbound_doorbell_enable = acb->mem_base0 +
2829                 ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE;
2830         reg->inboundlist_base_low = acb->mem_base0 +
2831                 ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW;
2832         reg->inboundlist_base_high = acb->mem_base0 +
2833                 ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH;
2834         reg->inboundlist_write_pointer = acb->mem_base0 +
2835                 ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER;
2836         reg->outboundlist_base_low = acb->mem_base0 +
2837                 ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW;
2838         reg->outboundlist_base_high = acb->mem_base0 +
2839                 ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH;
2840         reg->outboundlist_copy_pointer = acb->mem_base0 +
2841                 ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER;
2842         reg->outboundlist_read_pointer = acb->mem_base0 +
2843                 ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER;
2844         reg->outboundlist_interrupt_cause = acb->mem_base0 +
2845                 ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE;
2846         reg->outboundlist_interrupt_enable = acb->mem_base0 +
2847                 ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE;
2848         reg->message_wbuffer = acb->mem_base0 + ARCMSR_ARC1214_MESSAGE_WBUFFER;
2849         reg->message_rbuffer = acb->mem_base0 + ARCMSR_ARC1214_MESSAGE_RBUFFER;
2850         reg->msgcode_rwbuffer = acb->mem_base0 +
2851                 ARCMSR_ARC1214_MESSAGE_RWBUFFER;
2852         iop_firm_model = (char __iomem *)(&reg->msgcode_rwbuffer[15]);
2853         iop_firm_version = (char __iomem *)(&reg->msgcode_rwbuffer[17]);
2854         iop_device_map = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
2855         if (readl(acb->pmuD->outbound_doorbell) &
2856                 ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
2857                 writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
2858                         acb->pmuD->outbound_doorbell);/*clear interrupt*/
2859         }
2860         /* post "get config" instruction */
2861         writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
2862         /* wait message ready */
2863         if (!arcmsr_hbaD_wait_msgint_ready(acb)) {
2864                 pr_notice("arcmsr%d: wait get adapter firmware "
2865                         "miscellaneous data timeout\n", acb->host->host_no);
2866                 dma_free_coherent(&acb->pdev->dev, acb->roundup_ccbsize,
2867                         acb->dma_coherent2, acb->dma_coherent_handle2);
2868                 return false;
2869         }
2870         count = 8;
2871         while (count) {
2872                 *acb_firm_model = readb(iop_firm_model);
2873                 acb_firm_model++;
2874                 iop_firm_model++;
2875                 count--;
2876         }
2877         count = 16;
2878         while (count) {
2879                 *acb_firm_version = readb(iop_firm_version);
2880                 acb_firm_version++;
2881                 iop_firm_version++;
2882                 count--;
2883         }
2884         count = 16;
2885         while (count) {
2886                 *acb_device_map = readb(iop_device_map);
2887                 acb_device_map++;
2888                 iop_device_map++;
2889                 count--;
2890         }
2891         acb->signature = readl(&reg->msgcode_rwbuffer[1]);
2892         /*firm_signature,1,00-03*/
2893         acb->firm_request_len = readl(&reg->msgcode_rwbuffer[2]);
2894         /*firm_request_len,1,04-07*/
2895         acb->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[3]);
2896         /*firm_numbers_queue,2,08-11*/
2897         acb->firm_sdram_size = readl(&reg->msgcode_rwbuffer[4]);
2898         /*firm_sdram_size,3,12-15*/
2899         acb->firm_hd_channels = readl(&reg->msgcode_rwbuffer[5]);
2900         /*firm_hd_channels,4,16-19*/
2901         acb->firm_cfg_version = readl(&reg->msgcode_rwbuffer[25]);
2902         pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
2903                 acb->host->host_no,
2904                 acb->firm_model,
2905                 acb->firm_version);
2906         return true;
2907 }
2908
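/*
 * Run the per-chip get_config routine, then derive the host can_queue depth
 * from the firmware-reported queue size, capped at ARCMSR_MAX_OUTSTANDING_CMD.
 */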
2909 static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
2910 {
2911         bool rtn = false;
2912
2913         switch (acb->adapter_type) {
2914         case ACB_ADAPTER_TYPE_A:
2915                 rtn = arcmsr_hbaA_get_config(acb);
2916                 break;
2917         case ACB_ADAPTER_TYPE_B:
2918                 rtn = arcmsr_hbaB_get_config(acb);
2919                 break;
2920         case ACB_ADAPTER_TYPE_C:
2921                 rtn = arcmsr_hbaC_get_config(acb);
2922                 break;
2923         case ACB_ADAPTER_TYPE_D:
2924                 rtn = arcmsr_hbaD_get_config(acb);
2925                 break;
2926         default:
2927                 break;
2928         }
2929         if (acb->firm_numbers_queue > ARCMSR_MAX_OUTSTANDING_CMD)
2930                 acb->maxOutstanding = ARCMSR_MAX_OUTSTANDING_CMD;
2931         else
2932                 acb->maxOutstanding = acb->firm_numbers_queue - 1;
2933         acb->host->can_queue = acb->maxOutstanding;
2934         return rtn;
2935 }
2936
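/*
 * The *_polling_ccbdone() helpers below spin on the adapter's outbound
 * completion queue (sleeping 25 ms per empty pass, bounded by poll_count)
 * until the CCB handed in by the error handler is seen; aborted or foreign
 * CCBs drained along the way are completed or reported as illegal.
 */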
2937 static int arcmsr_hbaA_polling_ccbdone(struct AdapterControlBlock *acb,
2938         struct CommandControlBlock *poll_ccb)
2939 {
2940         struct MessageUnit_A __iomem *reg = acb->pmuA;
2941         struct CommandControlBlock *ccb;
2942         struct ARCMSR_CDB *arcmsr_cdb;
2943         uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
2944         int rtn;
2945         bool error;
2946         polling_hba_ccb_retry:
2947         poll_count++;
2948         outbound_intstatus = readl(&reg->outbound_intstatus) & acb->outbound_int_enable;
2949         writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
2950         while (1) {
2951                 if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF) {
2952                         if (poll_ccb_done) {
2953                                 rtn = SUCCESS;
2954                                 break;
2955                         } else {
2956                                 msleep(25);
2957                                 if (poll_count > 100) {
2958                                         rtn = FAILED;
2959                                         break;
2960                                 }
2961                                 goto polling_hba_ccb_retry;
2962                         }
2963                 }
2964                 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
2965                 ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
2966                 poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
2967                 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
2968                         if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
2969                                 printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
2970                                         " poll command abort successfully \n"
2971                                         , acb->host->host_no
2972                                         , ccb->pcmd->device->id
2973                                         , (u32)ccb->pcmd->device->lun
2974                                         , ccb);
2975                                 ccb->pcmd->result = DID_ABORT << 16;
2976                                 arcmsr_ccb_complete(ccb);
2977                                 continue;
2978                         }
2979                         printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
2980                                 " command done ccb = '0x%p'"
2981                                 "ccboutstandingcount = %d \n"
2982                                 , acb->host->host_no
2983                                 , ccb
2984                                 , atomic_read(&acb->ccboutstandingcount));
2985                         continue;
2986                 }
2987                 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
2988                 arcmsr_report_ccb_state(acb, ccb, error);
2989         }
2990         return rtn;
2991 }
2992
2993 static int arcmsr_hbaB_polling_ccbdone(struct AdapterControlBlock *acb,
2994                                         struct CommandControlBlock *poll_ccb)
2995 {
2996         struct MessageUnit_B *reg = acb->pmuB;
2997         struct ARCMSR_CDB *arcmsr_cdb;
2998         struct CommandControlBlock *ccb;
2999         uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0;
3000         int index, rtn;
3001         bool error;
3002         polling_hbb_ccb_retry:
3003
3004         poll_count++;
3005         /* clear doorbell interrupt */
3006         writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
3007         while (1) {
3008                 index = reg->doneq_index;
3009                 flag_ccb = reg->done_qbuffer[index];
3010                 if (flag_ccb == 0) {
3011                         if (poll_ccb_done) {
3012                                 rtn = SUCCESS;
3013                                 break;
3014                         } else {
3015                                 msleep(25);
3016                                 if (poll_count > 100) {
3017                                         rtn = FAILED;
3018                                         break;
3019                                 }
3020                                 goto polling_hbb_ccb_retry;
3021                         }
3022                 }
3023                 reg->done_qbuffer[index] = 0;
3024                 index++;
3025                 /* wrap the index back to 0 after the last entry */
3026                 index %= ARCMSR_MAX_HBB_POSTQUEUE;
3027                 reg->doneq_index = index;
3028                 /* check if command done with no error */
3029                 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
3030                 ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
3031                 poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
3032                 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
3033                         if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
3034                                 printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
3035                                         " poll command abort successfully \n"
3036                                         ,acb->host->host_no
3037                                         ,ccb->pcmd->device->id
3038                                         ,(u32)ccb->pcmd->device->lun
3039                                         ,ccb);
3040                                 ccb->pcmd->result = DID_ABORT << 16;
3041                                 arcmsr_ccb_complete(ccb);
3042                                 continue;
3043                         }
3044                         printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
3045                                 " command done ccb = '0x%p'"
3046                                 "ccboutstandingcount = %d \n"
3047                                 , acb->host->host_no
3048                                 , ccb
3049                                 , atomic_read(&acb->ccboutstandingcount));
3050                         continue;
3051                 }
3052                 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
3053                 arcmsr_report_ccb_state(acb, ccb, error);
3054         }
3055         return rtn;
3056 }
3057
3058 static int arcmsr_hbaC_polling_ccbdone(struct AdapterControlBlock *acb,
3059                 struct CommandControlBlock *poll_ccb)
3060 {
3061         struct MessageUnit_C __iomem *reg = acb->pmuC;
3062         uint32_t flag_ccb, ccb_cdb_phy;
3063         struct ARCMSR_CDB *arcmsr_cdb;
3064         bool error;
3065         struct CommandControlBlock *pCCB;
3066         uint32_t poll_ccb_done = 0, poll_count = 0;
3067         int rtn;
3068 polling_hbc_ccb_retry:
3069         poll_count++;
3070         while (1) {
3071                 if ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) == 0) {
3072                         if (poll_ccb_done) {
3073                                 rtn = SUCCESS;
3074                                 break;
3075                         } else {
3076                                 msleep(25);
3077                                 if (poll_count > 100) {
3078                                         rtn = FAILED;
3079                                         break;
3080                                 }
3081                                 goto polling_hbc_ccb_retry;
3082                         }
3083                 }
3084                 flag_ccb = readl(&reg->outbound_queueport_low);
3085                 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
3086                 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);/*frame must be 32 bytes aligned*/
3087                 pCCB = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
3088                 poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
3089                 /* check if command done with no error */
3090                 if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
3091                         if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
3092                                 printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
3093                                         " poll command abort successfully \n"
3094                                         , acb->host->host_no
3095                                         , pCCB->pcmd->device->id
3096                                         , (u32)pCCB->pcmd->device->lun
3097                                         , pCCB);
3098                                 pCCB->pcmd->result = DID_ABORT << 16;
3099                                 arcmsr_ccb_complete(pCCB);
3100                                 continue;
3101                         }
3102                         printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
3103                                 " command done ccb = '0x%p'"
3104                                 "ccboutstandingcount = %d \n"
3105                                 , acb->host->host_no
3106                                 , pCCB
3107                                 , atomic_read(&acb->ccboutstandingcount));
3108                         continue;
3109                 }
3110                 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
3111                 arcmsr_report_ccb_state(acb, pCCB, error);
3112         }
3113         return rtn;
3114 }
3115
3116 static int arcmsr_hbaD_polling_ccbdone(struct AdapterControlBlock *acb,
3117                                 struct CommandControlBlock *poll_ccb)
3118 {
3119         bool error;
3120         uint32_t poll_ccb_done = 0, poll_count = 0, flag_ccb, ccb_cdb_phy;
3121         int rtn, doneq_index, index_stripped, outbound_write_pointer, toggle;
3122         unsigned long flags;
3123         struct ARCMSR_CDB *arcmsr_cdb;
3124         struct CommandControlBlock *pCCB;
3125         struct MessageUnit_D *pmu = acb->pmuD;
3126
3127 polling_hbaD_ccb_retry:
3128         poll_count++;
3129         while (1) {
3130                 spin_lock_irqsave(&acb->doneq_lock, flags);
3131                 outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1;
3132                 doneq_index = pmu->doneq_index;
3133                 if ((outbound_write_pointer & 0xFFF) == (doneq_index & 0xFFF)) {
3134                         spin_unlock_irqrestore(&acb->doneq_lock, flags);
3135                         if (poll_ccb_done) {
3136                                 rtn = SUCCESS;
3137                                 break;
3138                         } else {
3139                                 msleep(25);
3140                                 if (poll_count > 40) {
3141                                         rtn = FAILED;
3142                                         break;
3143                                 }
3144                                 goto polling_hbaD_ccb_retry;
3145                         }
3146                 }
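                /*
                 * doneq_index keeps the 12-bit queue position in its low bits
                 * and a wrap toggle in bit 14; advance the position modulo
                 * ARCMSR_MAX_ARC1214_DONEQUEUE and flip the toggle each time
                 * the position wraps.
                 */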
3147                 toggle = doneq_index & 0x4000;
3148                 index_stripped = (doneq_index & 0xFFF) + 1;
3149                 index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
3150                 pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
3151                                 ((toggle ^ 0x4000) + 1);
3152                 doneq_index = pmu->doneq_index;
3153                 spin_unlock_irqrestore(&acb->doneq_lock, flags);
3154                 flag_ccb = pmu->done_qbuffer[doneq_index & 0xFFF].addressLow;
3155                 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
3156                 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset +
3157                         ccb_cdb_phy);
3158                 pCCB = container_of(arcmsr_cdb, struct CommandControlBlock,
3159                         arcmsr_cdb);
3160                 poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
3161                 if ((pCCB->acb != acb) ||
3162                         (pCCB->startdone != ARCMSR_CCB_START)) {
3163                         if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
3164                                 pr_notice("arcmsr%d: scsi id = %d "
3165                                         "lun = %d ccb = '0x%p' poll command "
3166                                         "abort successfully\n"
3167                                         , acb->host->host_no
3168                                         , pCCB->pcmd->device->id
3169                                         , (u32)pCCB->pcmd->device->lun
3170                                         , pCCB);
3171                                 pCCB->pcmd->result = DID_ABORT << 16;
3172                                 arcmsr_ccb_complete(pCCB);
3173                                 continue;
3174                         }
3175                         pr_notice("arcmsr%d: polling an illegal "
3176                                 "ccb command done ccb = '0x%p' "
3177                                 "ccboutstandingcount = %d\n"
3178                                 , acb->host->host_no
3179                                 , pCCB
3180                                 , atomic_read(&acb->ccboutstandingcount));
3181                         continue;
3182                 }
3183                 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
3184                         ? true : false;
3185                 arcmsr_report_ccb_state(acb, pCCB, error);
3186         }
3187         return rtn;
3188 }
3189
3190 static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
3191                                         struct CommandControlBlock *poll_ccb)
3192 {
3193         int rtn = 0;
3194         switch (acb->adapter_type) {
3195
3196         case ACB_ADAPTER_TYPE_A: {
3197                 rtn = arcmsr_hbaA_polling_ccbdone(acb, poll_ccb);
3198                 }
3199                 break;
3200
3201         case ACB_ADAPTER_TYPE_B: {
3202                 rtn = arcmsr_hbaB_polling_ccbdone(acb, poll_ccb);
3203                 }
3204                 break;
3205         case ACB_ADAPTER_TYPE_C: {
3206                 rtn = arcmsr_hbaC_polling_ccbdone(acb, poll_ccb);
3207                 }
3208                 break;
3209         case ACB_ADAPTER_TYPE_D:
3210                 rtn = arcmsr_hbaD_polling_ccbdone(acb, poll_ccb);
3211                 break;
3212         }
3213         return rtn;
3214 }
3215
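/*
 * Hand the CCB pool's DMA address to the IOP: types A and C only need the
 * upper 32 bits when they are non-zero, while types B and D also program
 * the post/done queue window kept in their DMA-coherent message unit.
 */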
3216 static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
3217 {
3218         uint32_t cdb_phyaddr, cdb_phyaddr_hi32;
3219         dma_addr_t dma_coherent_handle;
3220
3221         /*
3222         ********************************************************************
3223         ** here we need to tell iop 331 our freeccb.HighPart
3224         ** if freeccb.HighPart is not zero
3225         ********************************************************************
3226         */
3227         switch (acb->adapter_type) {
3228         case ACB_ADAPTER_TYPE_B:
3229         case ACB_ADAPTER_TYPE_D:
3230                 dma_coherent_handle = acb->dma_coherent_handle2;
3231                 break;
3232         default:
3233                 dma_coherent_handle = acb->dma_coherent_handle;
3234                 break;
3235         }
3236         cdb_phyaddr = lower_32_bits(dma_coherent_handle);
3237         cdb_phyaddr_hi32 = upper_32_bits(dma_coherent_handle);
3238         acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
3239         /*
3240         ***********************************************************************
3241         **    if adapter type B, set window of "post command Q"
3242         ***********************************************************************
3243         */
3244         switch (acb->adapter_type) {
3245
3246         case ACB_ADAPTER_TYPE_A: {
3247                 if (cdb_phyaddr_hi32 != 0) {
3248                         struct MessageUnit_A __iomem *reg = acb->pmuA;
3249                         writel(ARCMSR_SIGNATURE_SET_CONFIG, \
3250                                                 &reg->message_rwbuffer[0]);
3251                         writel(cdb_phyaddr_hi32, &reg->message_rwbuffer[1]);
3252                         writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, \
3253                                                         &reg->inbound_msgaddr0);
3254                         if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
3255                                 printk(KERN_NOTICE "arcmsr%d: set ccb high"
3256                                         " part physical address timeout\n",
3257                                         acb->host->host_no);
3258                                 return 1;
3259                         }
3260                 }
3261                 }
3262                 break;
3263
3264         case ACB_ADAPTER_TYPE_B: {
3265                 uint32_t __iomem *rwbuffer;
3266
3267                 struct MessageUnit_B *reg = acb->pmuB;
3268                 reg->postq_index = 0;
3269                 reg->doneq_index = 0;
3270                 writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell);
3271                 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
3272                         printk(KERN_NOTICE "arcmsr%d: cannot set driver mode\n",
3273                                 acb->host->host_no);
3274                         return 1;
3275                 }
3276                 rwbuffer = reg->message_rwbuffer;
3277                 /* driver "set config" signature */
3278                 writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
3279                 /* normal should be zero */
3280                 writel(cdb_phyaddr_hi32, rwbuffer++);
3281                 /* postQ size (256 + 8)*4        */
3282                 writel(cdb_phyaddr, rwbuffer++);
3283                 /* doneQ size (256 + 8)*4        */
3284                 writel(cdb_phyaddr + 1056, rwbuffer++);
3285                 /* ccb maxQ size must be --> [(256 + 8)*4]*/
3286                 writel(1056, rwbuffer);
3287
3288                 writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell);
3289                 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
3290                         printk(KERN_NOTICE "arcmsr%d: 'set command Q window'"
3291                                 " timeout\n", acb->host->host_no);
3292                         return 1;
3293                 }
3294                 writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
3295                 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
3296                         pr_err("arcmsr%d: can't set driver mode.\n",
3297                                 acb->host->host_no);
3298                         return 1;
3299                 }
3300                 }
3301                 break;
3302         case ACB_ADAPTER_TYPE_C: {
3303                 if (cdb_phyaddr_hi32 != 0) {
3304                         struct MessageUnit_C __iomem *reg = acb->pmuC;
3305
3306                         printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x\n",
3307                                         acb->adapter_index, cdb_phyaddr_hi32);
3308                         writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->msgcode_rwbuffer[0]);
3309                         writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[1]);
3310                         writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
3311                         writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
3312                         if (!arcmsr_hbaC_wait_msgint_ready(acb)) {
3313                                 printk(KERN_NOTICE "arcmsr%d: 'set command Q window'"
3314                                         " timeout\n", acb->host->host_no);
3315                                 return 1;
3316                         }
3317                 }
3318                 }
3319                 break;
3320         case ACB_ADAPTER_TYPE_D: {
3321                 uint32_t __iomem *rwbuffer;
3322                 struct MessageUnit_D *reg = acb->pmuD;
3323                 reg->postq_index = 0;
3324                 reg->doneq_index = 0;
3325                 rwbuffer = reg->msgcode_rwbuffer;
3326                 writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
3327                 writel(cdb_phyaddr_hi32, rwbuffer++);
3328                 writel(cdb_phyaddr, rwbuffer++);
3329                 writel(cdb_phyaddr + (ARCMSR_MAX_ARC1214_POSTQUEUE *
3330                         sizeof(struct InBound_SRB)), rwbuffer++);
3331                 writel(0x100, rwbuffer);
3332                 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, reg->inbound_msgaddr0);
3333                 if (!arcmsr_hbaD_wait_msgint_ready(acb)) {
3334                         pr_notice("arcmsr%d: 'set command Q window' timeout\n",
3335                                 acb->host->host_no);
3336                         return 1;
3337                 }
3338                 }
3339                 break;
3340         }
3341         return 0;
3342 }
3343
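/*
 * Busy-wait until the firmware-ok bit appears in the outbound message
 * register of the respective message unit.
 */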
3344 static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
3345 {
3346         uint32_t firmware_state = 0;
3347         switch (acb->adapter_type) {
3348
3349         case ACB_ADAPTER_TYPE_A: {
3350                 struct MessageUnit_A __iomem *reg = acb->pmuA;
3351                 do {
3352                         firmware_state = readl(&reg->outbound_msgaddr1);
3353                 } while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0);
3354                 }
3355                 break;
3356
3357         case ACB_ADAPTER_TYPE_B: {
3358                 struct MessageUnit_B *reg = acb->pmuB;
3359                 do {
3360                         firmware_state = readl(reg->iop2drv_doorbell);
3361                 } while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
3362                 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
3363                 }
3364                 break;
3365         case ACB_ADAPTER_TYPE_C: {
3366                 struct MessageUnit_C __iomem *reg = acb->pmuC;
3367                 do {
3368                         firmware_state = readl(&reg->outbound_msgaddr1);
3369                 } while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
3370                 }
3371                 break;
3372         case ACB_ADAPTER_TYPE_D: {
3373                 struct MessageUnit_D *reg = acb->pmuD;
3374                 do {
3375                         firmware_state = readl(reg->outbound_msgaddr1);
3376                 } while ((firmware_state &
3377                         ARCMSR_ARC1214_MESSAGE_FIRMWARE_OK) == 0);
3378                 }
3379                 break;
3380         }
3381 }
3382
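/*
 * eternal_timer callbacks: periodically re-issue GET_CONFIG so the driver
 * can refresh the firmware device map. rq_map_token/ante_token_value are
 * used to throttle the requests when the firmware stops answering, and the
 * timer is re-armed on every pass.
 */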
3383 static void arcmsr_hbaA_request_device_map(struct AdapterControlBlock *acb)
3384 {
3385         struct MessageUnit_A __iomem *reg = acb->pmuA;
3386         if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
3387                 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3388                 return;
3389         } else {
3390                 acb->fw_flag = FW_NORMAL;
3391                 if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)){
3392                         atomic_set(&acb->rq_map_token, 16);
3393                 }
3394                 atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
3395                 if (atomic_dec_and_test(&acb->rq_map_token)) {
3396                         mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3397                         return;
3398                 }
3399                 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
3400                 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3401         }
3402         return;
3403 }
3404
3405 static void arcmsr_hbaB_request_device_map(struct AdapterControlBlock *acb)
3406 {
3407         struct MessageUnit_B *reg = acb->pmuB;
3408         if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
3409                 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3410                 return;
3411         } else {
3412                 acb->fw_flag = FW_NORMAL;
3413                 if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) {
3414                         atomic_set(&acb->rq_map_token, 16);
3415                 }
3416                 atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
3417                 if (atomic_dec_and_test(&acb->rq_map_token)) {
3418                         mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3419                         return;
3420                 }
3421                 writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
3422                 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3423         }
3424         return;
3425 }
3426
3427 static void arcmsr_hbaC_request_device_map(struct AdapterControlBlock *acb)
3428 {
3429         struct MessageUnit_C __iomem *reg = acb->pmuC;
3430         if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0) || ((acb->acb_flags & ACB_F_ABORT) != 0)) {
3431                 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3432                 return;
3433         } else {
3434                 acb->fw_flag = FW_NORMAL;
3435                 if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) {
3436                         atomic_set(&acb->rq_map_token, 16);
3437                 }
3438                 atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
3439                 if (atomic_dec_and_test(&acb->rq_map_token)) {
3440                         mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3441                         return;
3442                 }
3443                 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
3444                 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
3445                 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3446         }
3447         return;
3448 }
3449
3450 static void arcmsr_hbaD_request_device_map(struct AdapterControlBlock *acb)
3451 {
3452         struct MessageUnit_D *reg = acb->pmuD;
3453
3454         if (unlikely(atomic_read(&acb->rq_map_token) == 0) ||
3455                 ((acb->acb_flags & ACB_F_BUS_RESET) != 0) ||
3456                 ((acb->acb_flags & ACB_F_ABORT) != 0)) {
3457                 mod_timer(&acb->eternal_timer,
3458                         jiffies + msecs_to_jiffies(6 * HZ));
3459         } else {
3460                 acb->fw_flag = FW_NORMAL;
3461                 if (atomic_read(&acb->ante_token_value) ==
3462                         atomic_read(&acb->rq_map_token)) {
3463                         atomic_set(&acb->rq_map_token, 16);
3464                 }
3465                 atomic_set(&acb->ante_token_value,
3466                         atomic_read(&acb->rq_map_token));
3467                 if (atomic_dec_and_test(&acb->rq_map_token)) {
3468                         mod_timer(&acb->eternal_timer, jiffies +
3469                                 msecs_to_jiffies(6 * HZ));
3470                         return;
3471                 }
3472                 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG,
3473                         reg->inbound_msgaddr0);
3474                 mod_timer(&acb->eternal_timer, jiffies +
3475                         msecs_to_jiffies(6 * HZ));
3476         }
3477 }
3478
3479 static void arcmsr_request_device_map(unsigned long pacb)
3480 {
3481         struct AdapterControlBlock *acb = (struct AdapterControlBlock *)pacb;
3482         switch (acb->adapter_type) {
3483                 case ACB_ADAPTER_TYPE_A: {
3484                         arcmsr_hbaA_request_device_map(acb);
3485                 }
3486                 break;
3487                 case ACB_ADAPTER_TYPE_B: {
3488                         arcmsr_hbaB_request_device_map(acb);
3489                 }
3490                 break;
3491                 case ACB_ADAPTER_TYPE_C: {
3492                         arcmsr_hbaC_request_device_map(acb);
3493                 }
3494                 break;
3495                 case ACB_ADAPTER_TYPE_D:
3496                         arcmsr_hbaD_request_device_map(acb);
3497                 break;
3498         }
3499 }
3500
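/*
 * Tell the IOP to start its background rebuild task; one variant per
 * message-unit type, dispatched by arcmsr_start_adapter_bgrb() below.
 */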
3501 static void arcmsr_hbaA_start_bgrb(struct AdapterControlBlock *acb)
3502 {
3503         struct MessageUnit_A __iomem *reg = acb->pmuA;
3504         acb->acb_flags |= ACB_F_MSG_START_BGRB;
3505         writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
3506         if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
3507                 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background"
3508                                 " rebuild' timeout\n", acb->host->host_no);
3509         }
3510 }
3511
3512 static void arcmsr_hbaB_start_bgrb(struct AdapterControlBlock *acb)
3513 {
3514         struct MessageUnit_B *reg = acb->pmuB;
3515         acb->acb_flags |= ACB_F_MSG_START_BGRB;
3516         writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell);
3517         if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
3518                 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background"
3519                                 " rebuild' timeout\n", acb->host->host_no);
3520         }
3521 }
3522
3523 static void arcmsr_hbaC_start_bgrb(struct AdapterControlBlock *pACB)
3524 {
3525         struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
3526         pACB->acb_flags |= ACB_F_MSG_START_BGRB;
3527         writel(ARCMSR_INBOUND_MESG0_START_BGRB, &phbcmu->inbound_msgaddr0);
3528         writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell);
3529         if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
3530                 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background"
3531                                 " rebuild' timeout\n", pACB->host->host_no);
3532         }
3533         return;
3534 }
3535
3536 static void arcmsr_hbaD_start_bgrb(struct AdapterControlBlock *pACB)
3537 {
3538         struct MessageUnit_D *pmu = pACB->pmuD;
3539
3540         pACB->acb_flags |= ACB_F_MSG_START_BGRB;
3541         writel(ARCMSR_INBOUND_MESG0_START_BGRB, pmu->inbound_msgaddr0);
3542         if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
3543                 pr_notice("arcmsr%d: wait 'start adapter "
3544                         "background rebuild' timeout\n", pACB->host->host_no);
3545         }
3546 }
3547
3548 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
3549 {
3550         switch (acb->adapter_type) {
3551         case ACB_ADAPTER_TYPE_A:
3552                 arcmsr_hbaA_start_bgrb(acb);
3553                 break;
3554         case ACB_ADAPTER_TYPE_B:
3555                 arcmsr_hbaB_start_bgrb(acb);
3556                 break;
3557         case ACB_ADAPTER_TYPE_C:
3558                 arcmsr_hbaC_start_bgrb(acb);
3559                 break;
3560         case ACB_ADAPTER_TYPE_D:
3561                 arcmsr_hbaD_start_bgrb(acb);
3562                 break;
3563         }
3564 }
3565
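/*
 * Drain whatever the IOP left in the doorbell Qbuffer before (re)init:
 * acknowledge the outbound doorbell and signal DATA_READ_OK; types C and D
 * keep polling (200 passes, 20 ms apart) while the IOP keeps writing.
 */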
3566 static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
3567 {
3568         switch (acb->adapter_type) {
3569         case ACB_ADAPTER_TYPE_A: {
3570                 struct MessageUnit_A __iomem *reg = acb->pmuA;
3571                 uint32_t outbound_doorbell;
3572                 /* empty doorbell Qbuffer if the doorbell rang */
3573                 outbound_doorbell = readl(&reg->outbound_doorbell);
3574                 /*clear doorbell interrupt */
3575                 writel(outbound_doorbell, &reg->outbound_doorbell);
3576                 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
3577                 }
3578                 break;
3579
3580         case ACB_ADAPTER_TYPE_B: {
3581                 struct MessageUnit_B *reg = acb->pmuB;
3582                 /*clear interrupt and message state*/
3583                 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
3584                 writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
3585                 /* let IOP know data has been read */
3586                 }
3587                 break;
3588         case ACB_ADAPTER_TYPE_C: {
3589                 struct MessageUnit_C __iomem *reg = acb->pmuC;
3590                 uint32_t outbound_doorbell, i;
3591                 /* empty doorbell Qbuffer if the doorbell rang */
3592                 outbound_doorbell = readl(&reg->outbound_doorbell);
3593                 writel(outbound_doorbell, &reg->outbound_doorbell_clear);
3594                 writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
3595                 for (i = 0; i < 200; i++) {
3596                         msleep(20);
3597                         outbound_doorbell = readl(&reg->outbound_doorbell);
3598                         if (outbound_doorbell &
3599                                 ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
3600                                 writel(outbound_doorbell,
3601                                         &reg->outbound_doorbell_clear);
3602                                 writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK,
3603                                         &reg->inbound_doorbell);
3604                         } else
3605                                 break;
3606                 }
3607                 }
3608                 break;
3609         case ACB_ADAPTER_TYPE_D: {
3610                 struct MessageUnit_D *reg = acb->pmuD;
3611                 uint32_t outbound_doorbell, i;
3612                 /* empty the doorbell Qbuffer if the doorbell rang */
3613                 outbound_doorbell = readl(reg->outbound_doorbell);
3614                 writel(outbound_doorbell, reg->outbound_doorbell);
3615                 writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
3616                         reg->inbound_doorbell);
3617                 for (i = 0; i < 200; i++) {
3618                         msleep(20);
3619                         outbound_doorbell = readl(reg->outbound_doorbell);
3620                         if (outbound_doorbell &
3621                                 ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK) {
3622                                 writel(outbound_doorbell,
3623                                         reg->outbound_doorbell);
3624                                 writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
3625                                         reg->inbound_doorbell);
3626                         } else
3627                                 break;
3628                 }
3629                 }
3630                 break;
3631         }
3632 }
3633
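/*
** arcmsr_enable_eoi_mode: only the type B message unit needs an explicit
** ACTIVE_EOI_MODE handshake; the other adapter types return immediately.
*/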
3634 static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
3635 {
3636         switch (acb->adapter_type) {
3637         case ACB_ADAPTER_TYPE_A:
3638                 return;
3639         case ACB_ADAPTER_TYPE_B:
3640                 {
3641                         struct MessageUnit_B *reg = acb->pmuB;
3642                         writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell);
3643                         if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
3644                                 printk(KERN_NOTICE "arcmsr%d: 'enable EOI mode' timeout\n", acb->host->host_no);
3645                                 return;
3646                         }
3647                 }
3648                 break;
3649         case ACB_ADAPTER_TYPE_C:
3650                 return;
3651         }
3652         return;
3653 }
3654
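/*
** arcmsr_hardware_reset: save the first 64 bytes of PCI config space,
** assert the device specific reset (0x1680 via a reserved message unit
** register, 0x1880 via the diagnostic write-enable sequence, 0x1214 via
** the reset_request register, everything else via config offset 0x84),
** wait for the controller to come back, then restore the saved config
** space.
*/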
3655 static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
3656 {
3657         uint8_t value[64];
3658         int i, count = 0;
3659         struct MessageUnit_A __iomem *pmuA = acb->pmuA;
3660         struct MessageUnit_C __iomem *pmuC = acb->pmuC;
3661         struct MessageUnit_D *pmuD = acb->pmuD;
3662
3663         /* backup pci config data */
3664         printk(KERN_NOTICE "arcmsr%d: executing hw bus reset .....\n", acb->host->host_no);
3665         for (i = 0; i < 64; i++) {
3666                 pci_read_config_byte(acb->pdev, i, &value[i]);
3667         }
3668         /* hardware reset signal */
3669         if (acb->dev_id == 0x1680) {
3670                 writel(ARCMSR_ARC1680_BUS_RESET, &pmuA->reserved1[0]);
3671         } else if (acb->dev_id == 0x1880) {
3672                 do {
3673                         count++;
3674                         writel(0xF, &pmuC->write_sequence);
3675                         writel(0x4, &pmuC->write_sequence);
3676                         writel(0xB, &pmuC->write_sequence);
3677                         writel(0x2, &pmuC->write_sequence);
3678                         writel(0x7, &pmuC->write_sequence);
3679                         writel(0xD, &pmuC->write_sequence);
3680                 } while (((readl(&pmuC->host_diagnostic) & ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5));
3681                 writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic);
3682         } else if (acb->dev_id == 0x1214) {
3683                 writel(0x20, pmuD->reset_request);
3684         } else {
3685                 pci_write_config_byte(acb->pdev, 0x84, 0x20);
3686         }
3687         msleep(2000);
3688         /* write back pci config data */
3689         for (i = 0; i < 64; i++) {
3690                 pci_write_config_byte(acb->pdev, i, value[i]);
3691         }
3692         msleep(1000);
3693         return;
3694 }
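
/*
** arcmsr_iop_init: bring the IOP online - mask outbound interrupts, wait
** for the firmware ready flag, confirm the CCB layout with the IOP,
** start the background rebuild, drain the doorbell Qbuffer, enable EOI
** mode (type B only), re-enable the outbound interrupts and mark the
** adapter ACB_F_IOP_INITED.
*/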
3695 static void arcmsr_iop_init(struct AdapterControlBlock *acb)
3696 {
3697         uint32_t intmask_org;
3698         /* disable all outbound interrupt */
3699         intmask_org = arcmsr_disable_outbound_ints(acb);
3700         arcmsr_wait_firmware_ready(acb);
3701         arcmsr_iop_confirm(acb);
3702         /*start background rebuild*/
3703         arcmsr_start_adapter_bgrb(acb);
3704         /* empty the doorbell Qbuffer if the doorbell rang */
3705         arcmsr_clear_doorbell_queue_buffer(acb);
3706         arcmsr_enable_eoi_mode(acb);
3707         /* enable outbound Post Queue,outbound doorbell Interrupt */
3708         arcmsr_enable_outbound_ints(acb, intmask_org);
3709         acb->acb_flags |= ACB_F_IOP_INITED;
3710 }
3711
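/*
** arcmsr_iop_reset: if commands are still outstanding, mask outbound
** interrupts, ask the firmware to abort everything, flush the outbound
** post queue and return every in-flight CCB to the free list before
** re-enabling interrupts.  Returns the result of arcmsr_abort_allcmd(),
** or 0 when nothing was outstanding.
*/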
3712 static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
3713 {
3714         struct CommandControlBlock *ccb;
3715         uint32_t intmask_org;
3716         uint8_t rtnval = 0x00;
3717         int i = 0;
3718         unsigned long flags;
3719
3720         if (atomic_read(&acb->ccboutstandingcount) != 0) {
3721                 /* disable all outbound interrupt */
3722                 intmask_org = arcmsr_disable_outbound_ints(acb);
3723                 /* tell iop 331 to abort all outstanding commands */
3724                 rtnval = arcmsr_abort_allcmd(acb);
3725                 /* clear all outbound posted Q */
3726                 arcmsr_done4abort_postqueue(acb);
3727                 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3728                         ccb = acb->pccb_pool[i];
3729                         if (ccb->startdone == ARCMSR_CCB_START) {
3730                                 scsi_dma_unmap(ccb->pcmd);
3731                                 ccb->startdone = ARCMSR_CCB_DONE;
3732                                 ccb->ccb_flags = 0;
3733                                 spin_lock_irqsave(&acb->ccblist_lock, flags);
3734                                 list_add_tail(&ccb->list, &acb->ccb_free_list);
3735                                 spin_unlock_irqrestore(&acb->ccblist_lock, flags);
3736                         }
3737                 }
3738                 atomic_set(&acb->ccboutstandingcount, 0);
3739                 /* enable all outbound interrupt */
3740                 arcmsr_enable_outbound_ints(acb, intmask_org);
3741                 return rtnval;
3742         }
3743         return rtnval;
3744 }
3745
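/*
** arcmsr_bus_reset: SCSI error-handler bus reset.  Each adapter type
** first tries a soft IOP reset; types A, C and D fall back to a full
** hardware reset and re-initialization (firmware spec re-read,
** background rebuild restarted, doorbell Qbuffer drained, interrupts
** re-enabled) when that fails, while type B simply reports FAILED.  On
** success the eternal_timer watchdog is rearmed.
*/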
3746 static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
3747 {
3748         struct AdapterControlBlock *acb;
3749         uint32_t intmask_org, outbound_doorbell;
3750         int retry_count = 0;
3751         int rtn = FAILED;
3752         acb = (struct AdapterControlBlock *) cmd->device->host->hostdata;
3753         printk(KERN_ERR "arcmsr: executing bus reset eh.....num_resets = %d, num_aborts = %d\n", acb->num_resets, acb->num_aborts);
3754         acb->num_resets++;
3755
3756         switch (acb->adapter_type) {
3757                 case ACB_ADAPTER_TYPE_A:{
3758                         if (acb->acb_flags & ACB_F_BUS_RESET){
3759                                 long timeout;
3760                                 printk(KERN_ERR "arcmsr: there is a bus reset eh proceeding.......\n");
3761                                 timeout = wait_event_timeout(wait_q, (acb->acb_flags & ACB_F_BUS_RESET) == 0, 220*HZ);
3762                                 if (timeout) {
3763                                         return SUCCESS;
3764                                 }
3765                         }
3766                         acb->acb_flags |= ACB_F_BUS_RESET;
3767                         if (!arcmsr_iop_reset(acb)) {
3768                                 struct MessageUnit_A __iomem *reg;
3769                                 reg = acb->pmuA;
3770                                 arcmsr_hardware_reset(acb);
3771                                 acb->acb_flags &= ~ACB_F_IOP_INITED;
3772 sleep_again:
3773                                 ssleep(ARCMSR_SLEEPTIME);
3774                                 if ((readl(&reg->outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
3775                                         printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n", acb->host->host_no, retry_count);
3776                                         if (retry_count > ARCMSR_RETRYCOUNT) {
3777                                                 acb->fw_flag = FW_DEADLOCK;
3778                                                 printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n", acb->host->host_no);
3779                                                 return FAILED;
3780                                         }
3781                                         retry_count++;
3782                                         goto sleep_again;
3783                                 }
3784                                 acb->acb_flags |= ACB_F_IOP_INITED;
3785                                 /* disable all outbound interrupt */
3786                                 intmask_org = arcmsr_disable_outbound_ints(acb);
3787                                 arcmsr_get_firmware_spec(acb);
3788                                 arcmsr_start_adapter_bgrb(acb);
3789                                 /* clear Qbuffer if door bell ringed */
3790                                 outbound_doorbell = readl(&reg->outbound_doorbell);
3791                                 writel(outbound_doorbell, &reg->outbound_doorbell); /*clear interrupt */
3792                                 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
3793                                 /* enable outbound Post Queue,outbound doorbell Interrupt */
3794                                 arcmsr_enable_outbound_ints(acb, intmask_org);
3795                                 atomic_set(&acb->rq_map_token, 16);
3796                                 atomic_set(&acb->ante_token_value, 16);
3797                                 acb->fw_flag = FW_NORMAL;
3798                                 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3799                                 acb->acb_flags &= ~ACB_F_BUS_RESET;
3800                                 rtn = SUCCESS;
3801                                 printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
3802                         } else {
3803                                 acb->acb_flags &= ~ACB_F_BUS_RESET;
3804                                 atomic_set(&acb->rq_map_token, 16);
3805                                 atomic_set(&acb->ante_token_value, 16);
3806                                 acb->fw_flag = FW_NORMAL;
3807                                 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
3808                                 rtn = SUCCESS;
3809                         }
3810                         break;
3811                 }
3812                 case ACB_ADAPTER_TYPE_B:{
3813                         acb->acb_flags |= ACB_F_BUS_RESET;
3814                         if (!arcmsr_iop_reset(acb)) {
3815                                 acb->acb_flags &= ~ACB_F_BUS_RESET;
3816                                 rtn = FAILED;
3817                         } else {
3818                                 acb->acb_flags &= ~ACB_F_BUS_RESET;
3819                                 atomic_set(&acb->rq_map_token, 16);
3820                                 atomic_set(&acb->ante_token_value, 16);
3821                                 acb->fw_flag = FW_NORMAL;
3822                                 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3823                                 rtn = SUCCESS;
3824                         }
3825                         break;
3826                 }
3827                 case ACB_ADAPTER_TYPE_C:{
3828                         if (acb->acb_flags & ACB_F_BUS_RESET) {
3829                                 long timeout;
3830                                 printk(KERN_ERR "arcmsr: there is a bus reset eh proceeding.......\n");
3831                                 timeout = wait_event_timeout(wait_q, (acb->acb_flags & ACB_F_BUS_RESET) == 0, 220*HZ);
3832                                 if (timeout) {
3833                                         return SUCCESS;
3834                                 }
3835                         }
3836                         acb->acb_flags |= ACB_F_BUS_RESET;
3837                         if (!arcmsr_iop_reset(acb)) {
3838                                 struct MessageUnit_C __iomem *reg;
3839                                 reg = acb->pmuC;
3840                                 arcmsr_hardware_reset(acb);
3841                                 acb->acb_flags &= ~ACB_F_IOP_INITED;
3842 sleep:
3843                                 ssleep(ARCMSR_SLEEPTIME);
3844                                 if ((readl(&reg->host_diagnostic) & 0x04) != 0) {
3845                                         printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n", acb->host->host_no, retry_count);
3846                                         if (retry_count > ARCMSR_RETRYCOUNT) {
3847                                                 acb->fw_flag = FW_DEADLOCK;
3848                                                 printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n", acb->host->host_no);
3849                                                 return FAILED;
3850                                         }
3851                                         retry_count++;
3852                                         goto sleep;
3853                                 }
3854                                 acb->acb_flags |= ACB_F_IOP_INITED;
3855                                 /* disable all outbound interrupt */
3856                                 intmask_org = arcmsr_disable_outbound_ints(acb);
3857                                 arcmsr_get_firmware_spec(acb);
3858                                 arcmsr_start_adapter_bgrb(acb);
3859                                 /* clear Qbuffer if door bell ringed */
3860                                 arcmsr_clear_doorbell_queue_buffer(acb);
3861                                 /* enable outbound Post Queue,outbound doorbell Interrupt */
3862                                 arcmsr_enable_outbound_ints(acb, intmask_org);
3863                                 atomic_set(&acb->rq_map_token, 16);
3864                                 atomic_set(&acb->ante_token_value, 16);
3865                                 acb->fw_flag = FW_NORMAL;
3866                                 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3867                                 acb->acb_flags &= ~ACB_F_BUS_RESET;
3868                                 rtn = SUCCESS;
3869                                 printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
3870                         } else {
3871                                 acb->acb_flags &= ~ACB_F_BUS_RESET;
3872                                 atomic_set(&acb->rq_map_token, 16);
3873                                 atomic_set(&acb->ante_token_value, 16);
3874                                 acb->fw_flag = FW_NORMAL;
3875                                 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
3876                                 rtn = SUCCESS;
3877                         }
3878                         break;
3879                 }
3880                 case ACB_ADAPTER_TYPE_D: {
3881                         if (acb->acb_flags & ACB_F_BUS_RESET) {
3882                                 long timeout;
3883                                 pr_notice("arcmsr: there is a bus reset"
3884                                         " eh proceeding.......\n");
3885                                 timeout = wait_event_timeout(wait_q, (acb->acb_flags
3886                                         & ACB_F_BUS_RESET) == 0, 220 * HZ);
3887                                 if (timeout)
3888                                         return SUCCESS;
3889                         }
3890                         acb->acb_flags |= ACB_F_BUS_RESET;
3891                         if (!arcmsr_iop_reset(acb)) {
3892                                 struct MessageUnit_D *reg;
3893                                 reg = acb->pmuD;
3894                                 arcmsr_hardware_reset(acb);
3895                                 acb->acb_flags &= ~ACB_F_IOP_INITED;
3896                         nap:
3897                                 ssleep(ARCMSR_SLEEPTIME);
3898                                 if ((readl(reg->sample_at_reset) & 0x80) != 0) {
3899                                         pr_err("arcmsr%d: waiting for "
3900                                                 "hw bus reset return, retry=%d\n",
3901                                                 acb->host->host_no, retry_count);
3902                                         if (retry_count > ARCMSR_RETRYCOUNT) {
3903                                                 acb->fw_flag = FW_DEADLOCK;
3904                                                 pr_err("arcmsr%d: waiting for hw bus"
3905                                                         " reset return, "
3906                                                         "RETRY TERMINATED!!\n",
3907                                                         acb->host->host_no);
3908                                                 return FAILED;
3909                                         }
3910                                         retry_count++;
3911                                         goto nap;
3912                                 }
3913                                 acb->acb_flags |= ACB_F_IOP_INITED;
3914                                 /* disable all outbound interrupt */
3915                                 intmask_org = arcmsr_disable_outbound_ints(acb);
3916                                 arcmsr_get_firmware_spec(acb);
3917                                 arcmsr_start_adapter_bgrb(acb);
3918                                 arcmsr_clear_doorbell_queue_buffer(acb);
3919                                 arcmsr_enable_outbound_ints(acb, intmask_org);
3920                                 atomic_set(&acb->rq_map_token, 16);
3921                                 atomic_set(&acb->ante_token_value, 16);
3922                                 acb->fw_flag = FW_NORMAL;
3923                                 mod_timer(&acb->eternal_timer,
3924                                         jiffies + msecs_to_jiffies(6 * HZ));
3925                                 acb->acb_flags &= ~ACB_F_BUS_RESET;
3926                                 rtn = SUCCESS;
3927                                 pr_err("arcmsr: scsi bus reset "
3928                                         "eh returns with success\n");
3929                         } else {
3930                                 acb->acb_flags &= ~ACB_F_BUS_RESET;
3931                                 atomic_set(&acb->rq_map_token, 16);
3932                                 atomic_set(&acb->ante_token_value, 16);
3933                                 acb->fw_flag = FW_NORMAL;
3934                                 mod_timer(&acb->eternal_timer,
3935                                         jiffies + msecs_to_jiffies(6 * HZ));
3936                                 rtn = SUCCESS;
3937                         }
3938                         break;
3939                 }
3940         }
3941         return rtn;
3942 }
3943
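/*
** arcmsr_abort_one_cmd: poll the adapter until the CCB being aborted has
** completed.
*/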
3944 static int arcmsr_abort_one_cmd(struct AdapterControlBlock *acb,
3945                 struct CommandControlBlock *ccb)
3946 {
3947         int rtn;
3948         rtn = arcmsr_polling_ccbdone(acb, ccb);
3949         return rtn;
3950 }
3951
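/*
** arcmsr_abort: SCSI error-handler command abort.  With outbound
** interrupts masked, find the CCB that carries this scsi_cmnd, mark it
** ARCMSR_CCB_ABORTED and poll for its completion.
*/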
3952 static int arcmsr_abort(struct scsi_cmnd *cmd)
3953 {
3954         struct AdapterControlBlock *acb =
3955                 (struct AdapterControlBlock *)cmd->device->host->hostdata;
3956         int i = 0;
3957         int rtn = FAILED;
3958         uint32_t intmask_org;
3959
3960         printk(KERN_NOTICE
3961                 "arcmsr%d: abort device command of scsi id = %d lun = %d\n",
3962                 acb->host->host_no, cmd->device->id, (u32)cmd->device->lun);
3963         acb->acb_flags |= ACB_F_ABORT;
3964         acb->num_aborts++;
3965         /*
3966         ************************************************
3967         ** all interrupt service routines are locked out here,
3968         ** so handle the abort as soon as possible and exit
3969         ************************************************
3970         */
3971         if (!atomic_read(&acb->ccboutstandingcount)) {
3972                 acb->acb_flags &= ~ACB_F_ABORT;
3973                 return rtn;
3974         }
3975
3976         intmask_org = arcmsr_disable_outbound_ints(acb);
3977         for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3978                 struct CommandControlBlock *ccb = acb->pccb_pool[i];
3979                 if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
3980                         ccb->startdone = ARCMSR_CCB_ABORTED;
3981                         rtn = arcmsr_abort_one_cmd(acb, ccb);
3982                         break;
3983                 }
3984         }
3985         acb->acb_flags &= ~ACB_F_ABORT;
3986         arcmsr_enable_outbound_ints(acb, intmask_org);
3987         return rtn;
3988 }
3989
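/*
** arcmsr_info: build the controller description reported through the
** SCSI host template.  A few entry-level device IDs clear the RAID6
** flag and fall through to the SATA group; unknown device IDs are
** reported as "unknown".
*/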
3990 static const char *arcmsr_info(struct Scsi_Host *host)
3991 {
3992         struct AdapterControlBlock *acb =
3993                 (struct AdapterControlBlock *) host->hostdata;
3994         static char buf[256];
3995         char *type;
3996         int raid6 = 1;
3997         switch (acb->pdev->device) {
3998         case PCI_DEVICE_ID_ARECA_1110:
3999         case PCI_DEVICE_ID_ARECA_1200:
4000         case PCI_DEVICE_ID_ARECA_1202:
4001         case PCI_DEVICE_ID_ARECA_1210:
4002                 raid6 = 0;
4003                 /*FALLTHRU*/
4004         case PCI_DEVICE_ID_ARECA_1120:
4005         case PCI_DEVICE_ID_ARECA_1130:
4006         case PCI_DEVICE_ID_ARECA_1160:
4007         case PCI_DEVICE_ID_ARECA_1170:
4008         case PCI_DEVICE_ID_ARECA_1201:
4009         case PCI_DEVICE_ID_ARECA_1220:
4010         case PCI_DEVICE_ID_ARECA_1230:
4011         case PCI_DEVICE_ID_ARECA_1260:
4012         case PCI_DEVICE_ID_ARECA_1270:
4013         case PCI_DEVICE_ID_ARECA_1280:
4014                 type = "SATA";
4015                 break;
4016         case PCI_DEVICE_ID_ARECA_1214:
4017         case PCI_DEVICE_ID_ARECA_1380:
4018         case PCI_DEVICE_ID_ARECA_1381:
4019         case PCI_DEVICE_ID_ARECA_1680:
4020         case PCI_DEVICE_ID_ARECA_1681:
4021         case PCI_DEVICE_ID_ARECA_1880:
4022                 type = "SAS/SATA";
4023                 break;
4024         default:
4025                 type = "unknown";
4026                 raid6 = 0;
4027                 break;
4028         }
4029         sprintf(buf, "Areca %s RAID Controller %s\narcmsr version %s\n",
4030                 type, raid6 ? "(RAID6 capable)" : "", ARCMSR_DRIVER_VERSION);
4031         return buf;
4032 }