/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SRB command to check
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
        uint16_t cflags;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        struct scsi_qla_host *vha = sp->fcport->vha;

        cflags = 0;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cflags = CF_WRITE;
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cflags = CF_READ;
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }
        return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 3) {
                iocbs += (dsds - 3) / 7;
                if ((dsds - 3) % 7)
                        iocbs++;
        }
        return (iocbs);
}
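
/*
 * Example: dsds == 10 needs the 3 DSDs held by the Command Type 2 IOCB
 * itself plus one Continuation Type 0 IOCB for the remaining 7, so this
 * returns 2.
 */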

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 2) {
                iocbs += (dsds - 2) / 5;
                if ((dsds - 2) % 5)
                        iocbs++;
        }
        return (iocbs);
}
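
/*
 * Example: dsds == 12 needs the 2 DSDs held by the Command Type 3 IOCB
 * itself plus two Continuation Type 1 IOCBs of 5 DSDs each, so this
 * returns 3.
 */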

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
        cont_entry_t *cont_pkt;
        struct req_que *req = vha->req;
        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) =
            __constant_cpu_to_le32(CONTINUE_TYPE);

        return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the Continuation Type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
        cont_a64_entry_t *cont_pkt;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
            __constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
            __constant_cpu_to_le32(CONTINUE_A64_TYPE);

        return (cont_pkt);
}

static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        uint8_t guard = scsi_host_get_guard(cmd->device->host);

        /* We always use DIFF Bundling for best performance */
        *fw_prot_opts = 0;

        /* Translate SCSI opcode to a protection opcode */
        switch (scsi_get_prot_op(cmd)) {
        case SCSI_PROT_READ_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_WRITE_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_READ_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_WRITE_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
                if (guard & SHOST_DIX_GUARD_IP)
                        *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
                else
                        *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        default:        /* Normal Request */
                *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        }

        return scsi_prot_sg_count(cmd);
}
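
/*
 * Note: for PASS operations with an IP-checksum DIX guard
 * (SHOST_DIX_GUARD_IP), PO_MODE_DIF_TCP_CKSUM presumably asks the firmware
 * to handle the IP-checksum guard used in host memory, converting to and
 * from the T10 CRC guard carried on the wire.
 */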

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Three DSDs are available in the Command Type 2 IOCB */
        avail_dsds = 3;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Seven DSDs are available in the Continuation
                         * Type 0 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
                        cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
                        avail_dsds = 7;
                }

                *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 3 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_A64_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Two DSDs are available in the Command Type 3 IOCB */
        avail_dsds = 2;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
        int             ret, nseg;
        unsigned long   flags;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        uint32_t        *clr_ptr;
        uint32_t        index;
        uint32_t        handle;
        cmd_entry_t     *cmd_pkt;
        uint16_t        cnt;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        struct device_reg_2xxx __iomem *reg;
        struct qla_hw_data *ha;
        struct req_que *req;
        struct rsp_que *rsp;
        char            tag[2];

        /* Setup device pointers. */
        ret = 0;
        vha = sp->fcport->vha;
        ha = vha->hw;
        reg = &ha->iobase->isp;
        cmd = GET_CMD_SP(sp);
        req = ha->req_q_map[0];
        rsp = ha->rsp_q_map[0];
        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
                    QLA_SUCCESS) {
                        return (QLA_FUNCTION_FAILED);
                }
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
                if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == req->num_outstanding_cmds)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else
                nseg = 0;

        tot_dsds = nseg;

        /* Calculate the number of request entries needed. */
        req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
                /* If still no head room then bail out */
                if (req->cnt < (req_cnt + 2))
                        goto queuing_error;
        }
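
        /*
         * Example: with a 2048-entry ring, ring_index == 100 and a chip out
         * pointer of 90, the free-space computation above wraps around the
         * ring: req->cnt = 2048 - (100 - 90) = 2038 entries.
         */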

        /* Build command packet */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        cmd_pkt = (cmd_entry_t *)req->ring_ptr;
        cmd_pkt->handle = handle;
        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set target ID and LUN number */
        SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
        cmd_pkt->lun = cpu_to_le16(cmd->device->lun);

        /* Update tagged queuing modifier */
        if (scsi_populate_tag_msg(cmd, tag)) {
                switch (tag[0]) {
                case HEAD_OF_QUEUE_TAG:
                        cmd_pkt->control_flags =
                            __constant_cpu_to_le16(CF_HEAD_TAG);
                        break;
                case ORDERED_QUEUE_TAG:
                        cmd_pkt->control_flags =
                            __constant_cpu_to_le16(CF_ORDERED_TAG);
                        break;
                default:
                        cmd_pkt->control_flags =
                            __constant_cpu_to_le16(CF_SIMPLE_TAG);
                        break;
                }
        } else {
                cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
        }

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla2x00_process_response_queue(rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return (QLA_SUCCESS);

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (QLA_FUNCTION_FAILED);
}

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);

        if (IS_P3P_TYPE(ha)) {
                qla82xx_start_iocbs(vha);
        } else {
                /* Adjust ring index. */
                req->ring_index++;
                if (req->ring_index == req->length) {
                        req->ring_index = 0;
                        req->ring_ptr = req->ring;
                } else
                        req->ring_ptr++;

                /* Set chip new ring index. */
                if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
                        WRT_REG_DWORD(req->req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
                } else if (IS_QLAFX00(ha)) {
                        WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
                        QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
                } else if (IS_FWI2_CAPABLE(ha)) {
                        WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
                } else {
                        WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
                                req->ring_index);
                        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
                }
        }
}

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
                        struct rsp_que *rsp, uint16_t loop_id,
                        uint64_t lun, uint8_t type)
{
        mrk_entry_t *mrk;
        struct mrk_entry_24xx *mrk24 = NULL;

        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        req = ha->req_q_map[0];
        mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
        if (mrk == NULL) {
                ql_log(ql_log_warn, base_vha, 0x3026,
                    "Failed to allocate Marker IOCB.\n");

                return (QLA_FUNCTION_FAILED);
        }

        mrk->entry_type = MARKER_TYPE;
        mrk->modifier = type;
        if (type != MK_SYNC_ALL) {
                if (IS_FWI2_CAPABLE(ha)) {
                        mrk24 = (struct mrk_entry_24xx *) mrk;
                        mrk24->nport_handle = cpu_to_le16(loop_id);
                        int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
                        host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
                        mrk24->vp_index = vha->vp_idx;
                        mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
                } else {
                        SET_TARGET_ID(ha, mrk->target, loop_id);
                        mrk->lun = cpu_to_le16((uint16_t)lun);
                }
        }
        wmb();

        qla2x00_start_iocbs(vha, req);

        return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
                struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
                uint8_t type)
{
        int ret;
        unsigned long flags = 0;

        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
        ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
        spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

        return (ret);
}

/*
 * qla2x00_issue_marker() - Issue a marker IOCB.
 *
 * The caller CAN hold the hardware lock, as indicated by the ha_locked
 * parameter. Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
        if (ha_locked) {
                if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        } else {
                if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        }
        vha->marker_needed = 0;

        return QLA_SUCCESS;
}

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
        uint16_t tot_dsds)
{
        uint32_t *cur_dsd = NULL;
        scsi_qla_host_t *vha;
        struct qla_hw_data *ha;
        struct scsi_cmnd *cmd;
        struct scatterlist *cur_seg;
        uint32_t *dsd_seg;
        void *next_dsd;
        uint8_t avail_dsds;
        uint8_t first_iocb = 1;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct ct6_dsd *ctx;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 6 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
                __constant_cpu_to_le32(COMMAND_TYPE_6);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return 0;
        }

        vha = sp->fcport->vha;
        ha = vha->hw;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }

        cur_seg = scsi_sglist(cmd);
        ctx = GET_CMD_CTX_SP(sp);

        while (tot_dsds) {
                avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
                    QLA_DSDS_PER_IOCB : tot_dsds;
                tot_dsds -= avail_dsds;
                dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

                dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
                    struct dsd_dma, list);
                next_dsd = dsd_ptr->dsd_addr;
                list_del(&dsd_ptr->list);
                ha->gbl_dsd_avail--;
                list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
                ctx->dsd_use_cnt++;
                ha->gbl_dsd_inuse++;

                if (first_iocb) {
                        first_iocb = 0;
                        dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
                        *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
                } else {
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(dsd_list_len);
                }
                cur_dsd = (uint32_t *)next_dsd;
                while (avail_dsds) {
                        dma_addr_t      sle_dma;

                        sle_dma = sg_dma_address(cur_seg);
                        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
                        cur_seg = sg_next(cur_seg);
                        avail_dsds--;
                }
        }

        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
        return 0;
}

/*
 * qla24xx_calc_dsd_lists() - Determine the number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
        uint16_t dsd_lists = 0;

        dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
        if (dsds % QLA_DSDS_PER_IOCB)
                dsd_lists++;
        return dsd_lists;
}
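
/*
 * Example: with QLA_DSDS_PER_IOCB == 37, a 40-segment transfer fills one
 * full DSD list and spills 3 descriptors into a second, so this returns 2.
 */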

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;
        struct req_que *req;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 7 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE_7);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        req = vha->req;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }

        /* One DSD is available in the Command Type 7 IOCB */
        avail_dsds = 1;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

struct fw_dif_context {
        uint32_t ref_tag;
        uint16_t app_tag;
        uint8_t ref_tag_mask[4];        /* Validation/Replacement Mask */
        uint8_t app_tag_mask[2];        /* Validation/Replacement Mask */
};
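
/*
 * For reference: each T10 DIF protection interval carries an 8-byte tuple
 * (2-byte guard tag, 2-byte application tag, 4-byte reference tag); the
 * masks above select which bytes the firmware validates or replaces.
 */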

/*
 * qla24xx_set_t10dif_tags() - Extract ref and app tags from the SCSI command.
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);

        switch (scsi_get_prot_type(cmd)) {
        case SCSI_PROT_DIF_TYPE0:
                /*
                 * No check for ql2xenablehba_err_chk, as it would be an
                 * I/O error if hba tag generation is not done.
                 */
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /*
         * For Type 2 protection: 16 bit GUARD + 32 bit REF tag has to
         * match LBA in CDB + N
         */
        case SCSI_PROT_DIF_TYPE2:
                pkt->app_tag = __constant_cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /* For Type 3 protection: 16 bit GUARD only */
        case SCSI_PROT_DIF_TYPE3:
                pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
                        pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
                                                                0x00;
                break;

        /*
         * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
         * 16 bit app tag.
         */
        case SCSI_PROT_DIF_TYPE1:
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));
                pkt->app_tag = __constant_cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;
        }
}

struct qla2_sgx {
        dma_addr_t              dma_addr;       /* OUT */
        uint32_t                dma_len;        /* OUT */

        uint32_t                tot_bytes;      /* IN */
        struct scatterlist      *cur_sg;        /* IN */

        /* for bookkeeping, bzero on initial invocation */
        uint32_t                bytes_consumed;
        uint32_t                num_bytes;
        uint32_t                tot_partial;

        /* for debugging */
        uint32_t                num_sg;
        srb_t                   *sp;
};

static int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
        uint32_t *partial)
{
        struct scatterlist *sg;
        uint32_t cumulative_partial, sg_len;
        dma_addr_t sg_dma_addr;

        if (sgx->num_bytes == sgx->tot_bytes)
                return 0;

        sg = sgx->cur_sg;
        cumulative_partial = sgx->tot_partial;

        sg_dma_addr = sg_dma_address(sg);
        sg_len = sg_dma_len(sg);

        sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

        if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
                sgx->dma_len = (blk_sz - cumulative_partial);
                sgx->tot_partial = 0;
                sgx->num_bytes += blk_sz;
                *partial = 0;
        } else {
                sgx->dma_len = sg_len - sgx->bytes_consumed;
                sgx->tot_partial += sgx->dma_len;
                *partial = 1;
        }

        sgx->bytes_consumed += sgx->dma_len;

        if (sg_len == sgx->bytes_consumed) {
                sg = sg_next(sg);
                sgx->num_sg++;
                sgx->cur_sg = sg;
                sgx->bytes_consumed = 0;
        }

        return 1;
}
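
/*
 * Each successful call above yields one DMA chunk: either the piece that
 * completes the current blk_sz protection interval (*partial == 0) or the
 * tail of the current scatterlist element (*partial == 1). For example,
 * with blk_sz == 512 a 1280-byte element yields two full 512-byte chunks
 * and a 256-byte partial chunk that is completed out of the next element.
 */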

int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
        uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg_prot;
        uint32_t *cur_dsd = dsd;
        uint16_t        used_dsds = tot_dsds;

        uint32_t        prot_int; /* protection interval */
        uint32_t        partial;
        struct qla2_sgx sgx;
        dma_addr_t      sle_dma;
        uint32_t        sle_dma_len, tot_prot_dma_len = 0;
        struct scsi_cmnd *cmd;
        struct scsi_qla_host *vha;

        memset(&sgx, 0, sizeof(struct qla2_sgx));
        if (sp) {
                vha = sp->fcport->vha;
                cmd = GET_CMD_SP(sp);
                prot_int = cmd->device->sector_size;

                sgx.tot_bytes = scsi_bufflen(cmd);
                sgx.cur_sg = scsi_sglist(cmd);
                sgx.sp = sp;

                sg_prot = scsi_prot_sglist(cmd);
        } else if (tc) {
                vha = tc->vha;
                prot_int      = tc->blk_sz;
                sgx.tot_bytes = tc->bufflen;
                sgx.cur_sg    = tc->sg;
                sg_prot       = tc->prot_sg;
        } else {
                BUG();
                return 1;
        }

        while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
                sle_dma = sgx.dma_addr;
                sle_dma_len = sgx.dma_len;
alloc_and_fill:
                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                    &((struct crc_context *)
                                            sp->u.scmd.ctx)->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sle_dma_len);
                avail_dsds--;

                if (partial == 0) {
                        /* Got a full protection interval */
                        sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
                        sle_dma_len = 8;

                        tot_prot_dma_len += sle_dma_len;
                        if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
                                tot_prot_dma_len = 0;
                                sg_prot = sg_next(sg_prot);
                        }

                        partial = 1; /* So as to not re-enter this block */
                        goto alloc_and_fill;
                }
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}
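
/*
 * Each DSD in these lists is 12 bytes (32-bit low address, 32-bit high
 * address, 32-bit length); a list sized (avail_dsds + 1) * 12 leaves one
 * spare slot for the entry that chains to the next list or carries the
 * null terminator.
 */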

int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
        uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg, *sgl;
        uint32_t *cur_dsd = dsd;
        int     i;
        uint16_t        used_dsds = tot_dsds;
        struct scsi_cmnd *cmd;
        struct scsi_qla_host *vha;

        if (sp) {
                cmd = GET_CMD_SP(sp);
                sgl = scsi_sglist(cmd);
                vha = sp->fcport->vha;
        } else if (tc) {
                sgl = tc->sg;
                vha = tc->vha;
        } else {
                BUG();
                return 1;
        }

        for_each_sg(sgl, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                    &((struct crc_context *)
                                            sp->u.scmd.ctx)->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);

                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
        uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg, *sgl;
        int     i;
        struct scsi_cmnd *cmd;
        uint32_t *cur_dsd = dsd;
        uint16_t used_dsds = tot_dsds;
        struct scsi_qla_host *vha;

        if (sp) {
                cmd = GET_CMD_SP(sp);
                sgl = scsi_prot_sglist(cmd);
                vha = sp->fcport->vha;
        } else if (tc) {
                vha = tc->vha;
                sgl = tc->prot_sg;
        } else {
                BUG();
                return 1;
        }

        ql_dbg(ql_dbg_tgt, vha, 0xe021,
                "%s: enter\n", __func__);

        for_each_sg(sgl, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                                QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                    &((struct crc_context *)
                                            sp->u.scmd.ctx)->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);

                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

                avail_dsds--;
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 *                                                      Type CRC_2 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of protection segments to transfer
 * @fw_prot_opts: Firmware protection options
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
        uint32_t                *cur_dsd, *fcp_dl;
        scsi_qla_host_t         *vha;
        struct scsi_cmnd        *cmd;
        int                     sgc;
        uint32_t                total_bytes = 0;
        uint32_t                data_bytes;
        uint32_t                dif_bytes;
        uint8_t                 bundling = 1;
        uint16_t                blk_size;
        uint8_t                 *clr_ptr;
        struct crc_context      *crc_ctx_pkt = NULL;
        struct qla_hw_data      *ha;
        uint8_t                 additional_fcpcdb_len;
        uint16_t                fcp_cmnd_len;
        struct fcp_cmnd         *fcp_cmnd;
        dma_addr_t              crc_ctx_dma;
        char                    tag[2];

        cmd = GET_CMD_SP(sp);

        sgc = 0;
        /* Update entry type to indicate Command Type CRC_2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);

        vha = sp->fcport->vha;
        ha = vha->hw;

        /* No data transfer */
        data_bytes = scsi_bufflen(cmd);
        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return QLA_SUCCESS;
        }

        cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_WRITE_DATA);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_READ_DATA);
        }

        if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
                bundling = 0;

        /* Allocate CRC context from global pool */
        crc_ctx_pkt = sp->u.scmd.ctx =
            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

        if (!crc_ctx_pkt)
                goto crc_queuing_error;

        /* Zero out CTX area. */
        clr_ptr = (uint8_t *)crc_ctx_pkt;
        memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

        crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

        sp->flags |= SRB_CRC_CTX_DMA_VALID;

        /* Set handle */
        crc_ctx_pkt->handle = cmd_pkt->handle;

        INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

        qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
            &crc_ctx_pkt->ref_tag, tot_prot_dsds);

        cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
        cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
        cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

        /* Determine SCSI command length -- align to 4 byte boundary */
        if (cmd->cmd_len > 16) {
                additional_fcpcdb_len = cmd->cmd_len - 16;
                if ((cmd->cmd_len % 4) != 0) {
                        /* SCSI cmd > 16 bytes must be multiple of 4 */
                        goto crc_queuing_error;
                }
                fcp_cmnd_len = 12 + cmd->cmd_len + 4;
        } else {
                additional_fcpcdb_len = 0;
                fcp_cmnd_len = 12 + 16 + 4;
        }

        fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

        fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
        if (cmd->sc_data_direction == DMA_TO_DEVICE)
                fcp_cmnd->additional_cdb_len |= 1;
        else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                fcp_cmnd->additional_cdb_len |= 2;

        int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
        memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
        cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
            LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
        cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
            MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
        fcp_cmnd->task_management = 0;

        /*
         * Update tagged queuing modifier if using command tag queuing
         */
        if (scsi_populate_tag_msg(cmd, tag)) {
                switch (tag[0]) {
                case HEAD_OF_QUEUE_TAG:
                    fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
                    break;
                case ORDERED_QUEUE_TAG:
                    fcp_cmnd->task_attribute = TSK_ORDERED;
                    break;
                default:
                    fcp_cmnd->task_attribute = TSK_SIMPLE;
                    break;
                }
        } else {
                fcp_cmnd->task_attribute = TSK_SIMPLE;
        }

        cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

        /* Compute dif len and adjust data len to include protection */
        dif_bytes = 0;
        blk_size = cmd->device->sector_size;
        dif_bytes = (data_bytes / blk_size) * 8;

        switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
        case SCSI_PROT_READ_INSERT:
        case SCSI_PROT_WRITE_STRIP:
            total_bytes = data_bytes;
            data_bytes += dif_bytes;
            break;

        case SCSI_PROT_READ_STRIP:
        case SCSI_PROT_WRITE_INSERT:
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
            total_bytes = data_bytes + dif_bytes;
            break;
        default:
            BUG();
        }
1402
1403         if (!qla2x00_hba_err_chk_enabled(sp))
1404                 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
1405         /* HBA error checking enabled */
1406         else if (IS_PI_UNINIT_CAPABLE(ha)) {
1407                 if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
1408                     || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1409                         SCSI_PROT_DIF_TYPE2))
1410                         fw_prot_opts |= BIT_10;
1411                 else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1412                     SCSI_PROT_DIF_TYPE3)
1413                         fw_prot_opts |= BIT_11;
1414         }
1415
1416         if (!bundling) {
1417                 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
1418         } else {
1419                 /*
1420                  * Configure Bundling if we need to fetch interlaving
1421                  * protection PCI accesses
1422                  */
1423                 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
1424                 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1425                 crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1426                                                         tot_prot_dsds);
1427                 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
1428         }
1429
1430         /* Finish the common fields of CRC pkt */
1431         crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1432         crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1433         crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
1434         crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
1435         /* Fibre channel byte count */
1436         cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1437         fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1438             additional_fcpcdb_len);
1439         *fcp_dl = htonl(total_bytes);
1440
1441         if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1442                 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
1443                 return QLA_SUCCESS;
1444         }
1445         /* Walks data segments */
1446
1447         cmd_pkt->control_flags |=
1448             __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1449
1450         if (!bundling && tot_prot_dsds) {
1451                 if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
1452                         cur_dsd, tot_dsds, NULL))
1453                         goto crc_queuing_error;
1454         } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1455                         (tot_dsds - tot_prot_dsds), NULL))
1456                 goto crc_queuing_error;
1457
1458         if (bundling && tot_prot_dsds) {
1459                 /* Walks dif segments */
1460                 cmd_pkt->control_flags |=
1461                         __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1462                 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
1463                 if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1464                                 tot_prot_dsds, NULL))
1465                         goto crc_queuing_error;
1466         }
1467         return QLA_SUCCESS;
1468
1469 crc_queuing_error:
1470         /* Cleanup will be performed by the caller */
1471
1472         return QLA_FUNCTION_FAILED;
1473 }
1474
1475 /**
1476  * qla24xx_start_scsi() - Send a SCSI command to the ISP
1477  * @sp: command to send to the ISP
1478  *
1479  * Returns non-zero if a failure occurred, else zero.
1480  */
1481 int
1482 qla24xx_start_scsi(srb_t *sp)
1483 {
1484         int             ret, nseg;
1485         unsigned long   flags;
1486         uint32_t        *clr_ptr;
1487         uint32_t        index;
1488         uint32_t        handle;
1489         struct cmd_type_7 *cmd_pkt;
1490         uint16_t        cnt;
1491         uint16_t        req_cnt;
1492         uint16_t        tot_dsds;
1493         struct req_que *req = NULL;
1494         struct rsp_que *rsp = NULL;
1495         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1496         struct scsi_qla_host *vha = sp->fcport->vha;
1497         struct qla_hw_data *ha = vha->hw;
1498         char            tag[2];
1499
1500         /* Setup device pointers. */
1501         ret = 0;
1502
1503         qla25xx_set_que(sp, &rsp);
1504         req = vha->req;
1505
1506         /* So we know we haven't pci_map'ed anything yet */
1507         tot_dsds = 0;
1508
1509         /* Send marker if required */
1510         if (vha->marker_needed != 0) {
1511                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1512                     QLA_SUCCESS)
1513                         return QLA_FUNCTION_FAILED;
1514                 vha->marker_needed = 0;
1515         }
1516
1517         /* Acquire ring specific lock */
1518         spin_lock_irqsave(&ha->hardware_lock, flags);
1519
1520         /* Check for room in outstanding command list. */
1521         handle = req->current_outstanding_cmd;
1522         for (index = 1; index < req->num_outstanding_cmds; index++) {
1523                 handle++;
1524                 if (handle == req->num_outstanding_cmds)
1525                         handle = 1;
1526                 if (!req->outstanding_cmds[handle])
1527                         break;
1528         }
1529         if (index == req->num_outstanding_cmds)
1530                 goto queuing_error;
1531
1532         /* Map the sg table so we have an accurate count of sg entries needed */
1533         if (scsi_sg_count(cmd)) {
1534                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1535                     scsi_sg_count(cmd), cmd->sc_data_direction);
1536                 if (unlikely(!nseg))
1537                         goto queuing_error;
1538         } else
1539                 nseg = 0;
1540
1541         tot_dsds = nseg;
1542         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
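             /*
              * Ring-space check: free slots are the distance from ring_index
              * to the controller's out pointer; two entries are kept in
              * reserve so a full ring is never mistaken for an empty one.
              */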
1543         if (req->cnt < (req_cnt + 2)) {
1544                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1545                     RD_REG_DWORD_RELAXED(req->req_q_out);
1546                 if (req->ring_index < cnt)
1547                         req->cnt = cnt - req->ring_index;
1548                 else
1549                         req->cnt = req->length -
1550                                 (req->ring_index - cnt);
1551                 if (req->cnt < (req_cnt + 2))
1552                         goto queuing_error;
1553         }
1554
1555         /* Build command packet. */
1556         req->current_outstanding_cmd = handle;
1557         req->outstanding_cmds[handle] = sp;
1558         sp->handle = handle;
1559         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1560         req->cnt -= req_cnt;
1561
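             /* MAKE_HANDLE folds the request-queue id into the upper word of
              * the handle so the completion path can match the IOCB back to
              * this queue. */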
1562         cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1563         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1564
1565         /* Zero out remaining portion of packet. */
1566         /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1567         clr_ptr = (uint32_t *)cmd_pkt + 2;
1568         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1569         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1570
1571         /* Set NPORT-ID and LUN number*/
1572         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1573         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1574         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1575         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1576         cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1577
1578         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1579         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1580
1581         /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
1582         if (scsi_populate_tag_msg(cmd, tag)) {
1583                 switch (tag[0]) {
1584                 case HEAD_OF_QUEUE_TAG:
1585                         cmd_pkt->task = TSK_HEAD_OF_QUEUE;
1586                         break;
1587                 case ORDERED_QUEUE_TAG:
1588                         cmd_pkt->task = TSK_ORDERED;
1589                         break;
1590                 default:
1591                         cmd_pkt->task = TSK_SIMPLE;
1592                         break;
1593                 }
1594         } else {
1595                 cmd_pkt->task = TSK_SIMPLE;
1596         }
1597
1598         /* Load SCSI command packet. */
1599         memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1600         host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1601
1602         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1603
1604         /* Build IOCB segments */
1605         qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
1606
1607         /* Set total data segment count. */
1608         cmd_pkt->entry_count = (uint8_t)req_cnt;
1609         /* Specify response queue number where completion should happen */
1610         cmd_pkt->entry_status = (uint8_t) rsp->id;
1611         wmb();
1612         /* Adjust ring index. */
1613         req->ring_index++;
1614         if (req->ring_index == req->length) {
1615                 req->ring_index = 0;
1616                 req->ring_ptr = req->ring;
1617         } else
1618                 req->ring_ptr++;
1619
1620         sp->flags |= SRB_DMA_VALID;
1621
1622         /* Set chip new ring index. */
1623         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1624         RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1625
1626         /* Manage unprocessed RIO/ZIO commands in response queue. */
1627         if (vha->flags.process_response_queue &&
1628                 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1629                 qla24xx_process_response_queue(vha, rsp);
1630
1631         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1632         return QLA_SUCCESS;
1633
1634 queuing_error:
1635         if (tot_dsds)
1636                 scsi_dma_unmap(cmd);
1637
1638         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1639
1640         return QLA_FUNCTION_FAILED;
1641 }
1642
1643 /**
1644  * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1645  * @sp: command to send to the ISP
1646  *
1647  * Returns non-zero if a failure occurred, else zero.
1648  */
1649 int
1650 qla24xx_dif_start_scsi(srb_t *sp)
1651 {
1652         int                     nseg;
1653         unsigned long           flags;
1654         uint32_t                *clr_ptr;
1655         uint32_t                index;
1656         uint32_t                handle;
1657         uint16_t                cnt;
1658         uint16_t                req_cnt = 0;
1659         uint16_t                tot_dsds;
1660         uint16_t                tot_prot_dsds;
1661         uint16_t                fw_prot_opts = 0;
1662         struct req_que          *req = NULL;
1663         struct rsp_que          *rsp = NULL;
1664         struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
1665         struct scsi_qla_host    *vha = sp->fcport->vha;
1666         struct qla_hw_data      *ha = vha->hw;
1667         struct cmd_type_crc_2   *cmd_pkt;
1668         uint32_t                status = 0;
1669
1670 #define QDSS_GOT_Q_SPACE        BIT_0
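/* Set once request-ring space has been claimed so queuing_error can undo it. */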
1671
1672         /* Only process protection or >16 cdb in this routine */
1673         if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1674                 if (cmd->cmd_len <= 16)
1675                         return qla24xx_start_scsi(sp);
1676         }
1677
1678         /* Setup device pointers. */
1679
1680         qla25xx_set_que(sp, &rsp);
1681         req = vha->req;
1682
1683         /* So we know we haven't pci_map'ed anything yet */
1684         tot_dsds = 0;
1685
1686         /* Send marker if required */
1687         if (vha->marker_needed != 0) {
1688                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1689                     QLA_SUCCESS)
1690                         return QLA_FUNCTION_FAILED;
1691                 vha->marker_needed = 0;
1692         }
1693
1694         /* Acquire ring specific lock */
1695         spin_lock_irqsave(&ha->hardware_lock, flags);
1696
1697         /* Check for room in outstanding command list. */
1698         handle = req->current_outstanding_cmd;
1699         for (index = 1; index < req->num_outstanding_cmds; index++) {
1700                 handle++;
1701                 if (handle == req->num_outstanding_cmds)
1702                         handle = 1;
1703                 if (!req->outstanding_cmds[handle])
1704                         break;
1705         }
1706
1707         if (index == req->num_outstanding_cmds)
1708                 goto queuing_error;
1709
1710         /* Compute number of required data segments */
1711         /* Map the sg table so we have an accurate count of sg entries needed */
1712         if (scsi_sg_count(cmd)) {
1713                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1714                     scsi_sg_count(cmd), cmd->sc_data_direction);
1715                 if (unlikely(!nseg))
1716                         goto queuing_error;
1717                 else
1718                         sp->flags |= SRB_DMA_VALID;
1719
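                     /*
                      * For HBA-managed insert/strip the transfer is split
                      * into sector-sized chunks, so re-count the descriptors
                      * one block at a time instead of using the raw sg count.
                      */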
1720                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1721                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1722                         struct qla2_sgx sgx;
1723                         uint32_t        partial;
1724
1725                         memset(&sgx, 0, sizeof(struct qla2_sgx));
1726                         sgx.tot_bytes = scsi_bufflen(cmd);
1727                         sgx.cur_sg = scsi_sglist(cmd);
1728                         sgx.sp = sp;
1729
1730                         nseg = 0;
1731                         while (qla24xx_get_one_block_sg(
1732                             cmd->device->sector_size, &sgx, &partial))
1733                                 nseg++;
1734                 }
1735         } else
1736                 nseg = 0;
1737
1738         /* number of required data segments */
1739         tot_dsds = nseg;
1740
1741         /* Compute number of required protection segments */
1742         if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1743                 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1744                     scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1745                 if (unlikely(!nseg))
1746                         goto queuing_error;
1747                 else
1748                         sp->flags |= SRB_CRC_PROT_DMA_VALID;
1749
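                     /*
                      * For insert/strip the firmware expects one protection
                      * descriptor per data sector, hence the recount below.
                      */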
1750                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1751                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1752                         nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1753                 }
1754         } else {
1755                 nseg = 0;
1756         }
1757
1758         req_cnt = 1;
1759         /* Total Data and protection sg segment(s) */
1760         tot_prot_dsds = nseg;
1761         tot_dsds += nseg;
1762         if (req->cnt < (req_cnt + 2)) {
1763                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1764                     RD_REG_DWORD_RELAXED(req->req_q_out);
1765                 if (req->ring_index < cnt)
1766                         req->cnt = cnt - req->ring_index;
1767                 else
1768                         req->cnt = req->length -
1769                                 (req->ring_index - cnt);
1770                 if (req->cnt < (req_cnt + 2))
1771                         goto queuing_error;
1772         }
1773
1774         status |= QDSS_GOT_Q_SPACE;
1775
1776         /* Build header part of command packet (excluding the OPCODE). */
1777         req->current_outstanding_cmd = handle;
1778         req->outstanding_cmds[handle] = sp;
1779         sp->handle = handle;
1780         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1781         req->cnt -= req_cnt;
1782
1783         /* Fill-in common area */
1784         cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1785         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1786
1787         clr_ptr = (uint32_t *)cmd_pkt + 2;
1788         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1789
1790         /* Set NPORT-ID and LUN number*/
1791         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1792         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1793         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1794         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1795
1796         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1797         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1798
1799         /* Total Data and protection segment(s) */
1800         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1801
1802         /* Build IOCB segments and adjust for data protection segments */
1803         if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1804             req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1805                 QLA_SUCCESS)
1806                 goto queuing_error;
1807
1808         cmd_pkt->entry_count = (uint8_t)req_cnt;
1809         /* Specify response queue number where completion should happen */
1810         cmd_pkt->entry_status = (uint8_t) rsp->id;
1811         cmd_pkt->timeout = __constant_cpu_to_le16(0);
1812         wmb();
1813
1814         /* Adjust ring index. */
1815         req->ring_index++;
1816         if (req->ring_index == req->length) {
1817                 req->ring_index = 0;
1818                 req->ring_ptr = req->ring;
1819         } else
1820                 req->ring_ptr++;
1821
1822         /* Set chip new ring index. */
1823         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1824         RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1825
1826         /* Manage unprocessed RIO/ZIO commands in response queue. */
1827         if (vha->flags.process_response_queue &&
1828             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1829                 qla24xx_process_response_queue(vha, rsp);
1830
1831         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1832
1833         return QLA_SUCCESS;
1834
1835 queuing_error:
1836         if (status & QDSS_GOT_Q_SPACE) {
1837                 req->outstanding_cmds[handle] = NULL;
1838                 req->cnt += req_cnt;
1839         }
1840         /* Cleanup will be performed by the caller (queuecommand) */
1841
1842         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1843         return QLA_FUNCTION_FAILED;
1844 }
1845
1846
1847 static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
1848 {
1849         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1850         struct qla_hw_data *ha = sp->fcport->vha->hw;
1851         int affinity = cmd->request->cpu;
1852
1853         if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
1854                 affinity < ha->max_rsp_queues - 1)
1855                 *rsp = ha->rsp_q_map[affinity + 1];
1856         else
1857                 *rsp = ha->rsp_q_map[0];
1858 }
1859
1860 /* Generic Control-SRB manipulation functions. */
1861
1862 /* hardware_lock assumed to be held. */
1863 void *
1864 qla2x00_alloc_iocbs_ready(scsi_qla_host_t *vha, srb_t *sp)
1865 {
1866         if (qla2x00_reset_active(vha))
1867                 return NULL;
1868
1869         return qla2x00_alloc_iocbs(vha, sp);
1870 }
1871
1872 void *
1873 qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1874 {
1875         struct qla_hw_data *ha = vha->hw;
1876         struct req_que *req = ha->req_q_map[0];
1877         device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1878         uint32_t index, handle;
1879         request_t *pkt;
1880         uint16_t cnt, req_cnt;
1881
1882         pkt = NULL;
1883         req_cnt = 1;
1884         handle = 0;
1885
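             /* A NULL sp (e.g. a marker entry) consumes no command-array slot. */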
1886         if (!sp)
1887                 goto skip_cmd_array;
1888
1889         /* Check for room in outstanding command list. */
1890         handle = req->current_outstanding_cmd;
1891         for (index = 1; index < req->num_outstanding_cmds; index++) {
1892                 handle++;
1893                 if (handle == req->num_outstanding_cmds)
1894                         handle = 1;
1895                 if (!req->outstanding_cmds[handle])
1896                         break;
1897         }
1898         if (index == req->num_outstanding_cmds) {
1899                 ql_log(ql_log_warn, vha, 0x700b,
1900                     "No room on outstanding cmd array.\n");
1901                 goto queuing_error;
1902         }
1903
1904         /* Prep command array. */
1905         req->current_outstanding_cmd = handle;
1906         req->outstanding_cmds[handle] = sp;
1907         sp->handle = handle;
1908
1909         /* Adjust entry-counts as needed. */
1910         if (sp->type != SRB_SCSI_CMD)
1911                 req_cnt = sp->iocbs;
1912
1913 skip_cmd_array:
1914         /* Check for room on request queue. */
1915         if (req->cnt < req_cnt + 2) {
1916                 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
1917                         cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
1918                 else if (IS_P3P_TYPE(ha))
1919                         cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
1920                 else if (IS_FWI2_CAPABLE(ha))
1921                         cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
1922                 else if (IS_QLAFX00(ha))
1923                         cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
1924                 else
1925                         cnt = qla2x00_debounce_register(
1926                             ISP_REQ_Q_OUT(ha, &reg->isp));
1927
1928                 if  (req->ring_index < cnt)
1929                         req->cnt = cnt - req->ring_index;
1930                 else
1931                         req->cnt = req->length -
1932                             (req->ring_index - cnt);
1933         }
1934         if (req->cnt < req_cnt + 2)
1935                 goto queuing_error;
1936
1937         /* Prep packet */
1938         req->cnt -= req_cnt;
1939         pkt = req->ring_ptr;
1940         memset(pkt, 0, REQUEST_ENTRY_SIZE);
1941         if (IS_QLAFX00(ha)) {
1942                 WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
1943                 WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
1944         } else {
1945                 pkt->entry_count = req_cnt;
1946                 pkt->handle = handle;
1947         }
1948
1949 queuing_error:
1950         return pkt;
1951 }
1952
1953 static void
1954 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1955 {
1956         struct srb_iocb *lio = &sp->u.iocb_cmd;
1957
1958         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1959         logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
1960         if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
1961                 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
1962         if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
1963                 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
1964         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1965         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1966         logio->port_id[1] = sp->fcport->d_id.b.area;
1967         logio->port_id[2] = sp->fcport->d_id.b.domain;
1968         logio->vp_index = sp->fcport->vha->vp_idx;
1969 }
1970
1971 static void
1972 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
1973 {
1974         struct qla_hw_data *ha = sp->fcport->vha->hw;
1975         struct srb_iocb *lio = &sp->u.iocb_cmd;
1976         uint16_t opts;
1977
1978         mbx->entry_type = MBX_IOCB_TYPE;
1979         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1980         mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
1981         opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
1982         opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
1983         if (HAS_EXTENDED_IDS(ha)) {
1984                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1985                 mbx->mb10 = cpu_to_le16(opts);
1986         } else {
1987                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
1988         }
1989         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1990         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1991             sp->fcport->d_id.b.al_pa);
1992         mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1993 }
1994
1995 static void
1996 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1997 {
1998         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1999         logio->control_flags =
2000             cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
2001         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2002         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2003         logio->port_id[1] = sp->fcport->d_id.b.area;
2004         logio->port_id[2] = sp->fcport->d_id.b.domain;
2005         logio->vp_index = sp->fcport->vha->vp_idx;
2006 }
2007
2008 static void
2009 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2010 {
2011         struct qla_hw_data *ha = sp->fcport->vha->hw;
2012
2013         mbx->entry_type = MBX_IOCB_TYPE;
2014         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2015         mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2016         mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2017             cpu_to_le16(sp->fcport->loop_id):
2018             cpu_to_le16(sp->fcport->loop_id << 8);
2019         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2020         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2021             sp->fcport->d_id.b.al_pa);
2022         mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
2023         /* Implicit: mbx->mbx10 = 0. */
2024 }
2025
2026 static void
2027 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2028 {
2029         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2030         logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2031         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2032         logio->vp_index = sp->fcport->vha->vp_idx;
2033 }
2034
2035 static void
2036 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2037 {
2038         struct qla_hw_data *ha = sp->fcport->vha->hw;
2039
2040         mbx->entry_type = MBX_IOCB_TYPE;
2041         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2042         mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2043         if (HAS_EXTENDED_IDS(ha)) {
2044                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2045                 mbx->mb10 = cpu_to_le16(BIT_0);
2046         } else {
2047                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2048         }
2049         mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2050         mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2051         mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2052         mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
2053         mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
2054 }
2055
2056 static void
2057 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2058 {
2059         uint32_t flags;
2060         uint64_t lun;
2061         struct fc_port *fcport = sp->fcport;
2062         scsi_qla_host_t *vha = fcport->vha;
2063         struct qla_hw_data *ha = vha->hw;
2064         struct srb_iocb *iocb = &sp->u.iocb_cmd;
2065         struct req_que *req = vha->req;
2066
2067         flags = iocb->u.tmf.flags;
2068         lun = iocb->u.tmf.lun;
2069
2070         tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2071         tsk->entry_count = 1;
2072         tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2073         tsk->nport_handle = cpu_to_le16(fcport->loop_id);
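             /* IOCB timeout: twice the resource-allocation timeout (R_A_TOV),
              * scaled to the units the firmware expects. */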
2074         tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2075         tsk->control_flags = cpu_to_le32(flags);
2076         tsk->port_id[0] = fcport->d_id.b.al_pa;
2077         tsk->port_id[1] = fcport->d_id.b.area;
2078         tsk->port_id[2] = fcport->d_id.b.domain;
2079         tsk->vp_index = fcport->vha->vp_idx;
2080
2081         if (flags == TCF_LUN_RESET) {
2082                 int_to_scsilun(lun, &tsk->lun);
2083                 host_to_fcp_swap((uint8_t *)&tsk->lun,
2084                         sizeof(tsk->lun));
2085         }
2086 }
2087
2088 static void
2089 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2090 {
2091         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2092
2093         els_iocb->entry_type = ELS_IOCB_TYPE;
2094         els_iocb->entry_count = 1;
2095         els_iocb->sys_define = 0;
2096         els_iocb->entry_status = 0;
2097         els_iocb->handle = sp->handle;
2098         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2099         els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2100         els_iocb->vp_index = sp->fcport->vha->vp_idx;
2101         els_iocb->sof_type = EST_SOFI3;
2102         els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2103
2104         els_iocb->opcode =
2105             sp->type == SRB_ELS_CMD_RPT ?
2106             bsg_job->request->rqst_data.r_els.els_code :
2107             bsg_job->request->rqst_data.h_els.command_code;
2108         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2109         els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2110         els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2111         els_iocb->control_flags = 0;
2112         els_iocb->rx_byte_count =
2113             cpu_to_le32(bsg_job->reply_payload.payload_len);
2114         els_iocb->tx_byte_count =
2115             cpu_to_le32(bsg_job->request_payload.payload_len);
2116
2117         els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2118             (bsg_job->request_payload.sg_list)));
2119         els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2120             (bsg_job->request_payload.sg_list)));
2121         els_iocb->tx_len = cpu_to_le32(sg_dma_len
2122             (bsg_job->request_payload.sg_list));
2123
2124         els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2125             (bsg_job->reply_payload.sg_list)));
2126         els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2127             (bsg_job->reply_payload.sg_list)));
2128         els_iocb->rx_len = cpu_to_le32(sg_dma_len
2129             (bsg_job->reply_payload.sg_list));
2130
2131         sp->fcport->vha->qla_stats.control_requests++;
2132 }
2133
2134 static void
2135 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2136 {
2137         uint16_t        avail_dsds;
2138         uint32_t        *cur_dsd;
2139         struct scatterlist *sg;
2140         int index;
2141         uint16_t tot_dsds;
2142         scsi_qla_host_t *vha = sp->fcport->vha;
2143         struct qla_hw_data *ha = vha->hw;
2144         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2145         int loop_iteration = 0;
2146         int cont_iocb_prsnt = 0;
2147         int entry_count = 1;
2148
2149         memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2150         ct_iocb->entry_type = CT_IOCB_TYPE;
2151         ct_iocb->entry_status = 0;
2152         ct_iocb->handle1 = sp->handle;
2153         SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2154         ct_iocb->status = __constant_cpu_to_le16(0);
2155         ct_iocb->control_flags = __constant_cpu_to_le16(0);
2156         ct_iocb->timeout = 0;
2157         ct_iocb->cmd_dsd_count =
2158             cpu_to_le16(bsg_job->request_payload.sg_cnt);
2159         ct_iocb->total_dsd_count =
2160             cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2161         ct_iocb->req_bytecount =
2162             cpu_to_le32(bsg_job->request_payload.payload_len);
2163         ct_iocb->rsp_bytecount =
2164             cpu_to_le32(bsg_job->reply_payload.payload_len);
2165
2166         ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2167             (bsg_job->request_payload.sg_list)));
2168         ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2169             (bsg_job->request_payload.sg_list)));
2170         ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2171
2172         ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2173             (bsg_job->reply_payload.sg_list)));
2174         ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2175             (bsg_job->reply_payload.sg_list)));
2176         ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2177
2178         avail_dsds = 1;
2179         cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2180         index = 0;
2181         tot_dsds = bsg_job->reply_payload.sg_cnt;
2182
2183         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2184                 dma_addr_t       sle_dma;
2185                 cont_a64_entry_t *cont_pkt;
2186
2187                 /* Allocate additional continuation packets? */
2188                 if (avail_dsds == 0) {
2189                         /*
2190                          * Five DSDs are available in the Cont.
2191                          * Type 1 IOCB.
2192                          */
2193                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2194                             vha->hw->req_q_map[0]);
2195                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2196                         avail_dsds = 5;
2197                         cont_iocb_prsnt = 1;
2198                         entry_count++;
2199                 }
2200
2201                 sle_dma = sg_dma_address(sg);
2202                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2203                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2204                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2205                 loop_iteration++;
2206                 avail_dsds--;
2207         }
2208         ct_iocb->entry_count = entry_count;
2209
2210         sp->fcport->vha->qla_stats.control_requests++;
2211 }
2212
2213 static void
2214 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2215 {
2216         uint16_t        avail_dsds;
2217         uint32_t        *cur_dsd;
2218         struct scatterlist *sg;
2219         int index;
2220         uint16_t tot_dsds;
2221         scsi_qla_host_t *vha = sp->fcport->vha;
2222         struct qla_hw_data *ha = vha->hw;
2223         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2224         int loop_iteration = 0;
2225         int cont_iocb_prsnt = 0;
2226         int entry_count = 1;
2227
2228         ct_iocb->entry_type = CT_IOCB_TYPE;
2229         ct_iocb->entry_status = 0;
2230         ct_iocb->sys_define = 0;
2231         ct_iocb->handle = sp->handle;
2232
2233         ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2234         ct_iocb->vp_index = sp->fcport->vha->vp_idx;
2235         ct_iocb->comp_status = __constant_cpu_to_le16(0);
2236
2237         ct_iocb->cmd_dsd_count =
2238             cpu_to_le16(bsg_job->request_payload.sg_cnt);
2239         ct_iocb->timeout = 0;
2240         ct_iocb->rsp_dsd_count =
2241             cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2242         ct_iocb->rsp_byte_count =
2243             cpu_to_le32(bsg_job->reply_payload.payload_len);
2244         ct_iocb->cmd_byte_count =
2245             cpu_to_le32(bsg_job->request_payload.payload_len);
2246         ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
2247             (bsg_job->request_payload.sg_list)));
2248         ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
2249            (bsg_job->request_payload.sg_list)));
2250         ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
2251             (bsg_job->request_payload.sg_list));
2252
2253         avail_dsds = 1;
2254         cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
2255         index = 0;
2256         tot_dsds = bsg_job->reply_payload.sg_cnt;
2257
2258         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2259                 dma_addr_t       sle_dma;
2260                 cont_a64_entry_t *cont_pkt;
2261
2262                 /* Allocate additional continuation packets? */
2263                 if (avail_dsds == 0) {
2264                         /*
2265                          * Five DSDs are available in the Cont.
2266                          * Type 1 IOCB.
2267                          */
2268                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2269                             ha->req_q_map[0]);
2270                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2271                         avail_dsds = 5;
2272                         cont_iocb_prsnt = 1;
2273                         entry_count++;
2274                 }
2275
2276                 sle_dma = sg_dma_address(sg);
2277                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2278                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2279                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2280                 loop_iteration++;
2281                 avail_dsds--;
2282         }
2283         ct_iocb->entry_count = entry_count;
2284 }
2285
2286 /**
2287  * qla82xx_start_scsi() - Send a SCSI command to the ISP
2288  * @sp: command to send to the ISP
2289  *
2290  * Returns non-zero if a failure occurred, else zero.
2291  */
2292 int
2293 qla82xx_start_scsi(srb_t *sp)
2294 {
2295         int             ret, nseg;
2296         unsigned long   flags;
2297         struct scsi_cmnd *cmd;
2298         uint32_t        *clr_ptr;
2299         uint32_t        index;
2300         uint32_t        handle;
2301         uint16_t        cnt;
2302         uint16_t        req_cnt;
2303         uint16_t        tot_dsds;
2304         struct device_reg_82xx __iomem *reg;
2305         uint32_t dbval;
2306         uint32_t *fcp_dl;
2307         uint8_t additional_cdb_len;
2308         struct ct6_dsd *ctx;
2309         struct scsi_qla_host *vha = sp->fcport->vha;
2310         struct qla_hw_data *ha = vha->hw;
2311         struct req_que *req = NULL;
2312         struct rsp_que *rsp = NULL;
2313         char tag[2];
2314
2315         /* Setup device pointers. */
2316         ret = 0;
2317         reg = &ha->iobase->isp82;
2318         cmd = GET_CMD_SP(sp);
2319         req = vha->req;
2320         rsp = ha->rsp_q_map[0];
2321
2322         /* So we know we haven't pci_map'ed anything yet */
2323         tot_dsds = 0;
2324
2325         dbval = 0x04 | (ha->portnum << 5);
2326
2327         /* Send marker if required */
2328         if (vha->marker_needed != 0) {
2329                 if (qla2x00_marker(vha, req,
2330                         rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2331                         ql_log(ql_log_warn, vha, 0x300c,
2332                             "qla2x00_marker failed for cmd=%p.\n", cmd);
2333                         return QLA_FUNCTION_FAILED;
2334                 }
2335                 vha->marker_needed = 0;
2336         }
2337
2338         /* Acquire ring specific lock */
2339         spin_lock_irqsave(&ha->hardware_lock, flags);
2340
2341         /* Check for room in outstanding command list. */
2342         handle = req->current_outstanding_cmd;
2343         for (index = 1; index < req->num_outstanding_cmds; index++) {
2344                 handle++;
2345                 if (handle == req->num_outstanding_cmds)
2346                         handle = 1;
2347                 if (!req->outstanding_cmds[handle])
2348                         break;
2349         }
2350         if (index == req->num_outstanding_cmds)
2351                 goto queuing_error;
2352
2353         /* Map the sg table so we have an accurate count of sg entries needed */
2354         if (scsi_sg_count(cmd)) {
2355                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2356                     scsi_sg_count(cmd), cmd->sc_data_direction);
2357                 if (unlikely(!nseg))
2358                         goto queuing_error;
2359         } else
2360                 nseg = 0;
2361
2362         tot_dsds = nseg;
2363
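             /*
              * Past the ql2xshiftctondsd threshold, use Command Type 6, which
              * chains DSD lists from a DMA pool, instead of Command Type 7.
              */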
2364         if (tot_dsds > ql2xshiftctondsd) {
2365                 struct cmd_type_6 *cmd_pkt;
2366                 uint16_t more_dsd_lists = 0;
2367                 struct dsd_dma *dsd_ptr;
2368                 uint16_t i;
2369
2370                 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
2371                 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
2372                         ql_dbg(ql_dbg_io, vha, 0x300d,
2373                             "Num of DSD list %d is more than %d for cmd=%p.\n",
2374                             more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
2375                             cmd);
2376                         goto queuing_error;
2377                 }
2378
2379                 if (more_dsd_lists <= ha->gbl_dsd_avail)
2380                         goto sufficient_dsds;
2381                 else
2382                         more_dsd_lists -= ha->gbl_dsd_avail;
2383
2384                 for (i = 0; i < more_dsd_lists; i++) {
2385                         dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2386                         if (!dsd_ptr) {
2387                                 ql_log(ql_log_fatal, vha, 0x300e,
2388                                     "Failed to allocate memory for dsd_dma "
2389                                     "for cmd=%p.\n", cmd);
2390                                 goto queuing_error;
2391                         }
2392
2393                         dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2394                                 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2395                         if (!dsd_ptr->dsd_addr) {
2396                                 kfree(dsd_ptr);
2397                                 ql_log(ql_log_fatal, vha, 0x300f,
2398                                     "Failed to allocate memory for dsd_addr "
2399                                     "for cmd=%p.\n", cmd);
2400                                 goto queuing_error;
2401                         }
2402                         list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2403                         ha->gbl_dsd_avail++;
2404                 }
2405
2406 sufficient_dsds:
2407                 req_cnt = 1;
2408
2409                 if (req->cnt < (req_cnt + 2)) {
2410                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2411                                 &reg->req_q_out[0]);
2412                         if (req->ring_index < cnt)
2413                                 req->cnt = cnt - req->ring_index;
2414                         else
2415                                 req->cnt = req->length -
2416                                         (req->ring_index - cnt);
2417                         if (req->cnt < (req_cnt + 2))
2418                                 goto queuing_error;
2419                 }
2420
2421                 ctx = sp->u.scmd.ctx =
2422                     mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2423                 if (!ctx) {
2424                         ql_log(ql_log_fatal, vha, 0x3010,
2425                             "Failed to allocate ctx for cmd=%p.\n", cmd);
2426                         goto queuing_error;
2427                 }
2428
2429                 memset(ctx, 0, sizeof(struct ct6_dsd));
2430                 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2431                         GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2432                 if (!ctx->fcp_cmnd) {
2433                         ql_log(ql_log_fatal, vha, 0x3011,
2434                             "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
2435                         goto queuing_error;
2436                 }
2437
2438                 /* Initialize the DSD list and dma handle */
2439                 INIT_LIST_HEAD(&ctx->dsd_list);
2440                 ctx->dsd_use_cnt = 0;
2441
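                     /* FCP_CMND IU length: 12-byte fixed header + CDB +
                      * 4-byte FCP_DL. */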
2442                 if (cmd->cmd_len > 16) {
2443                         additional_cdb_len = cmd->cmd_len - 16;
2444                         if ((cmd->cmd_len % 4) != 0) {
2445                                 /* SCSI command bigger than 16 bytes must be
2446                                  * multiple of 4
2447                                  */
2448                                 ql_log(ql_log_warn, vha, 0x3012,
2449                                     "scsi cmd len %d not multiple of 4 "
2450                                     "for cmd=%p.\n", cmd->cmd_len, cmd);
2451                                 goto queuing_error_fcp_cmnd;
2452                         }
2453                         ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2454                 } else {
2455                         additional_cdb_len = 0;
2456                         ctx->fcp_cmnd_len = 12 + 16 + 4;
2457                 }
2458
2459                 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2460                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2461
2462                 /* Zero out remaining portion of packet. */
2463                 /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
2464                 clr_ptr = (uint32_t *)cmd_pkt + 2;
2465                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2466                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2467
2468                 /* Set NPORT-ID and LUN number*/
2469                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2470                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2471                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2472                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2473                 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2474
2475                 /* Build IOCB segments */
2476                 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2477                         goto queuing_error_fcp_cmnd;
2478
2479                 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2480                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2481
2482                 /* build FCP_CMND IU */
2483                 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2484                 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
2485                 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2486
2487                 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2488                         ctx->fcp_cmnd->additional_cdb_len |= 1;
2489                 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2490                         ctx->fcp_cmnd->additional_cdb_len |= 2;
2491
2492                 /*
2493                  * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2494                  */
2495                 if (scsi_populate_tag_msg(cmd, tag)) {
2496                         switch (tag[0]) {
2497                         case HEAD_OF_QUEUE_TAG:
2498                                 ctx->fcp_cmnd->task_attribute =
2499                                     TSK_HEAD_OF_QUEUE;
2500                                 break;
2501                         case ORDERED_QUEUE_TAG:
2502                                 ctx->fcp_cmnd->task_attribute =
2503                                     TSK_ORDERED;
2504                                 break;
2505                         }
2506                 }
2507
2508                 /* Populate the FCP_PRIO. */
2509                 if (ha->flags.fcp_prio_enabled)
2510                         ctx->fcp_cmnd->task_attribute |=
2511                             sp->fcport->fcp_prio << 3;
2512
2513                 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2514
2515                 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2516                     additional_cdb_len);
2517                 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
2518
2519                 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2520                 cmd_pkt->fcp_cmnd_dseg_address[0] =
2521                     cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2522                 cmd_pkt->fcp_cmnd_dseg_address[1] =
2523                     cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2524
2525                 sp->flags |= SRB_FCP_CMND_DMA_VALID;
2526                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2527                 /* Set total data segment count. */
2528                 cmd_pkt->entry_count = (uint8_t)req_cnt;
2529                 /* Specify response queue number where
2530                  * completion should happen
2531                  */
2532                 cmd_pkt->entry_status = (uint8_t) rsp->id;
2533         } else {
2534                 struct cmd_type_7 *cmd_pkt;
2535                 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2536                 if (req->cnt < (req_cnt + 2)) {
2537                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2538                             &reg->req_q_out[0]);
2539                         if (req->ring_index < cnt)
2540                                 req->cnt = cnt - req->ring_index;
2541                         else
2542                                 req->cnt = req->length -
2543                                         (req->ring_index - cnt);
2544                 }
2545                 if (req->cnt < (req_cnt + 2))
2546                         goto queuing_error;
2547
2548                 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2549                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2550
2551                 /* Zero out remaining portion of packet. */
2552                 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2553                 clr_ptr = (uint32_t *)cmd_pkt + 2;
2554                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2555                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2556
2557                 /* Set NPORT-ID and LUN number*/
2558                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2559                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2560                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2561                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2562                 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2563
2564                 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2565                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
2566                     sizeof(cmd_pkt->lun));
2567
2568                 /*
2569                  * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2570                  */
2571                 if (scsi_populate_tag_msg(cmd, tag)) {
2572                         switch (tag[0]) {
2573                         case HEAD_OF_QUEUE_TAG:
2574                                 cmd_pkt->task = TSK_HEAD_OF_QUEUE;
2575                                 break;
2576                         case ORDERED_QUEUE_TAG:
2577                                 cmd_pkt->task = TSK_ORDERED;
2578                                 break;
2579                         }
2580                 }
2581
2582                 /* Populate the FCP_PRIO. */
2583                 if (ha->flags.fcp_prio_enabled)
2584                         cmd_pkt->task |= sp->fcport->fcp_prio << 3;
2585
2586                 /* Load SCSI command packet. */
2587                 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2588                 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2589
2590                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2591
2592                 /* Build IOCB segments */
2593                 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
2594
2595                 /* Set total data segment count. */
2596                 cmd_pkt->entry_count = (uint8_t)req_cnt;
2597                 /* Specify response queue number where
2598                  * completion should happen.
2599                  */
2600                 cmd_pkt->entry_status = (uint8_t) rsp->id;
2601
2602         }
2603         /* Build command packet. */
2604         req->current_outstanding_cmd = handle;
2605         req->outstanding_cmds[handle] = sp;
2606         sp->handle = handle;
2607         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2608         req->cnt -= req_cnt;
2609         wmb();
2610
2611         /* Adjust ring index. */
2612         req->ring_index++;
2613         if (req->ring_index == req->length) {
2614                 req->ring_index = 0;
2615                 req->ring_ptr = req->ring;
2616         } else
2617                 req->ring_ptr++;
2618
2619         sp->flags |= SRB_DMA_VALID;
2620
2621         /* Set chip new ring index. */
2622         /* write, read and verify logic */
2623         dbval = dbval | (req->id << 8) | (req->ring_index << 16);
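             /* The doorbell value now encodes: opcode 0x4, the port number
              * (set near the top of this routine), the request-queue id and
              * the new ring index. */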
2624         if (ql2xdbwr)
2625                 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
2626         else {
2627                 WRT_REG_DWORD(
2628                         (unsigned long __iomem *)ha->nxdb_wr_ptr,
2629                         dbval);
2630                 wmb();
2631                 while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
2632                         WRT_REG_DWORD(
2633                                 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2634                                 dbval);
2635                         wmb();
2636                 }
2637         }
2638
2639         /* Manage unprocessed RIO/ZIO commands in response queue. */
2640         if (vha->flags.process_response_queue &&
2641             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2642                 qla24xx_process_response_queue(vha, rsp);
2643
2644         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2645         return QLA_SUCCESS;
2646
2647 queuing_error_fcp_cmnd:
2648         dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
2649 queuing_error:
2650         if (tot_dsds)
2651                 scsi_dma_unmap(cmd);
2652
2653         if (sp->u.scmd.ctx) {
2654                 mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
2655                 sp->u.scmd.ctx = NULL;
2656         }
2657         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2658
2659         return QLA_FUNCTION_FAILED;
2660 }
2661
2662 static void
2663 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
2664 {
2665         struct srb_iocb *aio = &sp->u.iocb_cmd;
2666         scsi_qla_host_t *vha = sp->fcport->vha;
2667         struct req_que *req = vha->req;
2668
2669         memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
2670         abt_iocb->entry_type = ABORT_IOCB_TYPE;
2671         abt_iocb->entry_count = 1;
2672         abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
2673         abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2674         abt_iocb->handle_to_abort =
2675             cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
2676         abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2677         abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
2678         abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2679         abt_iocb->vp_index = vha->vp_idx;
2680         abt_iocb->req_que_no = cpu_to_le16(req->id);
2681         /* Send the command to the firmware */
2682         wmb();
2683 }
2684
2685 int
2686 qla2x00_start_sp(srb_t *sp)
2687 {
2688         int rval;
2689         struct qla_hw_data *ha = sp->fcport->vha->hw;
2690         void *pkt;
2691         unsigned long flags;
2692
2693         rval = QLA_FUNCTION_FAILED;
2694         spin_lock_irqsave(&ha->hardware_lock, flags);
2695         pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
2696         if (!pkt) {
2697                 ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
2698                     "qla2x00_alloc_iocbs failed.\n");
2699                 goto done;
2700         }
2701
2702         rval = QLA_SUCCESS;
2703         switch (sp->type) {
2704         case SRB_LOGIN_CMD:
2705                 IS_FWI2_CAPABLE(ha) ?
2706                     qla24xx_login_iocb(sp, pkt) :
2707                     qla2x00_login_iocb(sp, pkt);
2708                 break;
2709         case SRB_LOGOUT_CMD:
2710                 IS_FWI2_CAPABLE(ha) ?
2711                     qla24xx_logout_iocb(sp, pkt) :
2712                     qla2x00_logout_iocb(sp, pkt);
2713                 break;
2714         case SRB_ELS_CMD_RPT:
2715         case SRB_ELS_CMD_HST:
2716                 qla24xx_els_iocb(sp, pkt);
2717                 break;
2718         case SRB_CT_CMD:
2719                 IS_FWI2_CAPABLE(ha) ?
2720                     qla24xx_ct_iocb(sp, pkt) :
2721                     qla2x00_ct_iocb(sp, pkt);
2722                 break;
2723         case SRB_ADISC_CMD:
2724                 IS_FWI2_CAPABLE(ha) ?
2725                     qla24xx_adisc_iocb(sp, pkt) :
2726                     qla2x00_adisc_iocb(sp, pkt);
2727                 break;
2728         case SRB_TM_CMD:
2729                 IS_QLAFX00(ha) ?
2730                     qlafx00_tm_iocb(sp, pkt) :
2731                     qla24xx_tm_iocb(sp, pkt);
2732                 break;
2733         case SRB_FXIOCB_DCMD:
2734         case SRB_FXIOCB_BCMD:
2735                 qlafx00_fxdisc_iocb(sp, pkt);
2736                 break;
2737         case SRB_ABT_CMD:
2738                 IS_QLAFX00(ha) ?
2739                         qlafx00_abort_iocb(sp, pkt) :
2740                         qla24xx_abort_iocb(sp, pkt);
2741                 break;
2742         default:
2743                 break;
2744         }
2745
2746         wmb();
2747         qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
2748 done:
2749         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2750         return rval;
2751 }
2752
2753 static void
2754 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
2755                                 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
2756 {
2757         uint16_t avail_dsds;
2758         uint32_t *cur_dsd;
2759         uint32_t req_data_len = 0;
2760         uint32_t rsp_data_len = 0;
2761         struct scatterlist *sg;
2762         int index;
2763         int entry_count = 1;
2764         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2765
2766         /* Update entry type to indicate bidir command */
2767         *((uint32_t *)(&cmd_pkt->entry_type)) =
2768                 __constant_cpu_to_le32(COMMAND_BIDIRECTIONAL);
2769
2770         /* Set the transfer direction; for a bidirectional command both
2771          * flags are set. Also set BD_WRAP_BACK; the firmware takes care
2772          * of assigning DID=SID for outgoing packets.
2773          */
2774         cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2775         cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2776         cmd_pkt->control_flags =
2777                         __constant_cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
2778                                                         BD_WRAP_BACK);
2779
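             /* Wrap-back loops the frames, so read and write byte counts match. */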
2780         req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
2781         cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
2782         cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
2783         cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
2784
2785         vha->bidi_stats.transfer_bytes += req_data_len;
2786         vha->bidi_stats.io_count++;
2787
2788         vha->qla_stats.output_bytes += req_data_len;
2789         vha->qla_stats.output_requests++;
2790
2791         /* Only one dsd is available for bidirectional IOCB, remaining dsds
2792          * are bundled in continuation iocb
2793          */
2794         avail_dsds = 1;
2795         cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
2796
2797         index = 0;
2798
2799         for_each_sg(bsg_job->request_payload.sg_list, sg,
2800                                 bsg_job->request_payload.sg_cnt, index) {
2801                 dma_addr_t sle_dma;
2802                 cont_a64_entry_t *cont_pkt;
2803
2804                 /* Allocate additional continuation packets */
2805                 if (avail_dsds == 0) {
2806                         /* A Continuation Type 1 IOCB can accommodate
2807                          * five DSDs.
2808                          */
2809                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
2810                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2811                         avail_dsds = 5;
2812                         entry_count++;
2813                 }
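                     /* Emit one DSD: the 64-bit DMA address as low and high
                      * 32-bit words, followed by the segment length.
                      */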
2814                 sle_dma = sg_dma_address(sg);
2815                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2816                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2817                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2818                 avail_dsds--;
2819         }
2820         /* Read-request DSDs always go to a continuation IOCB and
2821          * follow the write DSDs. If there is room in the current IOCB
2822          * they are added to that IOCB; otherwise a new continuation
2823          * IOCB is allocated.
2824          */
2825         for_each_sg(bsg_job->reply_payload.sg_list, sg,
2826                                 bsg_job->reply_payload.sg_cnt, index) {
2827                 dma_addr_t sle_dma;
2828                 cont_a64_entry_t *cont_pkt;
2829
2830                 /* Allocate additional continuation packets */
2831                 if (avail_dsds == 0) {
2832                         /* A Continuation Type 1 IOCB can accommodate
2833                          * five DSDs.
2834                          */
2835                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
2836                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2837                         avail_dsds = 5;
2838                         entry_count++;
2839                 }
2840                 sle_dma = sg_dma_address(sg);
2841                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2842                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2843                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2844                 avail_dsds--;
2845         }
2846         /* This value must equal the number of IOCBs used for this command. */
2847         cmd_pkt->entry_count = entry_count;
2848 }
2849
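     /**
      * qla2x00_start_bidir() - Issue a bidirectional command to the firmware.
      * @sp: SRB command to process
      * @vha: SCSI driver HA context
      * @tot_dsds: total number of data segment descriptors
      *
      * Returns EXT_STATUS_OK on success, EXT_STATUS_BUSY when no handle or
      * request-queue space is available, or EXT_STATUS_MAILBOX when the
      * marker IOCB cannot be sent.
      */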
2850 int
2851 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
2852 {
2854         struct qla_hw_data *ha = vha->hw;
2855         unsigned long flags;
2856         uint32_t handle;
2857         uint32_t index;
2858         uint16_t req_cnt;
2859         uint16_t cnt;
2860         uint32_t *clr_ptr;
2861         struct cmd_bidir *cmd_pkt = NULL;
2862         struct rsp_que *rsp;
2863         struct req_que *req;
2864         int rval = EXT_STATUS_OK;
2867
2868         rsp = ha->rsp_q_map[0];
2869         req = vha->req;
2870
2871         /* Send a marker IOCB first if required, so the firmware can resynchronize. */
2872         if (vha->marker_needed != 0) {
2873                 if (qla2x00_marker(vha, req,
2874                         rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
2875                         return EXT_STATUS_MAILBOX;
2876                 vha->marker_needed = 0;
2877         }
2878
2879         /* Acquire ring specific lock */
2880         spin_lock_irqsave(&ha->hardware_lock, flags);
2881
2882         /* Check for room in outstanding command list. */
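             /* Search circularly, starting just past the last handle assigned;
              * handle 0 is never used, so a full sweep without a free slot
              * means all usable handles are outstanding.
              */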
2883         handle = req->current_outstanding_cmd;
2884         for (index = 1; index < req->num_outstanding_cmds; index++) {
2885                 handle++;
2886                 if (handle == req->num_outstanding_cmds)
2887                         handle = 1;
2888                 if (!req->outstanding_cmds[handle])
2889                         break;
2890         }
2891
2892         if (index == req->num_outstanding_cmds) {
2893                 rval = EXT_STATUS_BUSY;
2894                 goto queuing_error;
2895         }
2896
2897         /* Calculate the number of IOCBs required. */
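             /* The count mirrors the DSD accounting above: one DSD in the
              * command IOCB plus five per Continuation Type 1 IOCB, e.g.
              * tot_dsds == 8 needs 1 + ceil(7 / 5) = 3 request entries.
              */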
2898         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2899
2900         /* Check for room on request queue. */
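             /* req->cnt caches the free-slot count; when it looks
              * insufficient, re-derive it from the hardware out-pointer,
              * wrapping at req->length. E.g. ring_index 10, out-pointer 4,
              * length 128 gives 128 - (10 - 4) = 122 free entries. Requiring
              * req_cnt + 2 keeps a couple of entries in reserve so the ring
              * is never driven completely full.
              */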
2901         if (req->cnt < req_cnt + 2) {
2902                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2903                     RD_REG_DWORD_RELAXED(req->req_q_out);
2904                 if (req->ring_index < cnt)
2905                         req->cnt = cnt - req->ring_index;
2906                 else
2907                         req->cnt = req->length -
2908                                 (req->ring_index - cnt);
2909         }
2910         if (req->cnt < req_cnt + 2) {
2911                 rval = EXT_STATUS_BUSY;
2912                 goto queuing_error;
2913         }
2914
2915         cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
2916         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2917
2918         /* Zero out remaining portion of packet. */
2919         /* Tagged queuing modifier -- default is TSK_SIMPLE (0). */
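             /* Skip the first two dwords (the entry header and the handle
              * written above) and clear the remainder of the request entry.
              */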
2920         clr_ptr = (uint32_t *)cmd_pkt + 2;
2921         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2922
2923         /* Set the N_Port ID (that of the vha). */
2924         cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
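             /* The 24-bit FC port ID is stored least-significant byte first:
              * al_pa, then area, then domain.
              */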
2925         cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
2926         cmd_pkt->port_id[1] = vha->d_id.b.area;
2927         cmd_pkt->port_id[2] = vha->d_id.b.domain;
2928
2929         qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
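             /* On the request side, the entry_status field is used to carry
              * the response queue id.
              */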
2930         cmd_pkt->entry_status = (uint8_t) rsp->id;
2931         /* Record the command as outstanding and claim the ring entries. */
2932         req->current_outstanding_cmd = handle;
2933         req->outstanding_cmds[handle] = sp;
2934         sp->handle = handle;
2935         req->cnt -= req_cnt;
2936
2937         /* Ensure the IOCB writes complete, then send the command to the firmware. */
2938         wmb();
2939         qla2x00_start_iocbs(vha, req);
2940 queuing_error:
2941         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2942         return rval;
2943 }