/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI Request Block (SRB) for the command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
        uint16_t cflags;

        cflags = 0;

        /* Set transfer direction */
        if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
                cflags = CF_WRITE;
                sp->fcport->vha->hw->qla_stats.output_bytes +=
                    scsi_bufflen(sp->cmd);
        } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cflags = CF_READ;
                sp->fcport->vha->hw->qla_stats.input_bytes +=
                    scsi_bufflen(sp->cmd);
        }
        return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 3) {
                iocbs += (dsds - 3) / 7;
                if ((dsds - 3) % 7)
                        iocbs++;
        }
        return (iocbs);
}
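
/*
 * Worked example: the Command Type 2 IOCB itself carries 3 DSDs and each
 * Continuation Type 0 IOCB carries 7 more, so for dsds = 12 the function
 * above returns 1 + (9 / 7) + 1 = 3 entries (capacity 3 + 7 + 7 = 17).
 */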

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 2) {
                iocbs += (dsds - 2) / 5;
                if ((dsds - 2) % 5)
                        iocbs++;
        }
        return (iocbs);
}
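
/*
 * Worked example: the 64-bit Command Type 3 IOCB carries 2 DSDs and each
 * Continuation Type 1 IOCB carries 5 more, so for dsds = 12 the function
 * above returns 1 + (10 / 5) = 3 entries (capacity 2 + 5 + 5 = 12).
 */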

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
        cont_entry_t *cont_pkt;
        struct req_que *req = vha->req;
        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) =
            __constant_cpu_to_le32(CONTINUE_TYPE);

        return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue to place the continuation IOCB on
 *
 * Returns a pointer to the Continuation Type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
        cont_a64_entry_t *cont_pkt;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) =
            __constant_cpu_to_le32(CONTINUE_A64_TYPE);

        return (cont_pkt);
}

static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
        uint8_t guard = scsi_host_get_guard(sp->cmd->device->host);

        /* We only support T10 DIF right now */
        if (guard != SHOST_DIX_GUARD_CRC) {
                ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
                    "Unsupported guard: %d for cmd=%p.\n", guard, sp->cmd);
                return 0;
        }

        /* We always use DIF bundling for best performance */
        *fw_prot_opts = 0;

        /* Translate SCSI opcode to a protection opcode */
        switch (scsi_get_prot_op(sp->cmd)) {
        case SCSI_PROT_READ_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_WRITE_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_READ_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_WRITE_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_READ_PASS:
                *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        case SCSI_PROT_WRITE_PASS:
                *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        default:        /* Normal Request */
                *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        }

        return scsi_prot_sg_count(sp->cmd);
}
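
/*
 * Note on the translation above: both STRIP cases resolve to
 * PO_MODE_DIF_REMOVE and both INSERT cases to PO_MODE_DIF_INSERT; only
 * the transfer direction differs. The return value, the protection
 * scatter/gather count, tells the caller how many additional DSDs the
 * protection data will consume.
 */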

/*
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = sp->cmd;

        /* Update entry type to indicate Command Type 2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Three DSDs are available in the Command Type 2 IOCB */
        avail_dsds = 3;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Seven DSDs are available in the Continuation
                         * Type 0 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
                        cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
                        avail_dsds = 7;
                }

                *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = sp->cmd;

        /* Update entry type to indicate Command Type 3 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_A64_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Two DSDs are available in the Command Type 3 IOCB */
        avail_dsds = 2;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
        int             ret, nseg;
        unsigned long   flags;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        uint32_t        *clr_ptr;
        uint32_t        index;
        uint32_t        handle;
        cmd_entry_t     *cmd_pkt;
        uint16_t        cnt;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        struct device_reg_2xxx __iomem *reg;
        struct qla_hw_data *ha;
        struct req_que *req;
        struct rsp_que *rsp;
        char            tag[2];

        /* Setup device pointers. */
        ret = 0;
        vha = sp->fcport->vha;
        ha = vha->hw;
        reg = &ha->iobase->isp;
        cmd = sp->cmd;
        req = ha->req_q_map[0];
        rsp = ha->rsp_q_map[0];
        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
                    QLA_SUCCESS) {
                        return (QLA_FUNCTION_FAILED);
                }
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
                handle++;
                if (handle == MAX_OUTSTANDING_COMMANDS)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == MAX_OUTSTANDING_COMMANDS)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else
                nseg = 0;

        tot_dsds = nseg;

        /* Calculate the number of request entries needed. */
        req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
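        /*
         * The "+ 2" below keeps a couple of request entries in reserve so
         * the software in-pointer never fully catches the firmware's
         * out-pointer; a completely full ring would otherwise look the
         * same as an empty one.
         */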
        if (req->cnt < (req_cnt + 2)) {
                cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
        }
        if (req->cnt < (req_cnt + 2))
                goto queuing_error;

        /* Build command packet */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        cmd_pkt = (cmd_entry_t *)req->ring_ptr;
        cmd_pkt->handle = handle;
        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set target ID and LUN number */
        SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
        cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

        /* Update tagged queuing modifier */
        if (scsi_populate_tag_msg(cmd, tag)) {
                switch (tag[0]) {
                case HEAD_OF_QUEUE_TAG:
                        cmd_pkt->control_flags =
                            __constant_cpu_to_le16(CF_HEAD_TAG);
                        break;
                case ORDERED_QUEUE_TAG:
                        cmd_pkt->control_flags =
                            __constant_cpu_to_le16(CF_ORDERED_TAG);
                        break;
                default:
                        cmd_pkt->control_flags =
                            __constant_cpu_to_le16(CF_SIMPLE_TAG);
                        break;
                }
        }

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla2x00_process_response_queue(rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return (QLA_SUCCESS);

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (QLA_FUNCTION_FAILED);
}

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue on which to ring the doorbell
 */
static void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
        struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

        if (IS_QLA82XX(ha)) {
                qla82xx_start_iocbs(vha);
        } else {
                /* Adjust ring index. */
                req->ring_index++;
                if (req->ring_index == req->length) {
                        req->ring_index = 0;
                        req->ring_ptr = req->ring;
                } else
                        req->ring_ptr++;

                /* Set chip new ring index. */
                if (ha->mqenable) {
                        WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
                        RD_REG_DWORD(&ioreg->hccr);
                } else if (IS_FWI2_CAPABLE(ha)) {
                        WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
                } else {
                        WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
                                req->ring_index);
                        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
                }
        }
}

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
                        struct rsp_que *rsp, uint16_t loop_id,
                        uint16_t lun, uint8_t type)
{
        mrk_entry_t *mrk;
        struct mrk_entry_24xx *mrk24;
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        mrk24 = NULL;
        req = ha->req_q_map[0];
        mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
        if (mrk == NULL) {
                ql_log(ql_log_warn, base_vha, 0x3026,
                    "Failed to allocate Marker IOCB.\n");

                return (QLA_FUNCTION_FAILED);
        }

        mrk->entry_type = MARKER_TYPE;
        mrk->modifier = type;
        if (type != MK_SYNC_ALL) {
                if (IS_FWI2_CAPABLE(ha)) {
                        mrk24 = (struct mrk_entry_24xx *) mrk;
                        mrk24->nport_handle = cpu_to_le16(loop_id);
                        mrk24->lun[1] = LSB(lun);
                        mrk24->lun[2] = MSB(lun);
                        host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
                        mrk24->vp_index = vha->vp_idx;
                        mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
                } else {
                        SET_TARGET_ID(ha, mrk->target, loop_id);
                        mrk->lun = cpu_to_le16(lun);
                }
        }
        wmb();

        qla2x00_start_iocbs(vha, req);

        return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
                struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
                uint8_t type)
{
        int ret;
        unsigned long flags = 0;

        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
        ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
        spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

        return (ret);
}

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 * @vha: HA context
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 1) {
                iocbs += (dsds - 1) / 5;
                if ((dsds - 1) % 5)
                        iocbs++;
        }
        return iocbs;
}
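
/*
 * Worked example: here the command IOCB carries a single DSD and each
 * Continuation Type 1 IOCB carries 5 more, so for dsds = 11 the function
 * above returns 1 + (10 / 5) = 3 entries (capacity 1 + 5 + 5 = 11).
 */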

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
        uint16_t tot_dsds)
{
        uint32_t *cur_dsd = NULL;
        scsi_qla_host_t *vha;
        struct qla_hw_data *ha;
        struct scsi_cmnd *cmd;
        struct scatterlist *cur_seg;
        uint32_t *dsd_seg;
        void *next_dsd;
        uint8_t avail_dsds;
        uint8_t first_iocb = 1;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct ct6_dsd *ctx;

        cmd = sp->cmd;

        /* Update entry type to indicate Command Type 6 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
                __constant_cpu_to_le32(COMMAND_TYPE_6);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return 0;
        }

        vha = sp->fcport->vha;
        ha = vha->hw;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_WRITE_DATA);
                ha->qla_stats.output_bytes += scsi_bufflen(cmd);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_READ_DATA);
                ha->qla_stats.input_bytes += scsi_bufflen(cmd);
        }

        cur_seg = scsi_sglist(cmd);
        ctx = sp->ctx;

        while (tot_dsds) {
                avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
                    QLA_DSDS_PER_IOCB : tot_dsds;
                tot_dsds -= avail_dsds;
                dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

                dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
                    struct dsd_dma, list);
                next_dsd = dsd_ptr->dsd_addr;
                list_del(&dsd_ptr->list);
                ha->gbl_dsd_avail--;
                list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
                ctx->dsd_use_cnt++;
                ha->gbl_dsd_inuse++;

                if (first_iocb) {
                        first_iocb = 0;
                        dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
                        *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
                } else {
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(dsd_list_len);
                }
                cur_dsd = (uint32_t *)next_dsd;
                while (avail_dsds) {
                        dma_addr_t      sle_dma;

                        sle_dma = sg_dma_address(cur_seg);
                        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
                        cur_seg = sg_next(cur_seg);
                        avail_dsds--;
                }
        }

        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
        return 0;
}

/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
        uint16_t dsd_lists = 0;

        dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
        if (dsds % QLA_DSDS_PER_IOCB)
                dsd_lists++;
        return dsd_lists;
}
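
/*
 * Worked example (assuming QLA_DSDS_PER_IOCB is 37, per its definition
 * in qla_def.h): dsds = 80 yields 80 / 37 = 2 full lists plus one
 * partial list for the remaining 6 descriptors, so 3 DSD lists total.
 */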


/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;
        struct req_que *req;

        cmd = sp->cmd;

        /* Update entry type to indicate Command Type 7 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE_7);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        req = vha->req;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_WRITE_DATA);
                sp->fcport->vha->hw->qla_stats.output_bytes +=
                    scsi_bufflen(sp->cmd);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_READ_DATA);
                sp->fcport->vha->hw->qla_stats.input_bytes +=
                    scsi_bufflen(sp->cmd);
        }

        /* One DSD is available in the Command Type 7 IOCB */
        avail_dsds = 1;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

struct fw_dif_context {
        uint32_t ref_tag;
        uint16_t app_tag;
        uint8_t ref_tag_mask[4];        /* Validation/Replacement Mask */
        uint8_t app_tag_mask[2];        /* Validation/Replacement Mask */
};
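
/*
 * Background (T10 DIF): each protected block carries an 8-byte
 * protection tuple of a 2-byte guard CRC, a 2-byte application tag and
 * a 4-byte reference tag. A mask byte of 0xff asks the firmware to
 * check/replace the corresponding tag byte; 0x00 ignores it.
 */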

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI
 * command and fill the firmware DIF context.
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
        struct scsi_cmnd *cmd = sp->cmd;
        scsi_qla_host_t *vha = shost_priv(cmd->device->host);

        switch (scsi_get_prot_type(cmd)) {
        case SCSI_PROT_DIF_TYPE0:
                /*
                 * No check for ql2xenablehba_err_chk, as it would be an
                 * I/O error if hba tag generation is not done.
                 */
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /*
         * For Type 2 protection: 16 bit GUARD tag, plus a 32 bit REF tag
         * that must match the LBA in the CDB + N.
         */
        case SCSI_PROT_DIF_TYPE2:
                pkt->app_tag = __constant_cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /* For Type 3 protection: 16 bit GUARD only */
        case SCSI_PROT_DIF_TYPE3:
                pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
                        pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
                                                                0x00;
                break;

        /*
         * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
         * 16 bit app tag.
         */
        case SCSI_PROT_DIF_TYPE1:
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));
                pkt->app_tag = __constant_cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;
        }

        ql_dbg(ql_dbg_io, vha, 0x3009,
            "Setting protection Tags: (BIG) ref tag = 0x%x, app tag = 0x%x, "
            "prot SG count %d, cmd lba 0x%x, prot_type=%u cmd=%p.\n",
            pkt->ref_tag, pkt->app_tag, protcnt, (int)scsi_get_lba(cmd),
            scsi_get_prot_type(cmd), cmd);
}

struct qla2_sgx {
        dma_addr_t              dma_addr;       /* OUT */
        uint32_t                dma_len;        /* OUT */

        uint32_t                tot_bytes;      /* IN */
        struct scatterlist      *cur_sg;        /* IN */

        /* for bookkeeping, bzero on initial invocation */
        uint32_t                bytes_consumed;
        uint32_t                num_bytes;
        uint32_t                tot_partial;

        /* for debugging */
        uint32_t                num_sg;
        srb_t                   *sp;
};

static int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
        uint32_t *partial)
{
        struct scatterlist *sg;
        uint32_t cumulative_partial, sg_len;
        dma_addr_t sg_dma_addr;

        if (sgx->num_bytes == sgx->tot_bytes)
                return 0;

        sg = sgx->cur_sg;
        cumulative_partial = sgx->tot_partial;

        sg_dma_addr = sg_dma_address(sg);
        sg_len = sg_dma_len(sg);

        sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

        if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
                sgx->dma_len = (blk_sz - cumulative_partial);
                sgx->tot_partial = 0;
                sgx->num_bytes += blk_sz;
                *partial = 0;
        } else {
                sgx->dma_len = sg_len - sgx->bytes_consumed;
                sgx->tot_partial += sgx->dma_len;
                *partial = 1;
        }

        sgx->bytes_consumed += sgx->dma_len;

        if (sg_len == sgx->bytes_consumed) {
                sg = sg_next(sg);
                sgx->num_sg++;
                sgx->cur_sg = sg;
                sgx->bytes_consumed = 0;
        }

        return 1;
}
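
/*
 * Usage sketch (illustrative, mirroring the caller below): the iterator
 * hands back at most one protection-interval's worth of data per call,
 * setting *partial whenever an SG element ends mid-interval.
 *
 *	struct qla2_sgx sgx;
 *	uint32_t partial;
 *
 *	memset(&sgx, 0, sizeof(sgx));
 *	sgx.tot_bytes = scsi_bufflen(cmd);
 *	sgx.cur_sg = scsi_sglist(cmd);
 *	while (qla24xx_get_one_block_sg(blk_sz, &sgx, &partial)) {
 *		// emit a DSD for [sgx.dma_addr, sgx.dma_len]; once a
 *		// full interval completes (!partial), 8 bytes of DIF
 *		// data are consumed from the protection sglist as well
 *	}
 */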

static int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
        uint32_t *dsd, uint16_t tot_dsds)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg_prot;
        uint32_t *cur_dsd = dsd;
        uint16_t        used_dsds = tot_dsds;

        uint32_t        prot_int;
        uint32_t        partial;
        struct qla2_sgx sgx;
        dma_addr_t      sle_dma;
        uint32_t        sle_dma_len, tot_prot_dma_len = 0;
        struct scsi_cmnd *cmd = sp->cmd;

        prot_int = cmd->device->sector_size;

        memset(&sgx, 0, sizeof(struct qla2_sgx));
        sgx.tot_bytes = scsi_bufflen(sp->cmd);
        sgx.cur_sg = scsi_sglist(sp->cmd);
        sgx.sp = sp;

        sg_prot = scsi_prot_sglist(sp->cmd);

        while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

                sle_dma = sgx.dma_addr;
                sle_dma_len = sgx.dma_len;
alloc_and_fill:
                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to clean up only this dsd_ptr; the
                                 * rest will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        list_add_tail(&dsd_ptr->list,
                            &((struct crc_context *)sp->ctx)->dsd_list);

                        sp->flags |= SRB_CRC_CTX_DSD_VALID;

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sle_dma_len);
                avail_dsds--;

                if (partial == 0) {
                        /* Got a full protection interval */
                        sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
                        sle_dma_len = 8;

                        tot_prot_dma_len += sle_dma_len;
                        if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
                                tot_prot_dma_len = 0;
                                sg_prot = sg_next(sg_prot);
                        }

                        partial = 1; /* So as to not re-enter this block */
                        goto alloc_and_fill;
                }
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}
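
/*
 * Layout note for the sglist walkers above and below: DSDs are written
 * into DMA-able lists of up to QLA_DSDS_PER_IOCB 12-byte entries
 * (address low/high + length); the "+ 1" in dsd_list_len reserves the
 * chain slot. When a list fills, the walker writes the DMA address and
 * length of the freshly allocated list into the current position so the
 * firmware can follow the chain, and three zero words terminate it.
 */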

static int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
        uint16_t tot_dsds)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg;
        uint32_t *cur_dsd = dsd;
        int     i;
        uint16_t        used_dsds = tot_dsds;
        scsi_qla_host_t *vha = shost_priv(sp->cmd->device->host);

        uint8_t         *cp;

        scsi_for_each_sg(sp->cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to clean up only this dsd_ptr; the
                                 * rest will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        list_add_tail(&dsd_ptr->list,
                            &((struct crc_context *)sp->ctx)->dsd_list);

                        sp->flags |= SRB_CRC_CTX_DSD_VALID;

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);
                ql_dbg(ql_dbg_io, vha, 0x300a,
                    "sg entry %d - addr=0x%x 0x%x, len=%d for cmd=%p.\n",
                    i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg),
                    sp->cmd);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;

                if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
                        cp = page_address(sg_page(sg)) + sg->offset;
                        ql_dbg(ql_dbg_io, vha, 0x300b,
                            "User data buffer=%p for cmd=%p.\n", cp, sp->cmd);
                }
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

static int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
        uint32_t *dsd, uint16_t tot_dsds)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg;
        int     i;
        struct scsi_cmnd *cmd;
        uint32_t *cur_dsd = dsd;
        uint16_t        used_dsds = tot_dsds;
        scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
        uint8_t         *cp;

        cmd = sp->cmd;
        scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                                QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to clean up only this dsd_ptr; the
                                 * rest will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        list_add_tail(&dsd_ptr->list,
                            &((struct crc_context *)sp->ctx)->dsd_list);

                        sp->flags |= SRB_CRC_CTX_DSD_VALID;

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);
                if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
                        ql_dbg(ql_dbg_io, vha, 0x3027,
                            "%s(): %p, sg_entry %d - "
                            "addr=0x%x 0x%x, len=%d.\n",
                            __func__, cur_dsd, i,
                            LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg));
                }
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

                if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
                        cp = page_address(sg_page(sg)) + sg->offset;
                        ql_dbg(ql_dbg_io, vha, 0x3028,
                            "%s(): Protection Data buffer = %p.\n", __func__,
                            cp);
                }
                avail_dsds--;
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 *                                    Type CRC_2 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of segments carrying protection data
 * @fw_prot_opts: Protection options passed to the firmware
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
        uint32_t                *cur_dsd, *fcp_dl;
        scsi_qla_host_t         *vha;
        struct scsi_cmnd        *cmd;
        struct scatterlist      *cur_seg;
        int                     sgc;
        uint32_t                total_bytes = 0;
        uint32_t                data_bytes;
        uint32_t                dif_bytes;
        uint8_t                 bundling = 1;
        uint16_t                blk_size;
        uint8_t                 *clr_ptr;
        struct crc_context      *crc_ctx_pkt = NULL;
        struct qla_hw_data      *ha;
        uint8_t                 additional_fcpcdb_len;
        uint16_t                fcp_cmnd_len;
        struct fcp_cmnd         *fcp_cmnd;
        dma_addr_t              crc_ctx_dma;
        char                    tag[2];

        cmd = sp->cmd;

        sgc = 0;
        /* Update entry type to indicate Command Type CRC_2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);

        vha = sp->fcport->vha;
        ha = vha->hw;

        /* No data transfer */
        data_bytes = scsi_bufflen(cmd);
        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return QLA_SUCCESS;
        }

        cmd_pkt->vp_index = sp->fcport->vp_idx;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_WRITE_DATA);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_READ_DATA);
        }

        if ((scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_INSERT) ||
            (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_STRIP) ||
            (scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_STRIP) ||
            (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_INSERT))
                bundling = 0;

        /* Allocate CRC context from global pool */
        crc_ctx_pkt = sp->ctx = dma_pool_alloc(ha->dl_dma_pool,
            GFP_ATOMIC, &crc_ctx_dma);

        if (!crc_ctx_pkt)
                goto crc_queuing_error;

        /* Zero out CTX area. */
        clr_ptr = (uint8_t *)crc_ctx_pkt;
        memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

        crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

        sp->flags |= SRB_CRC_CTX_DMA_VALID;

        /* Set handle */
        crc_ctx_pkt->handle = cmd_pkt->handle;

        INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

        qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
            &crc_ctx_pkt->ref_tag, tot_prot_dsds);

        cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
        cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
        cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

        /* Determine SCSI command length -- align to 4 byte boundary */
        if (cmd->cmd_len > 16) {
                additional_fcpcdb_len = cmd->cmd_len - 16;
                if ((cmd->cmd_len % 4) != 0) {
                        /* SCSI cmd > 16 bytes must be multiple of 4 */
                        goto crc_queuing_error;
                }
                fcp_cmnd_len = 12 + cmd->cmd_len + 4;
        } else {
                additional_fcpcdb_len = 0;
                fcp_cmnd_len = 12 + 16 + 4;
        }

        fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

        fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
        if (cmd->sc_data_direction == DMA_TO_DEVICE)
                fcp_cmnd->additional_cdb_len |= 1;
        else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                fcp_cmnd->additional_cdb_len |= 2;

        int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun);
        memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
        cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
            LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
        cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
            MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
        fcp_cmnd->task_management = 0;

        /*
         * Update tagged queuing modifier if using command tag queuing
         */
        if (scsi_populate_tag_msg(cmd, tag)) {
                switch (tag[0]) {
                case HEAD_OF_QUEUE_TAG:
                    fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
                    break;
                case ORDERED_QUEUE_TAG:
                    fcp_cmnd->task_attribute = TSK_ORDERED;
                    break;
                default:
                    fcp_cmnd->task_attribute = 0;
                    break;
                }
        } else {
                fcp_cmnd->task_attribute = 0;
        }

        cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

        /*
         * Compute dif len (8 bytes of PI per block) and adjust data len
         * to include protection.
         */
        dif_bytes = 0;
        blk_size = cmd->device->sector_size;
        dif_bytes = (data_bytes / blk_size) * 8;

        switch (scsi_get_prot_op(sp->cmd)) {
        case SCSI_PROT_READ_INSERT:
        case SCSI_PROT_WRITE_STRIP:
            total_bytes = data_bytes;
            data_bytes += dif_bytes;
            break;

        case SCSI_PROT_READ_STRIP:
        case SCSI_PROT_WRITE_INSERT:
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
            total_bytes = data_bytes + dif_bytes;
            break;
        default:
            BUG();
        }

        if (!qla2x00_hba_err_chk_enabled(sp))
                fw_prot_opts |= 0x10; /* Disable Guard tag checking */

        if (!bundling) {
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
        } else {
                /*
                 * Configure bundling if we need to fetch the interleaved
                 * protection data with separate PCI accesses.
                 */
                fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
                crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
                crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
                                                        tot_prot_dsds);
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
        }

        /* Finish the common fields of CRC pkt */
        crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
        crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
        crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
        crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
        /* Fibre channel byte count */
        cmd_pkt->byte_count = cpu_to_le32(total_bytes);
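        /*
         * FCP_DL, the total data length the target should expect to move,
         * immediately follows the CDB in the FCP_CMND payload (offset
         * 16 + additional_fcpcdb_len) and is big endian on the wire,
         * hence the htonl() below.
         */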
        fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
            additional_fcpcdb_len);
        *fcp_dl = htonl(total_bytes);

        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return QLA_SUCCESS;
        }
        /* Walk the data segments */

        cmd_pkt->control_flags |=
            __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

        if (!bundling && tot_prot_dsds) {
                if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
                    cur_dsd, tot_dsds))
                        goto crc_queuing_error;
        } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
            (tot_dsds - tot_prot_dsds)))
                goto crc_queuing_error;

        if (bundling && tot_prot_dsds) {
                /* Walk the DIF segments */
                cur_seg = scsi_prot_sglist(cmd);
                cmd_pkt->control_flags |=
                        __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
                if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
                    tot_prot_dsds))
                        goto crc_queuing_error;
        }
        return QLA_SUCCESS;

crc_queuing_error:
        /* Cleanup will be performed by the caller */

        return QLA_FUNCTION_FAILED;
}
1427
1428 /**
1429  * qla24xx_start_scsi() - Send a SCSI command to the ISP
1430  * @sp: command to send to the ISP
1431  *
1432  * Returns non-zero if a failure occurred, else zero.
1433  */
1434 int
1435 qla24xx_start_scsi(srb_t *sp)
1436 {
1437         int             ret, nseg;
1438         unsigned long   flags;
1439         uint32_t        *clr_ptr;
1440         uint32_t        index;
1441         uint32_t        handle;
1442         struct cmd_type_7 *cmd_pkt;
1443         uint16_t        cnt;
1444         uint16_t        req_cnt;
1445         uint16_t        tot_dsds;
1446         struct req_que *req = NULL;
1447         struct rsp_que *rsp = NULL;
1448         struct scsi_cmnd *cmd = sp->cmd;
1449         struct scsi_qla_host *vha = sp->fcport->vha;
1450         struct qla_hw_data *ha = vha->hw;
1451         char            tag[2];
1452
1453         /* Setup device pointers. */
1454         ret = 0;
1455
1456         qla25xx_set_que(sp, &rsp);
1457         req = vha->req;
1458
1459         /* So we know we haven't pci_map'ed anything yet */
1460         tot_dsds = 0;
1461
1462         /* Send marker if required */
1463         if (vha->marker_needed != 0) {
1464                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1465                     QLA_SUCCESS)
1466                         return QLA_FUNCTION_FAILED;
1467                 vha->marker_needed = 0;
1468         }
1469
1470         /* Acquire ring specific lock */
1471         spin_lock_irqsave(&ha->hardware_lock, flags);
1472
1473         /* Check for room in outstanding command list. */
1474         handle = req->current_outstanding_cmd;
1475         for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1476                 handle++;
1477                 if (handle == MAX_OUTSTANDING_COMMANDS)
1478                         handle = 1;
1479                 if (!req->outstanding_cmds[handle])
1480                         break;
1481         }
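        /*
         * The search starts at 1 and wraps back to 1, so slot 0 is never
         * used; a zero handle can then serve as an "invalid" sentinel on
         * the completion path.
         */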
1482         if (index == MAX_OUTSTANDING_COMMANDS) {
1483                 goto queuing_error;
1484         }
1485
1486         /* Map the sg table so we have an accurate count of sg entries needed */
1487         if (scsi_sg_count(cmd)) {
1488                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1489                     scsi_sg_count(cmd), cmd->sc_data_direction);
1490                 if (unlikely(!nseg))
1491                         goto queuing_error;
1492         } else
1493                 nseg = 0;
1494
1495         tot_dsds = nseg;
1496         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1497         if (req->cnt < (req_cnt + 2)) {
1498                 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1499
1500                 if (req->ring_index < cnt)
1501                         req->cnt = cnt - req->ring_index;
1502                 else
1503                         req->cnt = req->length -
1504                                 (req->ring_index - cnt);
1505         }
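        /*
         * req->cnt caches the free-slot count; the hardware out-pointer is
         * only re-read when the cache looks too small.  Demanding req_cnt + 2
         * free entries keeps the ring from ever filling completely, since a
         * completely full ring would be indistinguishable from an empty one.
         */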
1506         if (req->cnt < (req_cnt + 2))
1507                 goto queuing_error;
1508
1509         /* Build command packet. */
1510         req->current_outstanding_cmd = handle;
1511         req->outstanding_cmds[handle] = sp;
1512         sp->handle = handle;
1513         sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1514         req->cnt -= req_cnt;
1515
1516         cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1517         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1518
1519         /* Zero out remaining portion of packet. */
1520         /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1521         clr_ptr = (uint32_t *)cmd_pkt + 2;
1522         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
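        /* Bytes 0-7 (entry header and the handle set above) are preserved. */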
1523         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1524
1525         /* Set NPORT-ID and LUN number*/
1526         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1527         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1528         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1529         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1530         cmd_pkt->vp_index = sp->fcport->vp_idx;
1531
1532         int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
1533         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1534
1535         /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
1536         if (scsi_populate_tag_msg(cmd, tag)) {
1537                 switch (tag[0]) {
1538                 case HEAD_OF_QUEUE_TAG:
1539                         cmd_pkt->task = TSK_HEAD_OF_QUEUE;
1540                         break;
1541                 case ORDERED_QUEUE_TAG:
1542                         cmd_pkt->task = TSK_ORDERED;
1543                         break;
1544                 }
1545         }
1546
1547         /* Load SCSI command packet. */
1548         memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1549         host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1550
1551         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1552
1553         /* Build IOCB segments */
1554         qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
1555
1556         /* Set total IOCB entry count. */
1557         cmd_pkt->entry_count = (uint8_t)req_cnt;
1558         /* Specify response queue number where completion should happen */
1559         cmd_pkt->entry_status = (uint8_t) rsp->id;
1560         wmb();
1561         /* Adjust ring index. */
1562         req->ring_index++;
1563         if (req->ring_index == req->length) {
1564                 req->ring_index = 0;
1565                 req->ring_ptr = req->ring;
1566         } else
1567                 req->ring_ptr++;
1568
1569         sp->flags |= SRB_DMA_VALID;
1570
1571         /* Set chip new ring index. */
1572         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1573         RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
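        /* Reading HCCR back flushes the posted PCI write of the ring index. */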
1574
1575         /* Manage unprocessed RIO/ZIO commands in response queue. */
1576         if (vha->flags.process_response_queue &&
1577                 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1578                 qla24xx_process_response_queue(vha, rsp);
1579
1580         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1581         return QLA_SUCCESS;
1582
1583 queuing_error:
1584         if (tot_dsds)
1585                 scsi_dma_unmap(cmd);
1586
1587         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1588
1589         return QLA_FUNCTION_FAILED;
1590 }
1591
1592
1593 /**
1594  * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1595  * @sp: command to send to the ISP
1596  *
1597  * Returns non-zero if a failure occurred, else zero.
1598  */
1599 int
1600 qla24xx_dif_start_scsi(srb_t *sp)
1601 {
1602         int                     nseg;
1603         unsigned long           flags;
1604         uint32_t                *clr_ptr;
1605         uint32_t                index;
1606         uint32_t                handle;
1607         uint16_t                cnt;
1608         uint16_t                req_cnt = 0;
1609         uint16_t                tot_dsds;
1610         uint16_t                tot_prot_dsds;
1611         uint16_t                fw_prot_opts = 0;
1612         struct req_que          *req = NULL;
1613         struct rsp_que          *rsp = NULL;
1614         struct scsi_cmnd        *cmd = sp->cmd;
1615         struct scsi_qla_host    *vha = sp->fcport->vha;
1616         struct qla_hw_data      *ha = vha->hw;
1617         struct cmd_type_crc_2   *cmd_pkt;
1618         uint32_t                status = 0;
1619
1620 #define QDSS_GOT_Q_SPACE        BIT_0
1621
1622         /* Only protection operations or CDBs longer than 16 bytes are handled here. */
1623         if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1624                 if (cmd->cmd_len <= 16)
1625                         return qla24xx_start_scsi(sp);
1626         }
1627
1628         /* Setup device pointers. */
1629
1630         qla25xx_set_que(sp, &rsp);
1631         req = vha->req;
1632
1633         /* So we know we haven't pci_map'ed anything yet */
1634         tot_dsds = 0;
1635
1636         /* Send marker if required */
1637         if (vha->marker_needed != 0) {
1638                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1639                     QLA_SUCCESS)
1640                         return QLA_FUNCTION_FAILED;
1641                 vha->marker_needed = 0;
1642         }
1643
1644         /* Acquire ring specific lock */
1645         spin_lock_irqsave(&ha->hardware_lock, flags);
1646
1647         /* Check for room in outstanding command list. */
1648         handle = req->current_outstanding_cmd;
1649         for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1650                 handle++;
1651                 if (handle == MAX_OUTSTANDING_COMMANDS)
1652                         handle = 1;
1653                 if (!req->outstanding_cmds[handle])
1654                         break;
1655         }
1656
1657         if (index == MAX_OUTSTANDING_COMMANDS)
1658                 goto queuing_error;
1659
1660         /* Compute number of required data segments */
1661         /* Map the sg table so we have an accurate count of sg entries needed */
1662         if (scsi_sg_count(cmd)) {
1663                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1664                     scsi_sg_count(cmd), cmd->sc_data_direction);
1665                 if (unlikely(!nseg))
1666                         goto queuing_error;
1667                 else
1668                         sp->flags |= SRB_DMA_VALID;
1669
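                /*
                 * For HBA-side DIF insert/strip, re-derive the segment count
                 * by walking the scatterlist one sector-sized block at a
                 * time: each block needs its own descriptor.
                 */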
1670                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1671                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1672                         struct qla2_sgx sgx;
1673                         uint32_t        partial;
1674
1675                         memset(&sgx, 0, sizeof(struct qla2_sgx));
1676                         sgx.tot_bytes = scsi_bufflen(cmd);
1677                         sgx.cur_sg = scsi_sglist(cmd);
1678                         sgx.sp = sp;
1679
1680                         nseg = 0;
1681                         while (qla24xx_get_one_block_sg(
1682                             cmd->device->sector_size, &sgx, &partial))
1683                                 nseg++;
1684                 }
1685         } else
1686                 nseg = 0;
1687
1688         /* number of required data segments */
1689         tot_dsds = nseg;
1690
1691         /* Compute number of required protection segments */
1692         if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1693                 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1694                     scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1695                 if (unlikely(!nseg))
1696                         goto queuing_error;
1697                 else
1698                         sp->flags |= SRB_CRC_PROT_DMA_VALID;
1699
1700                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1701                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1702                         nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1703                 }
1704         } else {
1705                 nseg = 0;
1706         }
1707
1708         req_cnt = 1;
1709         /* Total Data and protection sg segment(s) */
1710         tot_prot_dsds = nseg;
1711         tot_dsds += nseg;
1712         if (req->cnt < (req_cnt + 2)) {
1713                 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1714
1715                 if (req->ring_index < cnt)
1716                         req->cnt = cnt - req->ring_index;
1717                 else
1718                         req->cnt = req->length -
1719                                 (req->ring_index - cnt);
1720         }
1721
1722         if (req->cnt < (req_cnt + 2))
1723                 goto queuing_error;
1724
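        /* Ring space is claimed from here on; the error path uses this flag
         * to hand it back. */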
1725         status |= QDSS_GOT_Q_SPACE;
1726
1727         /* Build header part of command packet (excluding the OPCODE). */
1728         req->current_outstanding_cmd = handle;
1729         req->outstanding_cmds[handle] = sp;
1730         sp->handle = handle;
1731         sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1732         req->cnt -= req_cnt;
1733
1734         /* Fill-in common area */
1735         cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1736         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1737
1738         clr_ptr = (uint32_t *)cmd_pkt + 2;
1739         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1740
1741         /* Set NPORT-ID and LUN number*/
1742         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1743         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1744         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1745         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1746
1747         int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
1748         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1749
1750         /* Total Data and protection segment(s) */
1751         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1752
1753         /* Build IOCB segments and adjust for data protection segments */
1754         if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1755             req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1756                 QLA_SUCCESS)
1757                 goto queuing_error;
1758
1759         cmd_pkt->entry_count = (uint8_t)req_cnt;
1760         /* Specify response queue number where completion should happen */
1761         cmd_pkt->entry_status = (uint8_t) rsp->id;
1762         cmd_pkt->timeout = __constant_cpu_to_le16(0);
1763         wmb();
1764
1765         /* Adjust ring index. */
1766         req->ring_index++;
1767         if (req->ring_index == req->length) {
1768                 req->ring_index = 0;
1769                 req->ring_ptr = req->ring;
1770         } else
1771                 req->ring_ptr++;
1772
1773         /* Set chip new ring index. */
1774         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1775         RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1776
1777         /* Manage unprocessed RIO/ZIO commands in response queue. */
1778         if (vha->flags.process_response_queue &&
1779             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1780                 qla24xx_process_response_queue(vha, rsp);
1781
1782         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1783
1784         return QLA_SUCCESS;
1785
1786 queuing_error:
1787         if (status & QDSS_GOT_Q_SPACE) {
1788                 req->outstanding_cmds[handle] = NULL;
1789                 req->cnt += req_cnt;
1790         }
1791         /* Cleanup will be performed by the caller (queuecommand) */
1792
1793         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1794         return QLA_FUNCTION_FAILED;
1795 }
1796
1797
1798 static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
1799 {
1800         struct scsi_cmnd *cmd = sp->cmd;
1801         struct qla_hw_data *ha = sp->fcport->vha->hw;
1802         int affinity = cmd->request->cpu;
1803
1804         if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
1805                 affinity < ha->max_rsp_queues - 1)
1806                 *rsp = ha->rsp_q_map[affinity + 1];
1807         else
1808                 *rsp = ha->rsp_q_map[0];
1809 }
1810
1811 /* Generic Control-SRB manipulation functions. */
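/**
 * qla2x00_alloc_iocbs() - Allocate IOCB space on the default request queue.
 * @vha: HA context
 * @sp: SRB to bind to the packet, or NULL to claim ring space only
 *
 * Caller must hold hardware_lock.  Returns a request packet cleared and
 * ready to fill in, or NULL if no handle or ring space is available.
 */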
1812 void *
1813 qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1814 {
1815         struct qla_hw_data *ha = vha->hw;
1816         struct req_que *req = ha->req_q_map[0];
1817         device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1818         uint32_t index, handle;
1819         request_t *pkt;
1820         uint16_t cnt, req_cnt;
1821         struct srb_ctx *ctx;
1822
1823         pkt = NULL;
1824         req_cnt = 1;
1825         handle = 0;
1826
1827         if (!sp)
1828                 goto skip_cmd_array;
1829
1830         /* Check for room in outstanding command list. */
1831         handle = req->current_outstanding_cmd;
1832         for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1833                 handle++;
1834                 if (handle == MAX_OUTSTANDING_COMMANDS)
1835                         handle = 1;
1836                 if (!req->outstanding_cmds[handle])
1837                         break;
1838         }
1839         if (index == MAX_OUTSTANDING_COMMANDS) {
1840                 ql_log(ql_log_warn, vha, 0x700b,
1841                     "No room on oustanding cmd array.\n");
1842                 goto queuing_error;
1843         }
1844
1845         /* Prep command array. */
1846         req->current_outstanding_cmd = handle;
1847         req->outstanding_cmds[handle] = sp;
1848         sp->handle = handle;
1849
1850         /* Adjust entry-counts as needed. */
1851         if (sp->ctx) {
1852                 ctx = sp->ctx;
1853                 req_cnt = ctx->iocbs;
1854         }
1855
1856 skip_cmd_array:
1857         /* Check for room on request queue. */
1858         if (req->cnt < req_cnt) {
1859                 if (ha->mqenable)
1860                         cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
1861                 else if (IS_QLA82XX(ha))
1862                         cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
1863                 else if (IS_FWI2_CAPABLE(ha))
1864                         cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
1865                 else
1866                         cnt = qla2x00_debounce_register(
1867                             ISP_REQ_Q_OUT(ha, &reg->isp));
1868
1869                 if  (req->ring_index < cnt)
1870                         req->cnt = cnt - req->ring_index;
1871                 else
1872                         req->cnt = req->length -
1873                             (req->ring_index - cnt);
1874         }
1875         if (req->cnt < req_cnt)
1876                 goto queuing_error;
1877
1878         /* Prep packet */
1879         req->cnt -= req_cnt;
1880         pkt = req->ring_ptr;
1881         memset(pkt, 0, REQUEST_ENTRY_SIZE);
1882         pkt->entry_count = req_cnt;
1883         pkt->handle = handle;
1884
1885 queuing_error:
1886         return pkt;
1887 }
1888
1889 static void
1890 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1891 {
1892         struct srb_ctx *ctx = sp->ctx;
1893         struct srb_iocb *lio = ctx->u.iocb_cmd;
1894
1895         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1896         logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
1897         if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
1898                 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
1899         if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
1900                 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
1901         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1902         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1903         logio->port_id[1] = sp->fcport->d_id.b.area;
1904         logio->port_id[2] = sp->fcport->d_id.b.domain;
1905         logio->vp_index = sp->fcport->vp_idx;
1906 }
1907
1908 static void
1909 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
1910 {
1911         struct qla_hw_data *ha = sp->fcport->vha->hw;
1912         struct srb_ctx *ctx = sp->ctx;
1913         struct srb_iocb *lio = ctx->u.iocb_cmd;
1914         uint16_t opts;
1915
1916         mbx->entry_type = MBX_IOCB_TYPE;
1917         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1918         mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
1919         opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
1920         opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
1921         if (HAS_EXTENDED_IDS(ha)) {
1922                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1923                 mbx->mb10 = cpu_to_le16(opts);
1924         } else {
1925                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
1926         }
1927         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1928         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1929             sp->fcport->d_id.b.al_pa);
1930         mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1931 }
1932
1933 static void
1934 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1935 {
1936         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1937         logio->control_flags =
1938             cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
1939         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1940         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1941         logio->port_id[1] = sp->fcport->d_id.b.area;
1942         logio->port_id[2] = sp->fcport->d_id.b.domain;
1943         logio->vp_index = sp->fcport->vp_idx;
1944 }
1945
1946 static void
1947 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
1948 {
1949         struct qla_hw_data *ha = sp->fcport->vha->hw;
1950
1951         mbx->entry_type = MBX_IOCB_TYPE;
1952         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1953         mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
1954         mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
1955             cpu_to_le16(sp->fcport->loop_id):
1956             cpu_to_le16(sp->fcport->loop_id << 8);
1957         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1958         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1959             sp->fcport->d_id.b.al_pa);
1960         mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1961         /* Implicit: mbx->mbx10 = 0. */
1962 }
1963
1964 static void
1965 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1966 {
1967         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1968         logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
1969         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1970         logio->vp_index = sp->fcport->vp_idx;
1971 }
1972
1973 static void
1974 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
1975 {
1976         struct qla_hw_data *ha = sp->fcport->vha->hw;
1977
1978         mbx->entry_type = MBX_IOCB_TYPE;
1979         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1980         mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
1981         if (HAS_EXTENDED_IDS(ha)) {
1982                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1983                 mbx->mb10 = cpu_to_le16(BIT_0);
1984         } else {
1985                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
1986         }
1987         mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
1988         mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
1989         mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
1990         mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
1991         mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1992 }
1993
1994 static void
1995 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
1996 {
1997         uint32_t flags;
1998         unsigned int lun;
1999         struct fc_port *fcport = sp->fcport;
2000         scsi_qla_host_t *vha = fcport->vha;
2001         struct qla_hw_data *ha = vha->hw;
2002         struct srb_ctx *ctx = sp->ctx;
2003         struct srb_iocb *iocb = ctx->u.iocb_cmd;
2004         struct req_que *req = vha->req;
2005
2006         flags = iocb->u.tmf.flags;
2007         lun = iocb->u.tmf.lun;
2008
2009         tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2010         tsk->entry_count = 1;
2011         tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2012         tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2013         tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
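        /* Two times R_A_TOV; r_a_tov appears to be kept in 100 ms units,
         * so the /10 converts it to seconds first. */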
2014         tsk->control_flags = cpu_to_le32(flags);
2015         tsk->port_id[0] = fcport->d_id.b.al_pa;
2016         tsk->port_id[1] = fcport->d_id.b.area;
2017         tsk->port_id[2] = fcport->d_id.b.domain;
2018         tsk->vp_index = fcport->vp_idx;
2019
2020         if (flags == TCF_LUN_RESET) {
2021                 int_to_scsilun(lun, &tsk->lun);
2022                 host_to_fcp_swap((uint8_t *)&tsk->lun,
2023                         sizeof(tsk->lun));
2024         }
2025 }
2026
2027 static void
2028 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2029 {
2030         struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
2031
2032         els_iocb->entry_type = ELS_IOCB_TYPE;
2033         els_iocb->entry_count = 1;
2034         els_iocb->sys_define = 0;
2035         els_iocb->entry_status = 0;
2036         els_iocb->handle = sp->handle;
2037         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2038         els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2039         els_iocb->vp_index = sp->fcport->vp_idx;
2040         els_iocb->sof_type = EST_SOFI3;
2041         els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2042
2043         els_iocb->opcode =
2044             (((struct srb_ctx *)sp->ctx)->type == SRB_ELS_CMD_RPT) ?
2045             bsg_job->request->rqst_data.r_els.els_code :
2046             bsg_job->request->rqst_data.h_els.command_code;
2047         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2048         els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2049         els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2050         els_iocb->control_flags = 0;
2051         els_iocb->rx_byte_count =
2052             cpu_to_le32(bsg_job->reply_payload.payload_len);
2053         els_iocb->tx_byte_count =
2054             cpu_to_le32(bsg_job->request_payload.payload_len);
2055
2056         els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2057             (bsg_job->request_payload.sg_list)));
2058         els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2059             (bsg_job->request_payload.sg_list)));
2060         els_iocb->tx_len = cpu_to_le32(sg_dma_len
2061             (bsg_job->request_payload.sg_list));
2062
2063         els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2064             (bsg_job->reply_payload.sg_list)));
2065         els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2066             (bsg_job->reply_payload.sg_list)));
2067         els_iocb->rx_len = cpu_to_le32(sg_dma_len
2068             (bsg_job->reply_payload.sg_list));
2069 }
2070
2071 static void
2072 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2073 {
2074         uint16_t        avail_dsds;
2075         uint32_t        *cur_dsd;
2076         struct scatterlist *sg;
2077         int index;
2078         uint16_t tot_dsds;
2079         scsi_qla_host_t *vha = sp->fcport->vha;
2080         struct qla_hw_data *ha = vha->hw;
2081         struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
2082         int loop_iteration = 0;
2083         int cont_iocb_prsnt = 0;
2084         int entry_count = 1;
2085
2086         memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2087         ct_iocb->entry_type = CT_IOCB_TYPE;
2088         ct_iocb->entry_status = 0;
2089         ct_iocb->handle1 = sp->handle;
2090         SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2091         ct_iocb->status = __constant_cpu_to_le16(0);
2092         ct_iocb->control_flags = __constant_cpu_to_le16(0);
2093         ct_iocb->timeout = 0;
2094         ct_iocb->cmd_dsd_count =
2095             __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2096         ct_iocb->total_dsd_count =
2097             __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2098         ct_iocb->req_bytecount =
2099             cpu_to_le32(bsg_job->request_payload.payload_len);
2100         ct_iocb->rsp_bytecount =
2101             cpu_to_le32(bsg_job->reply_payload.payload_len);
2102
2103         ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2104             (bsg_job->request_payload.sg_list)));
2105         ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2106             (bsg_job->request_payload.sg_list)));
2107         ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2108
2109         ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2110             (bsg_job->reply_payload.sg_list)));
2111         ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2112             (bsg_job->reply_payload.sg_list)));
2113         ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2114
2115         avail_dsds = 1;
2116         cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2117         index = 0;
2118         tot_dsds = bsg_job->reply_payload.sg_cnt;
2119
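        /* The first response DSD sits in the IOCB itself; further DSDs spill
         * into Continuation Type 1 IOCBs, five per continuation. */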
2120         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2121                 dma_addr_t       sle_dma;
2122                 cont_a64_entry_t *cont_pkt;
2123
2124                 /* Allocate additional continuation packets? */
2125                 if (avail_dsds == 0) {
2126                         /*
2127                          * Five DSDs are available in the Cont.
2128                          * Type 1 IOCB.
2129                          */
2130                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2131                             vha->hw->req_q_map[0]);
2132                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2133                         avail_dsds = 5;
2134                         cont_iocb_prsnt = 1;
2135                         entry_count++;
2136                 }
2137
2138                 sle_dma = sg_dma_address(sg);
2139                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2140                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2141                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2142                 loop_iteration++;
2143                 avail_dsds--;
2144         }
2145         ct_iocb->entry_count = entry_count;
2146 }
2147
2148 static void
2149 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2150 {
2151         uint16_t        avail_dsds;
2152         uint32_t        *cur_dsd;
2153         struct scatterlist *sg;
2154         int index;
2155         uint16_t tot_dsds;
2156         scsi_qla_host_t *vha = sp->fcport->vha;
2157         struct qla_hw_data *ha = vha->hw;
2158         struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
2159         int loop_iteration = 0;
2160         int cont_iocb_prsnt = 0;
2161         int entry_count = 1;
2162
2163         ct_iocb->entry_type = CT_IOCB_TYPE;
2164         ct_iocb->entry_status = 0;
2165         ct_iocb->sys_define = 0;
2166         ct_iocb->handle = sp->handle;
2167
2168         ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2169         ct_iocb->vp_index = sp->fcport->vp_idx;
2170         ct_iocb->comp_status = __constant_cpu_to_le16(0);
2171
2172         ct_iocb->cmd_dsd_count =
2173             __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2174         ct_iocb->timeout = 0;
2175         ct_iocb->rsp_dsd_count =
2176             __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2177         ct_iocb->rsp_byte_count =
2178             cpu_to_le32(bsg_job->reply_payload.payload_len);
2179         ct_iocb->cmd_byte_count =
2180             cpu_to_le32(bsg_job->request_payload.payload_len);
2181         ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
2182             (bsg_job->request_payload.sg_list)));
2183         ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
2184            (bsg_job->request_payload.sg_list)));
2185         ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
2186             (bsg_job->request_payload.sg_list));
2187
2188         avail_dsds = 1;
2189         cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
2190         index = 0;
2191         tot_dsds = bsg_job->reply_payload.sg_cnt;
2192
2193         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2194                 dma_addr_t       sle_dma;
2195                 cont_a64_entry_t *cont_pkt;
2196
2197                 /* Allocate additional continuation packets? */
2198                 if (avail_dsds == 0) {
2199                         /*
2200                          * Five DSDs are available in the Cont.
2201                          * Type 1 IOCB.
2202                          */
2203                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2204                             ha->req_q_map[0]);
2205                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2206                         avail_dsds = 5;
2207                         cont_iocb_prsnt = 1;
2208                         entry_count++;
2209                 }
2210
2211                 sle_dma = sg_dma_address(sg);
2212                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2213                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2214                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2215                 loop_iteration++;
2216                 avail_dsds--;
2217         }
2218         ct_iocb->entry_count = entry_count;
2219 }
2220
2221 /**
2222  * qla82xx_start_scsi() - Send a SCSI command to the ISP
2223  * @sp: command to send to the ISP
2224  *
2225  * Returns non-zero if a failure occurred, else zero.
2226  */
2227 int
2228 qla82xx_start_scsi(srb_t *sp)
2229 {
2230         int             ret, nseg;
2231         unsigned long   flags;
2232         struct scsi_cmnd *cmd;
2233         uint32_t        *clr_ptr;
2234         uint32_t        index;
2235         uint32_t        handle;
2236         uint16_t        cnt;
2237         uint16_t        req_cnt;
2238         uint16_t        tot_dsds;
2239         struct device_reg_82xx __iomem *reg;
2240         uint32_t dbval;
2241         uint32_t *fcp_dl;
2242         uint8_t additional_cdb_len;
2243         struct ct6_dsd *ctx;
2244         struct scsi_qla_host *vha = sp->fcport->vha;
2245         struct qla_hw_data *ha = vha->hw;
2246         struct req_que *req = NULL;
2247         struct rsp_que *rsp = NULL;
2248         char            tag[2];
2249
2250         /* Setup device pointers. */
2251         ret = 0;
2252         reg = &ha->iobase->isp82;
2253         cmd = sp->cmd;
2254         req = vha->req;
2255         rsp = ha->rsp_q_map[0];
2256
2257         /* So we know we haven't pci_map'ed anything yet */
2258         tot_dsds = 0;
2259
2260         dbval = 0x04 | (ha->portnum << 5);
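        /* Doorbell value: 0x04 in the low bits (presumably the request-queue
         * doorbell type) with the port number in bits 5-7; queue id and ring
         * index are OR'd in below, just before ringing. */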
2261
2262         /* Send marker if required */
2263         if (vha->marker_needed != 0) {
2264                 if (qla2x00_marker(vha, req,
2265                         rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2266                         ql_log(ql_log_warn, vha, 0x300c,
2267                             "qla2x00_marker failed for cmd=%p.\n", cmd);
2268                         return QLA_FUNCTION_FAILED;
2269                 }
2270                 vha->marker_needed = 0;
2271         }
2272
2273         /* Acquire ring specific lock */
2274         spin_lock_irqsave(&ha->hardware_lock, flags);
2275
2276         /* Check for room in outstanding command list. */
2277         handle = req->current_outstanding_cmd;
2278         for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
2279                 handle++;
2280                 if (handle == MAX_OUTSTANDING_COMMANDS)
2281                         handle = 1;
2282                 if (!req->outstanding_cmds[handle])
2283                         break;
2284         }
2285         if (index == MAX_OUTSTANDING_COMMANDS)
2286                 goto queuing_error;
2287
2288         /* Map the sg table so we have an accurate count of sg entries needed */
2289         if (scsi_sg_count(cmd)) {
2290                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2291                     scsi_sg_count(cmd), cmd->sc_data_direction);
2292                 if (unlikely(!nseg))
2293                         goto queuing_error;
2294         } else
2295                 nseg = 0;
2296
2297         tot_dsds = nseg;
2298
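        /*
         * Past the ql2xshiftctondsd threshold a Command Type 6 IOCB is
         * built: the scatterlist is described through external DSD lists
         * rather than inline continuation entries.  Smaller commands take
         * the Command Type 7 path in the else branch.
         */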
2299         if (tot_dsds > ql2xshiftctondsd) {
2300                 struct cmd_type_6 *cmd_pkt;
2301                 uint16_t more_dsd_lists = 0;
2302                 struct dsd_dma *dsd_ptr;
2303                 uint16_t i;
2304
2305                 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
2306                 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
2307                         ql_dbg(ql_dbg_io, vha, 0x300d,
2308                             "Num of DSD list %d is than %d for cmd=%p.\n",
2309                             more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
2310                             cmd);
2311                         goto queuing_error;
2312                 }
2313
2314                 if (more_dsd_lists <= ha->gbl_dsd_avail)
2315                         goto sufficient_dsds;
2316                 else
2317                         more_dsd_lists -= ha->gbl_dsd_avail;
2318
2319                 for (i = 0; i < more_dsd_lists; i++) {
2320                         dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2321                         if (!dsd_ptr) {
2322                                 ql_log(ql_log_fatal, vha, 0x300e,
2323                                     "Failed to allocate memory for dsd_dma "
2324                                     "for cmd=%p.\n", cmd);
2325                                 goto queuing_error;
2326                         }
2327
2328                         dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2329                                 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2330                         if (!dsd_ptr->dsd_addr) {
2331                                 kfree(dsd_ptr);
2332                                 ql_log(ql_log_fatal, vha, 0x300f,
2333                                     "Failed to allocate memory for dsd_addr "
2334                                     "for cmd=%p.\n", cmd);
2335                                 goto queuing_error;
2336                         }
2337                         list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2338                         ha->gbl_dsd_avail++;
2339                 }
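                /* The lists just allocated now sit on the per-HBA
                 * gbl_dsd_list cache until the IOCB builder consumes them. */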
2340
2341 sufficient_dsds:
2342                 req_cnt = 1;
2343
2344                 if (req->cnt < (req_cnt + 2)) {
2345                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2346                                 &reg->req_q_out[0]);
2347                         if (req->ring_index < cnt)
2348                                 req->cnt = cnt - req->ring_index;
2349                         else
2350                                 req->cnt = req->length -
2351                                         (req->ring_index - cnt);
2352                 }
2353
2354                 if (req->cnt < (req_cnt + 2))
2355                         goto queuing_error;
2356
2357                 ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2358                 if (!sp->ctx) {
2359                         ql_log(ql_log_fatal, vha, 0x3010,
2360                             "Failed to allocate ctx for cmd=%p.\n", cmd);
2361                         goto queuing_error;
2362                 }
2363                 memset(ctx, 0, sizeof(struct ct6_dsd));
2364                 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2365                         GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2366                 if (!ctx->fcp_cmnd) {
2367                         ql_log(ql_log_fatal, vha, 0x3011,
2368                             "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
2369                         goto queuing_error;     /* nothing to dma_pool_free yet */
2370                 }
2371
2372                 /* Initialize the DSD list and dma handle */
2373                 INIT_LIST_HEAD(&ctx->dsd_list);
2374                 ctx->dsd_use_cnt = 0;
2375
2376                 if (cmd->cmd_len > 16) {
2377                         additional_cdb_len = cmd->cmd_len - 16;
2378                         if ((cmd->cmd_len % 4) != 0) {
2379                                 /* SCSI command bigger than 16 bytes must be
2380                                  * multiple of 4
2381                                  */
2382                                 ql_log(ql_log_warn, vha, 0x3012,
2383                                     "scsi cmd len %d not multiple of 4 "
2384                                     "for cmd=%p.\n", cmd->cmd_len, cmd);
2385                                 goto queuing_error_fcp_cmnd;
2386                         }
2387                         ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2388                 } else {
2389                         additional_cdb_len = 0;
2390                         ctx->fcp_cmnd_len = 12 + 16 + 4;
2391                 }
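                /* In both cases fcp_cmnd_len = 12-byte FCP_CMND header
                 * (8-byte LUN plus control bytes) + CDB + 4-byte FCP_DL. */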
2392
2393                 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2394                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2395
2396                 /* Zero out remaining portion of packet. */
2397                 /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
2398                 clr_ptr = (uint32_t *)cmd_pkt + 2;
2399                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2400                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2401
2402                 /* Set NPORT-ID and LUN number*/
2403                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2404                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2405                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2406                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2407                 cmd_pkt->vp_index = sp->fcport->vp_idx;
2408
2409                 /* Build IOCB segments */
2410                 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2411                         goto queuing_error_fcp_cmnd;
2412
2413                 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
2414                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2415
2416                 /* build FCP_CMND IU */
2417                 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2418                 int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
2419                 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2420
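                /* Direction bits in the same FCP_CMND byte: WRDATA is bit 0,
                 * RDDATA is bit 1. */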
2421                 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2422                         ctx->fcp_cmnd->additional_cdb_len |= 1;
2423                 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2424                         ctx->fcp_cmnd->additional_cdb_len |= 2;
2425
2426                 /*
2427                  * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2428                  */
2429                 if (scsi_populate_tag_msg(cmd, tag)) {
2430                         switch (tag[0]) {
2431                         case HEAD_OF_QUEUE_TAG:
2432                                 ctx->fcp_cmnd->task_attribute =
2433                                     TSK_HEAD_OF_QUEUE;
2434                                 break;
2435                         case ORDERED_QUEUE_TAG:
2436                                 ctx->fcp_cmnd->task_attribute =
2437                                     TSK_ORDERED;
2438                                 break;
2439                         }
2440                 }
2441
2442                 /* Populate the FCP_PRIO. */
2443                 if (ha->flags.fcp_prio_enabled)
2444                         ctx->fcp_cmnd->task_attribute |=
2445                             sp->fcport->fcp_prio << 3;
2446
2447                 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2448
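                /* FCP_DL follows the CDB and is big-endian, hence htonl(). */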
2449                 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2450                     additional_cdb_len);
2451                 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
2452
2453                 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2454                 cmd_pkt->fcp_cmnd_dseg_address[0] =
2455                     cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2456                 cmd_pkt->fcp_cmnd_dseg_address[1] =
2457                     cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2458
2459                 sp->flags |= SRB_FCP_CMND_DMA_VALID;
2460                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2461                 /* Set total IOCB entry count. */
2462                 cmd_pkt->entry_count = (uint8_t)req_cnt;
2463                 /* Specify response queue number where
2464                  * completion should happen
2465                  */
2466                 cmd_pkt->entry_status = (uint8_t) rsp->id;
2467         } else {
2468                 struct cmd_type_7 *cmd_pkt;
2469                 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2470                 if (req->cnt < (req_cnt + 2)) {
2471                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2472                             &reg->req_q_out[0]);
2473                         if (req->ring_index < cnt)
2474                                 req->cnt = cnt - req->ring_index;
2475                         else
2476                                 req->cnt = req->length -
2477                                         (req->ring_index - cnt);
2478                 }
2479                 if (req->cnt < (req_cnt + 2))
2480                         goto queuing_error;
2481
2482                 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2483                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2484
2485                 /* Zero out remaining portion of packet. */
2486                 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2487                 clr_ptr = (uint32_t *)cmd_pkt + 2;
2488                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2489                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2490
2491                 /* Set NPORT-ID and LUN number*/
2492                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2493                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2494                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2495                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2496                 cmd_pkt->vp_index = sp->fcport->vp_idx;
2497
2498                 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
2499                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
2500                         sizeof(cmd_pkt->lun));
2501
2502                 /*
2503                  * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2504                  */
2505                 if (scsi_populate_tag_msg(cmd, tag)) {
2506                         switch (tag[0]) {
2507                         case HEAD_OF_QUEUE_TAG:
2508                                 cmd_pkt->task = TSK_HEAD_OF_QUEUE;
2509                                 break;
2510                         case ORDERED_QUEUE_TAG:
2511                                 cmd_pkt->task = TSK_ORDERED;
2512                                 break;
2513                         }
2514                 }
2515
2516                 /* Populate the FCP_PRIO. */
2517                 if (ha->flags.fcp_prio_enabled)
2518                         cmd_pkt->task |= sp->fcport->fcp_prio << 3;
2519
2520                 /* Load SCSI command packet. */
2521                 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2522                 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2523
2524                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2525
2526                 /* Build IOCB segments */
2527                 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
2528
2529                 /* Set total IOCB entry count. */
2530                 cmd_pkt->entry_count = (uint8_t)req_cnt;
2531                 /* Specify response queue number where
2532                  * completion should happen.
2533                  */
2534                 cmd_pkt->entry_status = (uint8_t) rsp->id;
2535
2536         }
2537         /* Build command packet. */
2538         req->current_outstanding_cmd = handle;
2539         req->outstanding_cmds[handle] = sp;
2540         sp->handle = handle;
2541         sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2542         req->cnt -= req_cnt;
2543         wmb();
2544
2545         /* Adjust ring index. */
2546         req->ring_index++;
2547         if (req->ring_index == req->length) {
2548                 req->ring_index = 0;
2549                 req->ring_ptr = req->ring;
2550         } else
2551                 req->ring_ptr++;
2552
2553         sp->flags |= SRB_DMA_VALID;
2554
2555         /* Set chip new ring index. */
2556         /* write, read and verify logic */
2557         dbval = dbval | (req->id << 8) | (req->ring_index << 16);
2558         if (ql2xdbwr)
2559                 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
2560         else {
2561                 WRT_REG_DWORD(
2562                         (unsigned long __iomem *)ha->nxdb_wr_ptr,
2563                         dbval);
2564                 wmb();
2565                 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
2566                         WRT_REG_DWORD(
2567                                 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2568                                 dbval);
2569                         wmb();
2570                 }
2571         }
2572
2573         /* Manage unprocessed RIO/ZIO commands in response queue. */
2574         if (vha->flags.process_response_queue &&
2575             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2576                 qla24xx_process_response_queue(vha, rsp);
2577
2578         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2579         return QLA_SUCCESS;
2580
2581 queuing_error_fcp_cmnd:
2582         dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
2583 queuing_error:
2584         if (tot_dsds)
2585                 scsi_dma_unmap(cmd);
2586
2587         if (sp->ctx) {
2588                 mempool_free(sp->ctx, ha->ctx_mempool);
2589                 sp->ctx = NULL;
2590         }
2591         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2592
2593         return QLA_FUNCTION_FAILED;
2594 }
2595
2596 int
2597 qla2x00_start_sp(srb_t *sp)
2598 {
2599         int rval;
2600         struct qla_hw_data *ha = sp->fcport->vha->hw;
2601         void *pkt;
2602         struct srb_ctx *ctx = sp->ctx;
2603         unsigned long flags;
2604
2605         rval = QLA_FUNCTION_FAILED;
2606         spin_lock_irqsave(&ha->hardware_lock, flags);
2607         pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
2608         if (!pkt) {
2609                 ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
2610                     "qla2x00_alloc_iocbs failed.\n");
2611                 goto done;
2612         }
2613
2614         rval = QLA_SUCCESS;
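        /*
         * FWI-2 capable ISPs (24xx and later) take the new IOCB formats;
         * earlier ISPs are driven through mailbox-entry equivalents.
         */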
2615         switch (ctx->type) {
2616         case SRB_LOGIN_CMD:
2617                 IS_FWI2_CAPABLE(ha) ?
2618                     qla24xx_login_iocb(sp, pkt) :
2619                     qla2x00_login_iocb(sp, pkt);
2620                 break;
2621         case SRB_LOGOUT_CMD:
2622                 IS_FWI2_CAPABLE(ha) ?
2623                     qla24xx_logout_iocb(sp, pkt) :
2624                     qla2x00_logout_iocb(sp, pkt);
2625                 break;
2626         case SRB_ELS_CMD_RPT:
2627         case SRB_ELS_CMD_HST:
2628                 qla24xx_els_iocb(sp, pkt);
2629                 break;
2630         case SRB_CT_CMD:
2631                 IS_FWI2_CAPABLE(ha) ?
2632                     qla24xx_ct_iocb(sp, pkt) :
2633                     qla2x00_ct_iocb(sp, pkt);
2634                 break;
2635         case SRB_ADISC_CMD:
2636                 IS_FWI2_CAPABLE(ha) ?
2637                     qla24xx_adisc_iocb(sp, pkt) :
2638                     qla2x00_adisc_iocb(sp, pkt);
2639                 break;
2640         case SRB_TM_CMD:
2641                 qla24xx_tm_iocb(sp, pkt);
2642                 break;
2643         default:
2644                 break;
2645         }
2646
2647         wmb();
2648         qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
2649 done:
2650         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2651         return rval;
2652 }