drivers/infiniband/ulp/iser/iser_initiator.c
/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/kfifo.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

#include "iscsi_iser.h"

/* Register user buffer memory and initialize passive rdma
 * dto descriptor. Data size is stored in
 * task->data[ISER_DIR_IN].data_len, protection size
 * is stored in task->prot[ISER_DIR_IN].data_len
 */
static int iser_prepare_read_cmd(struct iscsi_task *task)
{
        struct iscsi_iser_task *iser_task = task->dd_data;
        struct iser_mem_reg *mem_reg;
        int err;
        struct iser_hdr *hdr = &iser_task->desc.iser_header;
        struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];

        err = iser_dma_map_task_data(iser_task,
                                     buf_in,
                                     ISER_DIR_IN,
                                     DMA_FROM_DEVICE);
        if (err)
                return err;

        if (scsi_prot_sg_count(iser_task->sc)) {
                struct iser_data_buf *pbuf_in = &iser_task->prot[ISER_DIR_IN];

                err = iser_dma_map_task_data(iser_task,
                                             pbuf_in,
                                             ISER_DIR_IN,
                                             DMA_FROM_DEVICE);
                if (err)
                        return err;
        }

        err = iser_reg_rdma_mem(iser_task, ISER_DIR_IN);
        if (err) {
                iser_err("Failed to set up Data-IN RDMA\n");
                return err;
        }
        mem_reg = &iser_task->rdma_reg[ISER_DIR_IN];

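        /*
         * Advertise the registered buffer in the iSER header: setting
         * ISER_RSV marks the Read STag/VA as valid, so the target can
         * RDMA-Write the Data-In payload directly into this buffer.
         */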
        hdr->flags    |= ISER_RSV;
        hdr->read_stag = cpu_to_be32(mem_reg->rkey);
        hdr->read_va   = cpu_to_be64(mem_reg->sge.addr);

        iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
                 task->itt, mem_reg->rkey,
                 (unsigned long long)mem_reg->sge.addr);

        return 0;
}

/* Register user buffer memory and initialize passive rdma
 * dto descriptor. Data size is stored in
 * task->data[ISER_DIR_OUT].data_len, protection size
 * is stored in task->prot[ISER_DIR_OUT].data_len
 */
static int
iser_prepare_write_cmd(struct iscsi_task *task,
                       unsigned int imm_sz,
                       unsigned int unsol_sz,
                       unsigned int edtl)
{
        struct iscsi_iser_task *iser_task = task->dd_data;
        struct iser_mem_reg *mem_reg;
        int err;
        struct iser_hdr *hdr = &iser_task->desc.iser_header;
        struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
        struct ib_sge *tx_dsg = &iser_task->desc.tx_sg[1];

        err = iser_dma_map_task_data(iser_task,
                                     buf_out,
                                     ISER_DIR_OUT,
                                     DMA_TO_DEVICE);
        if (err)
                return err;

        if (scsi_prot_sg_count(iser_task->sc)) {
                struct iser_data_buf *pbuf_out = &iser_task->prot[ISER_DIR_OUT];

                err = iser_dma_map_task_data(iser_task,
                                             pbuf_out,
                                             ISER_DIR_OUT,
                                             DMA_TO_DEVICE);
                if (err)
                        return err;
        }

        err = iser_reg_rdma_mem(iser_task, ISER_DIR_OUT);
        if (err) {
                iser_err("Failed to register write cmd RDMA mem\n");
                return err;
        }

        mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];

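        /*
         * Only advertise a Write STag/VA when part of the transfer is
         * solicited, i.e. the unsolicited data does not cover the whole
         * expected data transfer length; the target can then fetch the
         * remainder, starting past the unsolicited region, via RDMA Read.
         */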
        if (unsol_sz < edtl) {
                hdr->flags     |= ISER_WSV;
                hdr->write_stag = cpu_to_be32(mem_reg->rkey);
                hdr->write_va   = cpu_to_be64(mem_reg->sge.addr + unsol_sz);

                iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
                         "VA:%#llX + unsol:%d\n",
                         task->itt, mem_reg->rkey,
                         (unsigned long long)mem_reg->sge.addr, unsol_sz);
        }

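        /*
         * Immediate data rides in the command PDU itself: point the
         * second SGE of the send descriptor at the start of the mapped
         * buffer so the HCA gathers it into the Send.
         */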
        if (imm_sz > 0) {
                iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
                         task->itt, imm_sz);
                tx_dsg->addr = mem_reg->sge.addr;
                tx_dsg->length = imm_sz;
                tx_dsg->lkey = mem_reg->sge.lkey;
                iser_task->desc.num_sge = 2;
        }

        return 0;
}

/* creates a new tx descriptor and adds header regd buffer */
static void iser_create_send_desc(struct iser_conn      *iser_conn,
                                  struct iser_tx_desc   *tx_desc)
{
        struct iser_device *device = iser_conn->ib_conn.device;

        ib_dma_sync_single_for_cpu(device->ib_device,
                tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);

        memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
        tx_desc->iser_header.flags = ISER_VER;
        tx_desc->num_sge = 1;
}

static void iser_free_login_buf(struct iser_conn *iser_conn)
{
        struct iser_device *device = iser_conn->ib_conn.device;

        if (!iser_conn->login_buf)
                return;

        if (iser_conn->login_req_dma)
                ib_dma_unmap_single(device->ib_device,
                                    iser_conn->login_req_dma,
                                    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);

        if (iser_conn->login_resp_dma)
                ib_dma_unmap_single(device->ib_device,
                                    iser_conn->login_resp_dma,
                                    ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);

        kfree(iser_conn->login_buf);

        /* make sure we never redo any unmapping */
        iser_conn->login_req_dma = 0;
        iser_conn->login_resp_dma = 0;
        iser_conn->login_buf = NULL;
}

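/*
 * A single allocation backs both login buffers: the first
 * ISCSI_DEF_MAX_RECV_SEG_LEN bytes hold the outbound login request,
 * the following ISER_RX_LOGIN_SIZE bytes hold the login response.
 * Each half is DMA-mapped separately, in its own direction.
 */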
static int iser_alloc_login_buf(struct iser_conn *iser_conn)
{
        struct iser_device *device = iser_conn->ib_conn.device;
        int                     req_err, resp_err;

        BUG_ON(device == NULL);

        iser_conn->login_buf = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
                                     ISER_RX_LOGIN_SIZE, GFP_KERNEL);
        if (!iser_conn->login_buf)
                goto out_err;

        iser_conn->login_req_buf  = iser_conn->login_buf;
        iser_conn->login_resp_buf = iser_conn->login_buf +
                                                ISCSI_DEF_MAX_RECV_SEG_LEN;

        iser_conn->login_req_dma = ib_dma_map_single(device->ib_device,
                                                     iser_conn->login_req_buf,
                                                     ISCSI_DEF_MAX_RECV_SEG_LEN,
                                                     DMA_TO_DEVICE);

        iser_conn->login_resp_dma = ib_dma_map_single(device->ib_device,
                                                      iser_conn->login_resp_buf,
                                                      ISER_RX_LOGIN_SIZE,
                                                      DMA_FROM_DEVICE);

        req_err  = ib_dma_mapping_error(device->ib_device,
                                        iser_conn->login_req_dma);
        resp_err = ib_dma_mapping_error(device->ib_device,
                                        iser_conn->login_resp_dma);

        if (req_err || resp_err) {
                if (req_err)
                        iser_conn->login_req_dma = 0;
                if (resp_err)
                        iser_conn->login_resp_dma = 0;
                goto free_login_buf;
        }
        return 0;

free_login_buf:
        iser_free_login_buf(iser_conn);

out_err:
        iser_err("unable to alloc or map login buf\n");
        return -ENOMEM;
}

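/*
 * Size the receive ring to the session's command window: one RX
 * descriptor per outstanding command (cmds_max is a power of two, so
 * the mask below is valid), with min_posted_rx (a quarter of the ring)
 * serving as the low-water mark for batched re-posting.
 */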
int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
                              struct iscsi_session *session)
{
        int i, j;
        u64 dma_addr;
        struct iser_rx_desc *rx_desc;
        struct ib_sge       *rx_sg;
        struct ib_conn *ib_conn = &iser_conn->ib_conn;
        struct iser_device *device = ib_conn->device;

        iser_conn->qp_max_recv_dtos = session->cmds_max;
        iser_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
        iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2;

        if (device->reg_ops->alloc_reg_res(ib_conn, session->scsi_cmds_max,
                                           iser_conn->scsi_sg_tablesize))
                goto create_rdma_reg_res_failed;

        if (iser_alloc_login_buf(iser_conn))
                goto alloc_login_buf_fail;

        iser_conn->num_rx_descs = session->cmds_max;
        iser_conn->rx_descs = kmalloc_array(iser_conn->num_rx_descs,
                                sizeof(struct iser_rx_desc), GFP_KERNEL);
        if (!iser_conn->rx_descs)
                goto rx_desc_alloc_fail;

        rx_desc = iser_conn->rx_descs;

        for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++)  {
                dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
                                        ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
                if (ib_dma_mapping_error(device->ib_device, dma_addr))
                        goto rx_desc_dma_map_failed;

                rx_desc->dma_addr = dma_addr;

                rx_sg = &rx_desc->rx_sg;
                rx_sg->addr   = rx_desc->dma_addr;
                rx_sg->length = ISER_RX_PAYLOAD_SIZE;
                rx_sg->lkey   = device->mr->lkey;
        }

        iser_conn->rx_desc_head = 0;
        return 0;

rx_desc_dma_map_failed:
        rx_desc = iser_conn->rx_descs;
        for (j = 0; j < i; j++, rx_desc++)
                ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
                                    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
        kfree(iser_conn->rx_descs);
        iser_conn->rx_descs = NULL;
rx_desc_alloc_fail:
        iser_free_login_buf(iser_conn);
alloc_login_buf_fail:
        device->reg_ops->free_reg_res(ib_conn);
create_rdma_reg_res_failed:
        iser_err("failed allocating rx descriptors / data buffers\n");
        return -ENOMEM;
}

void iser_free_rx_descriptors(struct iser_conn *iser_conn)
{
        int i;
        struct iser_rx_desc *rx_desc;
        struct ib_conn *ib_conn = &iser_conn->ib_conn;
        struct iser_device *device = ib_conn->device;

        if (device->reg_ops->free_reg_res)
                device->reg_ops->free_reg_res(ib_conn);

        rx_desc = iser_conn->rx_descs;
        for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++)
                ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
                                    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
        kfree(iser_conn->rx_descs);
        /* make sure we never redo any unmapping */
        iser_conn->rx_descs = NULL;

        iser_free_login_buf(iser_conn);
}

static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
{
        struct iser_conn *iser_conn = conn->dd_data;
        struct ib_conn *ib_conn = &iser_conn->ib_conn;
        struct iscsi_session *session = conn->session;

        iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
        /* check if this is the last login - going to full feature phase */
        if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE)
                return 0;

        /*
         * Check that there is one posted recv buffer
         * (for the last login response).
         */
        WARN_ON(ib_conn->post_recv_buf_count != 1);

        if (session->discovery_sess) {
                iser_info("Discovery session, re-using login RX buffer\n");
                return 0;
        }

        iser_info("Normal session, posting batch of RX %d buffers\n",
                  iser_conn->min_posted_rx);

        /* Initial post receive buffers */
        if (iser_post_recvm(iser_conn, iser_conn->min_posted_rx))
                return -ENOMEM;

        return 0;
}

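/*
 * Request a signaled send completion only once every
 * ISER_SIGNAL_CMD_COUNT commands; the unsignaled sends in between are
 * implicitly flushed by the periodic signaled one, which keeps send-CQ
 * processing overhead low.
 */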
static inline bool iser_signal_comp(u8 sig_count)
{
        return (sig_count % ISER_SIGNAL_CMD_COUNT) == 0;
}

/**
 * iser_send_command - send command PDU
 * @conn: link connection to send the command on
 * @task: SCSI command task
 */
int iser_send_command(struct iscsi_conn *conn,
                      struct iscsi_task *task)
{
        struct iser_conn *iser_conn = conn->dd_data;
        struct iscsi_iser_task *iser_task = task->dd_data;
        unsigned long edtl;
        int err;
        struct iser_data_buf *data_buf, *prot_buf;
        struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
        struct scsi_cmnd *sc = task->sc;
        struct iser_tx_desc *tx_desc = &iser_task->desc;
        u8 sig_count = ++iser_conn->ib_conn.sig_count;

        edtl = ntohl(hdr->data_length);

        /* build the tx desc regd header and add it to the tx desc dto */
        tx_desc->type = ISCSI_TX_SCSI_COMMAND;
        iser_create_send_desc(iser_conn, tx_desc);

        if (hdr->flags & ISCSI_FLAG_CMD_READ) {
                data_buf = &iser_task->data[ISER_DIR_IN];
                prot_buf = &iser_task->prot[ISER_DIR_IN];
        } else {
                data_buf = &iser_task->data[ISER_DIR_OUT];
                prot_buf = &iser_task->prot[ISER_DIR_OUT];
        }

        if (scsi_sg_count(sc)) { /* using a scatter list */
                data_buf->sg = scsi_sglist(sc);
                data_buf->size = scsi_sg_count(sc);
        }
        data_buf->data_len = scsi_bufflen(sc);

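        /*
         * With T10-PI enabled, each sector_size-byte block of data
         * carries an 8-byte protection-information tuple, hence the
         * shift by ilog2(sector_size) times 8 below.
         */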
        if (scsi_prot_sg_count(sc)) {
                prot_buf->sg  = scsi_prot_sglist(sc);
                prot_buf->size = scsi_prot_sg_count(sc);
                prot_buf->data_len = (data_buf->data_len >>
                                     ilog2(sc->device->sector_size)) * 8;
        }

        if (hdr->flags & ISCSI_FLAG_CMD_READ) {
                err = iser_prepare_read_cmd(task);
                if (err)
                        goto send_command_error;
        }
        if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
                err = iser_prepare_write_cmd(task,
                                             task->imm_count,
                                             task->imm_count +
                                             task->unsol_r2t.data_length,
                                             edtl);
                if (err)
                        goto send_command_error;
        }

        iser_task->status = ISER_TASK_STATUS_STARTED;

        err = iser_post_send(&iser_conn->ib_conn, tx_desc,
                             iser_signal_comp(sig_count));
        if (!err)
                return 0;

send_command_error:
        iser_err("conn %p failed task->itt %d err %d\n", conn, task->itt, err);
        return err;
}

/**
 * iser_send_data_out - send data out PDU
 * @conn: link connection to send the PDU on
 * @task: SCSI command task
 * @hdr:  iSCSI Data-Out header of the PDU to send
 */
int iser_send_data_out(struct iscsi_conn *conn,
                       struct iscsi_task *task,
                       struct iscsi_data *hdr)
{
        struct iser_conn *iser_conn = conn->dd_data;
        struct iscsi_iser_task *iser_task = task->dd_data;
        struct iser_tx_desc *tx_desc = NULL;
        struct iser_mem_reg *mem_reg;
        unsigned long buf_offset;
        unsigned long data_seg_len;
        uint32_t itt;
        int err;
        struct ib_sge *tx_dsg;

        itt = (__force uint32_t)hdr->itt;
        data_seg_len = ntoh24(hdr->dlength);
        buf_offset   = ntohl(hdr->offset);

        iser_dbg("%s itt %d dseg_len %d offset %d\n",
                 __func__, (int)itt, (int)data_seg_len, (int)buf_offset);

        tx_desc = kmem_cache_zalloc(ig.desc_cache, GFP_ATOMIC);
        if (tx_desc == NULL) {
                iser_err("Failed to alloc desc for post dataout\n");
                return -ENOMEM;
        }

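        /*
         * Unlike command descriptors, Data-Out descriptors come from the
         * global descriptor cache; iser_snd_completion() frees them back
         * to the cache once the send completes.
         */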
        tx_desc->type = ISCSI_TX_DATAOUT;
        tx_desc->iser_header.flags = ISER_VER;
        memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));

        /* build the tx desc */
        err = iser_initialize_task_headers(task, tx_desc);
        if (err)
                goto send_data_out_error;

        mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];
        tx_dsg = &tx_desc->tx_sg[1];
        tx_dsg->addr = mem_reg->sge.addr + buf_offset;
        tx_dsg->length = data_seg_len;
        tx_dsg->lkey = mem_reg->sge.lkey;
        tx_desc->num_sge = 2;

        if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
                iser_err("Offset:%ld & DSL:%ld in Data-Out "
                         "inconsistent with total len:%ld, itt:%d\n",
                         buf_offset, data_seg_len,
                         iser_task->data[ISER_DIR_OUT].data_len, itt);
                err = -EINVAL;
                goto send_data_out_error;
        }
        iser_dbg("data-out itt: %d, offset: %ld, sz: %ld\n",
                 itt, buf_offset, data_seg_len);

        err = iser_post_send(&iser_conn->ib_conn, tx_desc, true);
        if (!err)
                return 0;

send_data_out_error:
        kmem_cache_free(ig.desc_cache, tx_desc);
        iser_err("conn %p failed err %d\n", conn, err);
        return err;
}

int iser_send_control(struct iscsi_conn *conn,
                      struct iscsi_task *task)
{
        struct iser_conn *iser_conn = conn->dd_data;
        struct iscsi_iser_task *iser_task = task->dd_data;
        struct iser_tx_desc *mdesc = &iser_task->desc;
        unsigned long data_seg_len;
        int err = 0;
        struct iser_device *device;

        /* build the tx desc regd header and add it to the tx desc dto */
        mdesc->type = ISCSI_TX_CONTROL;
        iser_create_send_desc(iser_conn, mdesc);

        device = iser_conn->ib_conn.device;

        data_seg_len = ntoh24(task->hdr->dlength);

        if (data_seg_len > 0) {
                struct ib_sge *tx_dsg = &mdesc->tx_sg[1];

                if (task != conn->login_task) {
                        iser_err("data present on non login task!!!\n");
                        err = -EINVAL;
                        goto send_control_error;
                }

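                /*
                 * Hand the login buffer to the CPU, copy the PDU data
                 * into it, then hand it back to the device before
                 * posting the send that references it.
                 */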
                ib_dma_sync_single_for_cpu(device->ib_device,
                        iser_conn->login_req_dma, task->data_count,
                        DMA_TO_DEVICE);

                memcpy(iser_conn->login_req_buf, task->data, task->data_count);

                ib_dma_sync_single_for_device(device->ib_device,
                        iser_conn->login_req_dma, task->data_count,
                        DMA_TO_DEVICE);

                tx_dsg->addr    = iser_conn->login_req_dma;
                tx_dsg->length  = task->data_count;
                tx_dsg->lkey    = device->mr->lkey;
                mdesc->num_sge = 2;
        }

        if (task == conn->login_task) {
                iser_dbg("op %x dsl %lx, posting login rx buffer\n",
                         task->hdr->opcode, data_seg_len);
                err = iser_post_recvl(iser_conn);
                if (err)
                        goto send_control_error;
                err = iser_post_rx_bufs(conn, task->hdr);
                if (err)
                        goto send_control_error;
        }

        err = iser_post_send(&iser_conn->ib_conn, mdesc, true);
        if (!err)
                return 0;

send_control_error:
        iser_err("conn %p failed err %d\n", conn, err);
        return err;
}

/**
 * iser_rcv_completion - recv DTO completion
 * @rx_desc:     receive descriptor that completed
 * @rx_xfer_len: number of bytes received (iSER headers + iSCSI PDU)
 * @ib_conn:     RDMA connection the completion arrived on
 */
void iser_rcv_completion(struct iser_rx_desc *rx_desc,
                         unsigned long rx_xfer_len,
                         struct ib_conn *ib_conn)
{
        struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
                                                   ib_conn);
        struct iscsi_hdr *hdr;
        u64 rx_dma;
        int rx_buflen, outstanding, count, err;

        /* differentiate the login response buffer from all other PDUs */
        if ((char *)rx_desc == iser_conn->login_resp_buf) {
                rx_dma = iser_conn->login_resp_dma;
                rx_buflen = ISER_RX_LOGIN_SIZE;
        } else {
                rx_dma = rx_desc->dma_addr;
                rx_buflen = ISER_RX_PAYLOAD_SIZE;
        }

        ib_dma_sync_single_for_cpu(ib_conn->device->ib_device, rx_dma,
                                   rx_buflen, DMA_FROM_DEVICE);

        hdr = &rx_desc->iscsi_header;

        iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
                        hdr->itt, (int)(rx_xfer_len - ISER_HEADERS_LEN));

        iscsi_iser_recv(iser_conn->iscsi_conn, hdr, rx_desc->data,
                        rx_xfer_len - ISER_HEADERS_LEN);

        ib_dma_sync_single_for_device(ib_conn->device->ib_device, rx_dma,
                                      rx_buflen, DMA_FROM_DEVICE);

        /*
         * decrementing conn->post_recv_buf_count only --after-- freeing the
         * task eliminates the need to worry about tasks which are completed
         * in parallel to the execution of iser_conn_term. So the code that
         * waits for the posted rx bufs refcount to become zero handles
         * everything.
         */
        ib_conn->post_recv_buf_count--;

        if (rx_dma == iser_conn->login_resp_dma)
                return;

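        /*
         * Replenish the receive ring once the number of outstanding
         * buffers drops low enough that a full min_posted_rx batch fits
         * below qp_max_recv_dtos.
         */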
        outstanding = ib_conn->post_recv_buf_count;
        if (outstanding + iser_conn->min_posted_rx <= iser_conn->qp_max_recv_dtos) {
                count = min(iser_conn->qp_max_recv_dtos - outstanding,
                            iser_conn->min_posted_rx);
                err = iser_post_recvm(iser_conn, count);
                if (err)
                        iser_err("posting %d rx bufs err %d\n", count, err);
        }
}

void iser_snd_completion(struct iser_tx_desc *tx_desc,
                         struct ib_conn *ib_conn)
{
        struct iscsi_task *task;
        struct iser_device *device = ib_conn->device;

        if (tx_desc->type == ISCSI_TX_DATAOUT) {
                ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
                                        ISER_HEADERS_LEN, DMA_TO_DEVICE);
                kmem_cache_free(ig.desc_cache, tx_desc);
                tx_desc = NULL;
        }

        if (tx_desc && tx_desc->type == ISCSI_TX_CONTROL) {
                /* this arithmetic is legal by libiscsi dd_data allocation */
                task = (void *) ((long)(void *)tx_desc -
                                  sizeof(struct iscsi_task));
                if (task->hdr->itt == RESERVED_ITT)
                        iscsi_put_task(task);
        }
}

void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
{
        iser_task->status = ISER_TASK_STATUS_INIT;

        iser_task->dir[ISER_DIR_IN] = 0;
        iser_task->dir[ISER_DIR_OUT] = 0;

        iser_task->data[ISER_DIR_IN].data_len  = 0;
        iser_task->data[ISER_DIR_OUT].data_len = 0;

        iser_task->prot[ISER_DIR_IN].data_len  = 0;
        iser_task->prot[ISER_DIR_OUT].data_len = 0;

        memset(&iser_task->rdma_reg[ISER_DIR_IN], 0,
               sizeof(struct iser_mem_reg));
        memset(&iser_task->rdma_reg[ISER_DIR_OUT], 0,
               sizeof(struct iser_mem_reg));
}

void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{
        int is_rdma_data_aligned = 1;
        int is_rdma_prot_aligned = 1;
        int prot_count = scsi_prot_sg_count(iser_task->sc);

        /* if we were reading, copy back to the unaligned sglist;
         * in any case, dma-unmap and free the bounce copy
         */
        if (iser_task->data[ISER_DIR_IN].orig_sg) {
                is_rdma_data_aligned = 0;
                iser_finalize_rdma_unaligned_sg(iser_task,
                                                &iser_task->data[ISER_DIR_IN],
                                                ISER_DIR_IN);
        }

        if (iser_task->data[ISER_DIR_OUT].orig_sg) {
                is_rdma_data_aligned = 0;
                iser_finalize_rdma_unaligned_sg(iser_task,
                                                &iser_task->data[ISER_DIR_OUT],
                                                ISER_DIR_OUT);
        }

        if (iser_task->prot[ISER_DIR_IN].orig_sg) {
                is_rdma_prot_aligned = 0;
                iser_finalize_rdma_unaligned_sg(iser_task,
                                                &iser_task->prot[ISER_DIR_IN],
                                                ISER_DIR_IN);
        }

        if (iser_task->prot[ISER_DIR_OUT].orig_sg) {
                is_rdma_prot_aligned = 0;
                iser_finalize_rdma_unaligned_sg(iser_task,
                                                &iser_task->prot[ISER_DIR_OUT],
                                                ISER_DIR_OUT);
        }

        if (iser_task->dir[ISER_DIR_IN]) {
                iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
                if (is_rdma_data_aligned)
                        iser_dma_unmap_task_data(iser_task,
                                                 &iser_task->data[ISER_DIR_IN],
                                                 DMA_FROM_DEVICE);
                if (prot_count && is_rdma_prot_aligned)
                        iser_dma_unmap_task_data(iser_task,
                                                 &iser_task->prot[ISER_DIR_IN],
                                                 DMA_FROM_DEVICE);
        }

        if (iser_task->dir[ISER_DIR_OUT]) {
                iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
                if (is_rdma_data_aligned)
                        iser_dma_unmap_task_data(iser_task,
                                                 &iser_task->data[ISER_DIR_OUT],
                                                 DMA_TO_DEVICE);
                if (prot_count && is_rdma_prot_aligned)
                        iser_dma_unmap_task_data(iser_task,
                                                 &iser_task->prot[ISER_DIR_OUT],
                                                 DMA_TO_DEVICE);
        }
}