/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*       copyright notice, this list of conditions and the following
*       disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*       copyright notice, this list of conditions and the following
*       disclaimer in the documentation and/or other materials
*       provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/

#include "i40iw_osdep.h"
#include "i40iw_status.h"
#include "i40iw_d.h"
#include "i40iw_user.h"
#include "i40iw_register.h"

static u32 nop_signature = 0x55550000;

/**
 * i40iw_nop_1 - insert a nop wqe and move head. no post work
 * @qp: hw qp ptr
 */
static enum i40iw_status_code i40iw_nop_1(struct i40iw_qp_uk *qp)
{
        u64 header, *wqe;
        u64 *wqe_0 = NULL;
        u32 wqe_idx, peek_head;
        bool signaled = false;

        if (!qp->sq_ring.head)
                return I40IW_ERR_PARAM;

        wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
        wqe = qp->sq_base[wqe_idx].elem;

        qp->sq_wrtrk_array[wqe_idx].wqe_size = I40IW_QP_WQE_MIN_SIZE;

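        /*
         * Pre-toggle the valid bit of the slot after this nop so hardware
         * does not treat stale data there as a posted WQE; on a ring wrap
         * (peek_head == 0) the polarity itself is about to flip, so the
         * current polarity is written instead.
         */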
        peek_head = (qp->sq_ring.head + 1) % qp->sq_ring.size;
        wqe_0 = qp->sq_base[peek_head].elem;
        if (peek_head)
                wqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID);
        else
                wqe_0[3] = LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        set_64bit_val(wqe, 0, 0);
        set_64bit_val(wqe, 8, 0);
        set_64bit_val(wqe, 16, 0);

        header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
            LS_64(signaled, I40IWQPSQ_SIGCOMPL) |
            LS_64(qp->swqe_polarity, I40IWQPSQ_VALID) | nop_signature++;

        wmb();  /* Memory barrier to ensure data is written before valid bit is set */

        set_64bit_val(wqe, 24, header);
        return 0;
}

/**
 * i40iw_qp_post_wr - post wr to hardware
 * @qp: hw qp ptr
 */
void i40iw_qp_post_wr(struct i40iw_qp_uk *qp)
{
        u64 temp;
        u32 hw_sq_tail;
        u32 sw_sq_head;

        mb(); /* valid bit is written and loads completed before reading shadow */

        /* read the doorbell shadow area */
        get_64bit_val(qp->shadow_area, 0, &temp);

        hw_sq_tail = (u32)RS_64(temp, I40IW_QP_DBSA_HW_SQ_TAIL);
        sw_sq_head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
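        /*
         * Ring the WQE allocation doorbell only if hardware has not already
         * consumed past what software posted previously; the two branches
         * below cover the cases where the software head has and has not
         * wrapped around the ring since the last doorbell.
         */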
        if (sw_sq_head != hw_sq_tail) {
                if (sw_sq_head > qp->initial_ring.head) {
                        if ((hw_sq_tail >= qp->initial_ring.head) &&
                            (hw_sq_tail < sw_sq_head)) {
                                writel(qp->qp_id, qp->wqe_alloc_reg);
                        }
                } else if (sw_sq_head != qp->initial_ring.head) {
                        if ((hw_sq_tail >= qp->initial_ring.head) ||
                            (hw_sq_tail < sw_sq_head)) {
                                writel(qp->qp_id, qp->wqe_alloc_reg);
                        }
                }
        }

        qp->initial_ring.head = qp->sq_ring.head;
}

/**
 * i40iw_qp_ring_push_db - ring qp doorbell
 * @qp: hw qp ptr
 * @wqe_idx: wqe index
 */
static void i40iw_qp_ring_push_db(struct i40iw_qp_uk *qp, u32 wqe_idx)
{
        set_32bit_val(qp->push_db, 0, LS_32((wqe_idx >> 2), I40E_PFPE_WQEALLOC_WQE_DESC_INDEX) | qp->qp_id);
        qp->initial_ring.head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
}

/**
 * i40iw_qp_get_next_send_wqe - return next wqe ptr
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 * @wqe_size: size of sq wqe
 * @total_size: total size of the work request
 * @wr_id: work request id
 */
u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp,
                                u32 *wqe_idx,
                                u8 wqe_size,
                                u32 total_size,
                                u64 wr_id
                                )
{
        u64 *wqe = NULL;
        u64 wqe_ptr;
        u32 peek_head = 0;
        u16 offset;
        enum i40iw_status_code ret_code = 0;
        u8 nop_wqe_cnt = 0, i;
        u64 *wqe_0 = NULL;

        *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);

        if (!*wqe_idx)
                qp->swqe_polarity = !qp->swqe_polarity;
        wqe_ptr = (uintptr_t)qp->sq_base[*wqe_idx].elem;
        offset = (u16)(wqe_ptr) & 0x7F;
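        /*
         * A WQE must not straddle a 128-byte boundary; if this one would,
         * pad with nop WQEs up to the boundary before carving out the slot.
         */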
        if ((offset + wqe_size) > I40IW_QP_WQE_MAX_SIZE) {
                nop_wqe_cnt = (u8)(I40IW_QP_WQE_MAX_SIZE - offset) / I40IW_QP_WQE_MIN_SIZE;
                for (i = 0; i < nop_wqe_cnt; i++) {
                        i40iw_nop_1(qp);
                        I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
                        if (ret_code)
                                return NULL;
                }

                *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
                if (!*wqe_idx)
                        qp->swqe_polarity = !qp->swqe_polarity;
        }

        if (((*wqe_idx & 3) == 1) && (wqe_size == I40IW_WQE_SIZE_64)) {
                i40iw_nop_1(qp);
                I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
                if (ret_code)
                        return NULL;
                *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
                if (!*wqe_idx)
                        qp->swqe_polarity = !qp->swqe_polarity;
        }

        for (i = 0; i < wqe_size / I40IW_QP_WQE_MIN_SIZE; i++) {
                I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
                if (ret_code)
                        return NULL;
        }

        wqe = qp->sq_base[*wqe_idx].elem;

        peek_head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
        wqe_0 = qp->sq_base[peek_head].elem;

        if (((peek_head & 3) == 1) || ((peek_head & 3) == 3)) {
                if (RS_64(wqe_0[3], I40IWQPSQ_VALID) != !qp->swqe_polarity)
                        wqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID);
        }

        qp->sq_wrtrk_array[*wqe_idx].wrid = wr_id;
        qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
        qp->sq_wrtrk_array[*wqe_idx].wqe_size = wqe_size;
        return wqe;
}

/**
 * i40iw_set_fragment - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 */
static void i40iw_set_fragment(u64 *wqe, u32 offset, struct i40iw_sge *sge)
{
        if (sge) {
                set_64bit_val(wqe, offset, LS_64(sge->tag_off, I40IWQPSQ_FRAG_TO));
                set_64bit_val(wqe, (offset + 8),
                              (LS_64(sge->len, I40IWQPSQ_FRAG_LEN) |
                               LS_64(sge->stag, I40IWQPSQ_FRAG_STAG)));
        }
}

/**
 * i40iw_qp_get_next_recv_wqe - get next qp's rcv wqe
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 */
u64 *i40iw_qp_get_next_recv_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx)
{
        u64 *wqe = NULL;
        enum i40iw_status_code ret_code;

        if (I40IW_RING_FULL_ERR(qp->rq_ring))
                return NULL;

        I40IW_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
        if (ret_code)
                return NULL;
        if (!*wqe_idx)
                qp->rwqe_polarity = !qp->rwqe_polarity;
        /* rq_wqe_size_multiplier is no of qwords in one rq wqe */
        wqe = qp->rq_base[*wqe_idx * (qp->rq_wqe_size_multiplier >> 2)].elem;

        return wqe;
}

/**
 * i40iw_rdma_write - rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_rdma_write(struct i40iw_qp_uk *qp,
                                               struct i40iw_post_sq_info *info,
                                               bool post_sq)
{
        u64 header;
        u64 *wqe;
        struct i40iw_rdma_write *op_info;
        u32 i, wqe_idx;
        u32 total_size = 0, byte_off;
        enum i40iw_status_code ret_code;
        bool read_fence = false;
        u8 wqe_size;

        op_info = &info->op.rdma_write;
        if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
                return I40IW_ERR_INVALID_FRAG_COUNT;

        for (i = 0; i < op_info->num_lo_sges; i++)
                total_size += op_info->lo_sg_list[i].len;

        if (total_size > I40IW_MAX_OUTBOUND_MESSAGE_SIZE)
                return I40IW_ERR_QP_INVALID_MSG_SIZE;

        read_fence |= info->read_fence;

        ret_code = i40iw_fragcnt_to_wqesize_sq(op_info->num_lo_sges, &wqe_size);
        if (ret_code)
                return ret_code;

        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, total_size, info->wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
        set_64bit_val(wqe, 16,
                      LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));
        if (!op_info->rem_addr.stag)
                return I40IW_ERR_BAD_STAG;

        header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
                 LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
                 LS_64((op_info->num_lo_sges > 1 ? (op_info->num_lo_sges - 1) : 0), I40IWQPSQ_ADDFRAGCNT) |
                 LS_64(read_fence, I40IWQPSQ_READFENCE) |
                 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
                 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
                 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        i40iw_set_fragment(wqe, 0, op_info->lo_sg_list);

        for (i = 1, byte_off = 32; i < op_info->num_lo_sges; i++) {
                i40iw_set_fragment(wqe, byte_off, &op_info->lo_sg_list[i]);
                byte_off += 16;
        }

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);

        if (post_sq)
                i40iw_qp_post_wr(qp);

        return 0;
}

/**
 * i40iw_rdma_read - rdma read command
 * @qp: hw qp ptr
 * @info: post sq information
 * @inv_stag: flag for inv_stag
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_rdma_read(struct i40iw_qp_uk *qp,
                                              struct i40iw_post_sq_info *info,
                                              bool inv_stag,
                                              bool post_sq)
{
        u64 *wqe;
        struct i40iw_rdma_read *op_info;
        u64 header;
        u32 wqe_idx;
        enum i40iw_status_code ret_code;
        u8 wqe_size;
        bool local_fence = false;

        op_info = &info->op.rdma_read;
        ret_code = i40iw_fragcnt_to_wqesize_sq(1, &wqe_size);
        if (ret_code)
                return ret_code;
        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->lo_addr.len, info->wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
        local_fence |= info->local_fence;

        set_64bit_val(wqe, 16, LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));
        header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
                 LS_64((inv_stag ? I40IWQP_OP_RDMA_READ_LOC_INV : I40IWQP_OP_RDMA_READ), I40IWQPSQ_OPCODE) |
                 LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
                 LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
                 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
                 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        i40iw_set_fragment(wqe, 0, &op_info->lo_addr);

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);
        if (post_sq)
                i40iw_qp_post_wr(qp);

        return 0;
}

/**
 * i40iw_send - rdma send command
 * @qp: hw qp ptr
 * @info: post sq information
 * @stag_to_inv: stag_to_inv value
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_send(struct i40iw_qp_uk *qp,
                                         struct i40iw_post_sq_info *info,
                                         u32 stag_to_inv,
                                         bool post_sq)
{
        u64 *wqe;
        struct i40iw_post_send *op_info;
        u64 header;
        u32 i, wqe_idx, total_size = 0, byte_off;
        enum i40iw_status_code ret_code;
        bool read_fence = false;
        u8 wqe_size;

        op_info = &info->op.send;
        if (qp->max_sq_frag_cnt < op_info->num_sges)
                return I40IW_ERR_INVALID_FRAG_COUNT;

        for (i = 0; i < op_info->num_sges; i++)
                total_size += op_info->sg_list[i].len;
        ret_code = i40iw_fragcnt_to_wqesize_sq(op_info->num_sges, &wqe_size);
        if (ret_code)
                return ret_code;

        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, total_size, info->wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

        read_fence |= info->read_fence;
        set_64bit_val(wqe, 16, 0);
        header = LS_64(stag_to_inv, I40IWQPSQ_REMSTAG) |
                 LS_64(info->op_type, I40IWQPSQ_OPCODE) |
                 LS_64((op_info->num_sges > 1 ? (op_info->num_sges - 1) : 0),
                       I40IWQPSQ_ADDFRAGCNT) |
                 LS_64(read_fence, I40IWQPSQ_READFENCE) |
                 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
                 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
                 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        i40iw_set_fragment(wqe, 0, op_info->sg_list);

        for (i = 1, byte_off = 32; i < op_info->num_sges; i++) {
                i40iw_set_fragment(wqe, byte_off, &op_info->sg_list[i]);
                byte_off += 16;
        }

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);
        if (post_sq)
                i40iw_qp_post_wr(qp);

        return 0;
}

/**
 * i40iw_inline_rdma_write - inline rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp,
                                                      struct i40iw_post_sq_info *info,
                                                      bool post_sq)
{
        u64 *wqe;
        u8 *dest, *src;
        struct i40iw_inline_rdma_write *op_info;
        u64 *push;
        u64 header = 0;
        u32 i, wqe_idx;
        enum i40iw_status_code ret_code;
        bool read_fence = false;
        u8 wqe_size;

        op_info = &info->op.inline_rdma_write;
        if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
                return I40IW_ERR_INVALID_IMM_DATA_SIZE;

        ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
        if (ret_code)
                return ret_code;

        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->len, info->wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

        read_fence |= info->read_fence;
        set_64bit_val(wqe, 16,
                      LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));

        header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
                 LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
                 LS_64(op_info->len, I40IWQPSQ_INLINEDATALEN) |
                 LS_64(1, I40IWQPSQ_INLINEDATAFLAG) |
                 LS_64((qp->push_db ? 1 : 0), I40IWQPSQ_PUSHWQE) |
                 LS_64(read_fence, I40IWQPSQ_READFENCE) |
                 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
                 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
                 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

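        /*
         * Inline data occupies bytes 0-15 of the WQE; bytes 16-31 hold the
         * remote address fragment and the header quadword, so any payload
         * beyond 16 bytes continues at byte offset 32.
         */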
        dest = (u8 *)wqe;
        src = (u8 *)(op_info->data);

        if (op_info->len <= 16) {
                for (i = 0; i < op_info->len; i++, src++, dest++)
                        *dest = *src;
        } else {
                for (i = 0; i < 16; i++, src++, dest++)
                        *dest = *src;
                dest = (u8 *)wqe + 32;
                for (; i < op_info->len; i++, src++, dest++)
                        *dest = *src;
        }

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);

        if (qp->push_db) {
                push = (u64 *)((uintptr_t)qp->push_wqe + (wqe_idx & 0x3) * 0x20);
                memcpy(push, wqe, (op_info->len > 16) ? op_info->len + 16 : 32);
                i40iw_qp_ring_push_db(qp, wqe_idx);
        } else {
                if (post_sq)
                        i40iw_qp_post_wr(qp);
        }

        return 0;
}

/**
 * i40iw_inline_send - inline send operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @stag_to_inv: remote stag
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp,
                                                struct i40iw_post_sq_info *info,
                                                u32 stag_to_inv,
                                                bool post_sq)
{
        u64 *wqe;
        u8 *dest, *src;
        struct i40iw_post_inline_send *op_info;
        u64 header;
        u32 wqe_idx, i;
        enum i40iw_status_code ret_code;
        bool read_fence = false;
        u8 wqe_size;
        u64 *push;

        op_info = &info->op.inline_send;
        if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
                return I40IW_ERR_INVALID_IMM_DATA_SIZE;

        ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
        if (ret_code)
                return ret_code;

        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->len, info->wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

        read_fence |= info->read_fence;
        header = LS_64(stag_to_inv, I40IWQPSQ_REMSTAG) |
            LS_64(info->op_type, I40IWQPSQ_OPCODE) |
            LS_64(op_info->len, I40IWQPSQ_INLINEDATALEN) |
            LS_64(1, I40IWQPSQ_INLINEDATAFLAG) |
            LS_64((qp->push_db ? 1 : 0), I40IWQPSQ_PUSHWQE) |
            LS_64(read_fence, I40IWQPSQ_READFENCE) |
            LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
            LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
            LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        dest = (u8 *)wqe;
        src = (u8 *)(op_info->data);

        if (op_info->len <= 16) {
                for (i = 0; i < op_info->len; i++, src++, dest++)
                        *dest = *src;
        } else {
                for (i = 0; i < 16; i++, src++, dest++)
                        *dest = *src;
                dest = (u8 *)wqe + 32;
                for (; i < op_info->len; i++, src++, dest++)
                        *dest = *src;
        }

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);

        if (qp->push_db) {
                push = (u64 *)((uintptr_t)qp->push_wqe + (wqe_idx & 0x3) * 0x20);
                memcpy(push, wqe, (op_info->len > 16) ? op_info->len + 16 : 32);
                i40iw_qp_ring_push_db(qp, wqe_idx);
        } else {
                if (post_sq)
                        i40iw_qp_post_wr(qp);
        }

        return 0;
}

/**
 * i40iw_stag_local_invalidate - stag invalidate operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_stag_local_invalidate(struct i40iw_qp_uk *qp,
                                                          struct i40iw_post_sq_info *info,
                                                          bool post_sq)
{
        u64 *wqe;
        struct i40iw_inv_local_stag *op_info;
        u64 header;
        u32 wqe_idx;
        bool local_fence = false;

        op_info = &info->op.inv_local_stag;
        local_fence = info->local_fence;

        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, info->wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
        set_64bit_val(wqe, 0, 0);
        set_64bit_val(wqe, 8,
                      LS_64(op_info->target_stag, I40IWQPSQ_LOCSTAG));
        set_64bit_val(wqe, 16, 0);
        header = LS_64(I40IW_OP_TYPE_INV_STAG, I40IWQPSQ_OPCODE) |
            LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
            LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
            LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
            LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);

        if (post_sq)
                i40iw_qp_post_wr(qp);

        return 0;
}

/**
 * i40iw_mw_bind - Memory Window bind operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_mw_bind(struct i40iw_qp_uk *qp,
                                            struct i40iw_post_sq_info *info,
                                            bool post_sq)
{
        u64 *wqe;
        struct i40iw_bind_window *op_info;
        u64 header;
        u32 wqe_idx;
        bool local_fence = false;

        op_info = &info->op.bind_window;

        local_fence |= info->local_fence;
        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, info->wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
        set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
        set_64bit_val(wqe, 8,
                      LS_64(op_info->mr_stag, I40IWQPSQ_PARENTMRSTAG) |
                      LS_64(op_info->mw_stag, I40IWQPSQ_MWSTAG));
        set_64bit_val(wqe, 16, op_info->bind_length);
        header = LS_64(I40IW_OP_TYPE_BIND_MW, I40IWQPSQ_OPCODE) |
            LS_64(((op_info->enable_reads << 2) |
                   (op_info->enable_writes << 3)),
                  I40IWQPSQ_STAGRIGHTS) |
            LS_64((op_info->addressing_type == I40IW_ADDR_TYPE_VA_BASED ? 1 : 0),
                  I40IWQPSQ_VABASEDTO) |
            LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
            LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
            LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
            LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);

        if (post_sq)
                i40iw_qp_post_wr(qp);

        return 0;
}

/**
 * i40iw_post_receive - post receive wqe
 * @qp: hw qp ptr
 * @info: post rq information
 */
static enum i40iw_status_code i40iw_post_receive(struct i40iw_qp_uk *qp,
                                                 struct i40iw_post_rq_info *info)
{
        u64 *wqe;
        u64 header;
        u32 total_size = 0, wqe_idx, i, byte_off;

        if (qp->max_rq_frag_cnt < info->num_sges)
                return I40IW_ERR_INVALID_FRAG_COUNT;
        for (i = 0; i < info->num_sges; i++)
                total_size += info->sg_list[i].len;
        wqe = i40iw_qp_get_next_recv_wqe(qp, &wqe_idx);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

        qp->rq_wrid_array[wqe_idx] = info->wr_id;
        set_64bit_val(wqe, 16, 0);

        header = LS_64((info->num_sges > 1 ? (info->num_sges - 1) : 0),
                       I40IWQPSQ_ADDFRAGCNT) |
            LS_64(qp->rwqe_polarity, I40IWQPSQ_VALID);

        i40iw_set_fragment(wqe, 0, info->sg_list);

        for (i = 1, byte_off = 32; i < info->num_sges; i++) {
                i40iw_set_fragment(wqe, byte_off, &info->sg_list[i]);
                byte_off += 16;
        }

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);

        return 0;
}

/**
 * i40iw_cq_request_notification - cq notification request (doorbell)
 * @cq: hw cq
 * @cq_notify: notification type
 */
static void i40iw_cq_request_notification(struct i40iw_cq_uk *cq,
                                          enum i40iw_completion_notify cq_notify)
{
        u64 temp_val;
        u16 sw_cq_sel;
        u8 arm_next_se = 0;
        u8 arm_next = 0;
        u8 arm_seq_num;

        get_64bit_val(cq->shadow_area, 32, &temp_val);
        arm_seq_num = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_SEQ_NUM);
        arm_seq_num++;

        sw_cq_sel = (u16)RS_64(temp_val, I40IW_CQ_DBSA_SW_CQ_SELECT);
        arm_next_se = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_NEXT_SE);
        arm_next_se |= 1;
        if (cq_notify == IW_CQ_COMPL_EVENT)
                arm_next = 1;
        temp_val = LS_64(arm_seq_num, I40IW_CQ_DBSA_ARM_SEQ_NUM) |
            LS_64(sw_cq_sel, I40IW_CQ_DBSA_SW_CQ_SELECT) |
            LS_64(arm_next_se, I40IW_CQ_DBSA_ARM_NEXT_SE) |
            LS_64(arm_next, I40IW_CQ_DBSA_ARM_NEXT);

        set_64bit_val(cq->shadow_area, 32, temp_val);

        wmb(); /* make sure shadow area is updated before arming doorbell */

        writel(cq->cq_id, cq->cqe_alloc_reg);
}

/**
 * i40iw_cq_post_entries - update tail in shadow memory
 * @cq: hw cq
 * @count: # of entries processed
 */
static enum i40iw_status_code i40iw_cq_post_entries(struct i40iw_cq_uk *cq,
                                                    u8 count)
{
        I40IW_RING_MOVE_TAIL_BY_COUNT(cq->cq_ring, count);
        set_64bit_val(cq->shadow_area, 0,
                      I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
        return 0;
}

/**
 * i40iw_cq_poll_completion - get cq completion info
 * @cq: hw cq
 * @info: cq poll information returned
 */
static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
                                                       struct i40iw_cq_poll_info *info)
{
        u64 comp_ctx, qword0, qword2, qword3, wqe_qword;
        u64 *cqe, *sw_wqe;
        struct i40iw_qp_uk *qp;
        struct i40iw_ring *pring = NULL;
        u32 wqe_idx, q_type, array_idx = 0;
        enum i40iw_status_code ret_code = 0;
        bool move_cq_head = true;
        u8 polarity;
        u8 addl_wqes = 0;

        if (cq->avoid_mem_cflct)
                cqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(cq);
        else
                cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(cq);

        get_64bit_val(cqe, 24, &qword3);
        polarity = (u8)RS_64(qword3, I40IW_CQ_VALID);

        if (polarity != cq->polarity)
                return I40IW_ERR_QUEUE_EMPTY;

        q_type = (u8)RS_64(qword3, I40IW_CQ_SQ);
        info->error = (bool)RS_64(qword3, I40IW_CQ_ERROR);
        info->push_dropped = (bool)RS_64(qword3, I40IWCQ_PSHDROP);
        if (info->error) {
                info->comp_status = I40IW_COMPL_STATUS_FLUSHED;
                info->major_err = (bool)RS_64(qword3, I40IW_CQ_MAJERR);
                info->minor_err = (bool)RS_64(qword3, I40IW_CQ_MINERR);
        } else {
                info->comp_status = I40IW_COMPL_STATUS_SUCCESS;
        }

        get_64bit_val(cqe, 0, &qword0);
        get_64bit_val(cqe, 16, &qword2);

        info->tcp_seq_num = (u8)RS_64(qword0, I40IWCQ_TCPSEQNUM);

        info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);

        get_64bit_val(cqe, 8, &comp_ctx);

        info->solicited_event = (bool)RS_64(qword3, I40IWCQ_SOEVENT);
        info->is_srq = (bool)RS_64(qword3, I40IWCQ_SRQ);

        qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx;
        if (!qp) {
                ret_code = I40IW_ERR_QUEUE_DESTROYED;
                goto exit;
        }
        wqe_idx = (u32)RS_64(qword3, I40IW_CQ_WQEIDX);
        info->qp_handle = (i40iw_qp_handle)(unsigned long)qp;

        if (q_type == I40IW_CQE_QTYPE_RQ) {
                array_idx = (wqe_idx * 4) / qp->rq_wqe_size_multiplier;
                if (info->comp_status == I40IW_COMPL_STATUS_FLUSHED) {
                        info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
                        array_idx = qp->rq_ring.tail;
                } else {
                        info->wr_id = qp->rq_wrid_array[array_idx];
                }

                info->op_type = I40IW_OP_TYPE_REC;
                if (qword3 & I40IWCQ_STAG_MASK) {
                        info->stag_invalid_set = true;
                        info->inv_stag = (u32)RS_64(qword2, I40IWCQ_INVSTAG);
                } else {
                        info->stag_invalid_set = false;
                }
                info->bytes_xfered = (u32)RS_64(qword0, I40IWCQ_PAYLDLEN);
                I40IW_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
                pring = &qp->rq_ring;
        } else {
                if (info->comp_status != I40IW_COMPL_STATUS_FLUSHED) {
                        info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
                        info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;

                        info->op_type = (u8)RS_64(qword3, I40IWCQ_OP);
                        sw_wqe = qp->sq_base[wqe_idx].elem;
                        get_64bit_val(sw_wqe, 24, &wqe_qword);

                        addl_wqes = qp->sq_wrtrk_array[wqe_idx].wqe_size / I40IW_QP_WQE_MIN_SIZE;
                        I40IW_RING_SET_TAIL(qp->sq_ring, (wqe_idx + addl_wqes));
                } else {
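                        /*
                         * On a flushed SQ completion, walk the SQ from its
                         * tail, retiring nop WQEs until a real work request
                         * is found to report back to the caller.
                         */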
                        do {
                                u8 op_type;
                                u32 tail;

                                tail = qp->sq_ring.tail;
                                sw_wqe = qp->sq_base[tail].elem;
                                get_64bit_val(sw_wqe, 24, &wqe_qword);
                                op_type = (u8)RS_64(wqe_qword, I40IWQPSQ_OPCODE);
                                info->op_type = op_type;
                                addl_wqes = qp->sq_wrtrk_array[tail].wqe_size / I40IW_QP_WQE_MIN_SIZE;
                                I40IW_RING_SET_TAIL(qp->sq_ring, (tail + addl_wqes));
                                if (op_type != I40IWQP_OP_NOP) {
                                        info->wr_id = qp->sq_wrtrk_array[tail].wrid;
                                        info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
                                        break;
                                }
                        } while (1);
                }
                pring = &qp->sq_ring;
        }

        ret_code = 0;

exit:
        if (!ret_code &&
            (info->comp_status == I40IW_COMPL_STATUS_FLUSHED))
                if (pring && (I40IW_RING_MORE_WORK(*pring)))
                        move_cq_head = false;

        if (move_cq_head) {
                I40IW_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);

                if (I40IW_RING_GETCURRENT_HEAD(cq->cq_ring) == 0)
                        cq->polarity ^= 1;

                I40IW_RING_MOVE_TAIL(cq->cq_ring);
                set_64bit_val(cq->shadow_area, 0,
                              I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
        } else {
                if (info->is_srq)
                        return ret_code;
                qword3 &= ~I40IW_CQ_WQEIDX_MASK;
                qword3 |= LS_64(pring->tail, I40IW_CQ_WQEIDX);
                set_64bit_val(cqe, 24, qword3);
        }

        return ret_code;
}

/**
 * i40iw_get_wqe_shift - get shift count for maximum wqe size
 * @wqdepth: depth of wq required.
 * @sge: Maximum Scatter Gather Elements wqe
 * @inline_data: Maximum inline data size
 * @shift: Returns the shift needed based on sge
 *
 * Shift can be used to left shift the wqe size based on number of SGEs and inline data size.
 * For 1 SGE or inline data <= 16, shift = 0 (wqe size of 32 bytes).
 * For 2 or 3 SGEs or inline data <= 48, shift = 1 (wqe size of 64 bytes).
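 * For example, 3 SGEs with no inline data gives shift = 1, i.e. 64-byte WQEs.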
901  */
902 enum i40iw_status_code i40iw_get_wqe_shift(u32 wqdepth, u32 sge, u32 inline_data, u8 *shift)
903 {
904         u32 size;
905
906         *shift = 0;
907         if (sge > 1 || inline_data > 16)
908                 *shift = (sge < 4 && inline_data <= 48) ? 1 : 2;
909
910         /* check if wqdepth is multiple of 2 or not */
911
912         if ((wqdepth < I40IWQP_SW_MIN_WQSIZE) || (wqdepth & (wqdepth - 1)))
913                 return I40IW_ERR_INVALID_SIZE;
914
915         size = wqdepth << *shift;       /* multiple of 32 bytes count */
916         if (size > I40IWQP_SW_MAX_WQSIZE)
917                 return I40IW_ERR_INVALID_SIZE;
918         return 0;
919 }

static struct i40iw_qp_uk_ops iw_qp_uk_ops = {
        i40iw_qp_post_wr,
        i40iw_qp_ring_push_db,
        i40iw_rdma_write,
        i40iw_rdma_read,
        i40iw_send,
        i40iw_inline_rdma_write,
        i40iw_inline_send,
        i40iw_stag_local_invalidate,
        i40iw_mw_bind,
        i40iw_post_receive,
        i40iw_nop
};

static struct i40iw_cq_ops iw_cq_ops = {
        i40iw_cq_request_notification,
        i40iw_cq_poll_completion,
        i40iw_cq_post_entries,
        i40iw_clean_cq
};

static struct i40iw_device_uk_ops iw_device_uk_ops = {
        i40iw_cq_uk_init,
        i40iw_qp_uk_init,
};

/**
 * i40iw_qp_uk_init - initialize shared qp
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 *
 * Initializes the vars used in both user and kernel mode.
 * The size of the wqe depends on the maximum number of fragments
 * allowed; size of wqe * the number of wqes should then be the
 * amount of memory allocated for sq and rq. If srq is used,
 * then rq_base will point to one rq wqe only (not the whole
 * array of wqes).
 */
enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
                                        struct i40iw_qp_uk_init_info *info)
{
        enum i40iw_status_code ret_code = 0;
        u32 sq_ring_size;
        u8 sqshift, rqshift;

        if (info->max_sq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
                return I40IW_ERR_INVALID_FRAG_COUNT;

        if (info->max_rq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
                return I40IW_ERR_INVALID_FRAG_COUNT;
        ret_code = i40iw_get_wqe_shift(info->sq_size, info->max_sq_frag_cnt, info->max_inline_data, &sqshift);
        if (ret_code)
                return ret_code;

        ret_code = i40iw_get_wqe_shift(info->rq_size, info->max_rq_frag_cnt, 0, &rqshift);
        if (ret_code)
                return ret_code;

        qp->sq_base = info->sq;
        qp->rq_base = info->rq;
        qp->shadow_area = info->shadow_area;
        qp->sq_wrtrk_array = info->sq_wrtrk_array;
        qp->rq_wrid_array = info->rq_wrid_array;

        qp->wqe_alloc_reg = info->wqe_alloc_reg;
        qp->qp_id = info->qp_id;

        qp->sq_size = info->sq_size;
        qp->push_db = info->push_db;
        qp->push_wqe = info->push_wqe;

        qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
        sq_ring_size = qp->sq_size << sqshift;

        I40IW_RING_INIT(qp->sq_ring, sq_ring_size);
        I40IW_RING_INIT(qp->initial_ring, sq_ring_size);
        I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
        I40IW_RING_MOVE_TAIL(qp->sq_ring);
        I40IW_RING_MOVE_HEAD(qp->initial_ring, ret_code);
        qp->swqe_polarity = 1;
        qp->swqe_polarity_deferred = 1;
        qp->rwqe_polarity = 0;

        if (!qp->use_srq) {
                qp->rq_size = info->rq_size;
                qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
                qp->rq_wqe_size = rqshift;
                I40IW_RING_INIT(qp->rq_ring, qp->rq_size);
                qp->rq_wqe_size_multiplier = 4 << rqshift;
        }
        qp->ops = iw_qp_uk_ops;

        return ret_code;
}

/**
 * i40iw_cq_uk_init - initialize shared cq (user and kernel)
 * @cq: hw cq
 * @info: hw cq initialization info
 */
enum i40iw_status_code i40iw_cq_uk_init(struct i40iw_cq_uk *cq,
                                        struct i40iw_cq_uk_init_info *info)
{
        if ((info->cq_size < I40IW_MIN_CQ_SIZE) ||
            (info->cq_size > I40IW_MAX_CQ_SIZE))
                return I40IW_ERR_INVALID_SIZE;
        cq->cq_base = (struct i40iw_cqe *)info->cq_base;
        cq->cq_id = info->cq_id;
        cq->cq_size = info->cq_size;
        cq->cqe_alloc_reg = info->cqe_alloc_reg;
        cq->shadow_area = info->shadow_area;
        cq->avoid_mem_cflct = info->avoid_mem_cflct;

        I40IW_RING_INIT(cq->cq_ring, cq->cq_size);
        cq->polarity = 1;
        cq->ops = iw_cq_ops;

        return 0;
}

/**
 * i40iw_device_init_uk - setup routines for iwarp shared device
 * @dev: iwarp shared (user and kernel)
 */
void i40iw_device_init_uk(struct i40iw_dev_uk *dev)
{
        dev->ops_uk = iw_device_uk_ops;
}

/**
 * i40iw_clean_cq - clean cq entries
 * @queue: completion context
 * @cq: cq to clean
 */
void i40iw_clean_cq(void *queue, struct i40iw_cq_uk *cq)
{
        u64 *cqe;
        u64 qword3, comp_ctx;
        u32 cq_head;
        u8 polarity, temp;

        cq_head = cq->cq_ring.head;
        temp = cq->polarity;
        do {
                if (cq->avoid_mem_cflct)
                        cqe = (u64 *)&(((struct i40iw_extended_cqe *)cq->cq_base)[cq_head]);
                else
                        cqe = (u64 *)&cq->cq_base[cq_head];
                get_64bit_val(cqe, 24, &qword3);
                polarity = (u8)RS_64(qword3, I40IW_CQ_VALID);

                if (polarity != temp)
                        break;

                get_64bit_val(cqe, 8, &comp_ctx);
                if ((void *)(unsigned long)comp_ctx == queue)
                        set_64bit_val(cqe, 8, 0);

                cq_head = (cq_head + 1) % cq->cq_ring.size;
                if (!cq_head)
                        temp ^= 1;
        } while (true);
}

/**
 * i40iw_nop - send a nop
 * @qp: hw qp ptr
 * @wr_id: work request id
 * @signaled: flag if signaled for completion
 * @post_sq: flag to post sq
 */
enum i40iw_status_code i40iw_nop(struct i40iw_qp_uk *qp,
                                 u64 wr_id,
                                 bool signaled,
                                 bool post_sq)
{
        u64 header, *wqe;
        u32 wqe_idx;

        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
        set_64bit_val(wqe, 0, 0);
        set_64bit_val(wqe, 8, 0);
        set_64bit_val(wqe, 16, 0);

        header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
            LS_64(signaled, I40IWQPSQ_SIGCOMPL) |
            LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);
        if (post_sq)
                i40iw_qp_post_wr(qp);

        return 0;
}

/**
 * i40iw_fragcnt_to_wqesize_sq - calculate wqe size based on fragment count for SQ
 * @frag_cnt: number of fragments
 * @wqe_size: size of sq wqe returned
 */
enum i40iw_status_code i40iw_fragcnt_to_wqesize_sq(u32 frag_cnt, u8 *wqe_size)
{
        switch (frag_cnt) {
        case 0:
        case 1:
                *wqe_size = I40IW_QP_WQE_MIN_SIZE;
                break;
        case 2:
        case 3:
                *wqe_size = 64;
                break;
        case 4:
        case 5:
                *wqe_size = 96;
                break;
        case 6:
        case 7:
                *wqe_size = 128;
                break;
        default:
                return I40IW_ERR_INVALID_FRAG_COUNT;
        }

        return 0;
}

/**
 * i40iw_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
 * @frag_cnt: number of fragments
 * @wqe_size: size of rq wqe returned
 */
enum i40iw_status_code i40iw_fragcnt_to_wqesize_rq(u32 frag_cnt, u8 *wqe_size)
{
        switch (frag_cnt) {
        case 0:
        case 1:
                *wqe_size = 32;
                break;
        case 2:
        case 3:
                *wqe_size = 64;
                break;
        case 4:
        case 5:
        case 6:
        case 7:
                *wqe_size = 128;
                break;
        default:
                return I40IW_ERR_INVALID_FRAG_COUNT;
        }

        return 0;
}

/**
 * i40iw_inline_data_size_to_wqesize - based on inline data, wqe size
 * @data_size: data size for inline
 * @wqe_size: size of sq wqe returned
 */
enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,
                                                         u8 *wqe_size)
{
        if (data_size > I40IW_MAX_INLINE_DATA_SIZE)
                return I40IW_ERR_INVALID_IMM_DATA_SIZE;

        if (data_size <= 16)
                *wqe_size = I40IW_QP_WQE_MIN_SIZE;
        else if (data_size <= 48)
                *wqe_size = 64;
        else if (data_size <= 80)
                *wqe_size = 96;
        else
                *wqe_size = 128;

        return 0;
}