1 /*
2  * cxgb4i.c: Chelsio T4 iSCSI driver.
3  *
4  * Copyright (c) 2010-2015 Chelsio Communications, Inc.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  *
10  * Written by:  Karen Xie (kxie@chelsio.com)
11  *              Rakesh Ranjan (rranjan@chelsio.com)
12  */
13
14 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
15
16 #include <linux/kernel.h>
17 #include <linux/module.h>
18 #include <linux/moduleparam.h>
19 #include <scsi/scsi_host.h>
20 #include <net/tcp.h>
21 #include <net/dst.h>
22 #include <linux/netdevice.h>
23 #include <net/addrconf.h>
24
25 #include "t4_regs.h"
26 #include "t4_msg.h"
27 #include "cxgb4.h"
28 #include "cxgb4_uld.h"
29 #include "t4fw_api.h"
30 #include "l2t.h"
31 #include "cxgb4i.h"
32 #include "clip_tbl.h"
33
34 static unsigned int dbg_level;
35
36 #include "../libcxgbi.h"
37
38 #define DRV_MODULE_NAME         "cxgb4i"
39 #define DRV_MODULE_DESC         "Chelsio T4/T5 iSCSI Driver"
40 #define DRV_MODULE_VERSION      "0.9.5-ko"
41 #define DRV_MODULE_RELDATE      "Apr. 2015"
42
43 static char version[] =
44         DRV_MODULE_DESC " " DRV_MODULE_NAME
45         " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
46
47 MODULE_AUTHOR("Chelsio Communications, Inc.");
48 MODULE_DESCRIPTION(DRV_MODULE_DESC);
49 MODULE_VERSION(DRV_MODULE_VERSION);
50 MODULE_LICENSE("GPL");
51
52 module_param(dbg_level, uint, 0644);
53 MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)");
54
55 #define CXGB4I_DEFAULT_10G_RCV_WIN (256 * 1024)
56 static int cxgb4i_rcv_win = -1;
57 module_param(cxgb4i_rcv_win, int, 0644);
58 MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP receive window in bytes");
59
60 #define CXGB4I_DEFAULT_10G_SND_WIN (128 * 1024)
61 static int cxgb4i_snd_win = -1;
62 module_param(cxgb4i_snd_win, int, 0644);
63 MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes");
64
65 static int cxgb4i_rx_credit_thres = 10 * 1024;
66 module_param(cxgb4i_rx_credit_thres, int, 0644);
67 MODULE_PARM_DESC(cxgb4i_rx_credit_thres,
68                 "RX credits return threshold in bytes (default=10KB)");
69
70 static unsigned int cxgb4i_max_connect = (8 * 1024);
71 module_param(cxgb4i_max_connect, uint, 0644);
72 MODULE_PARM_DESC(cxgb4i_max_connect, "Maximum number of connections");
73
74 static unsigned short cxgb4i_sport_base = 20000;
75 module_param(cxgb4i_sport_base, ushort, 0644);
76 MODULE_PARM_DESC(cxgb4i_sport_base, "Starting port number (default 20000)");
77
78 typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *);
79
80 static void *t4_uld_add(const struct cxgb4_lld_info *);
81 static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *);
82 static int t4_uld_state_change(void *, enum cxgb4_state state);
83 static inline int send_tx_flowc_wr(struct cxgbi_sock *);
84
85 static const struct cxgb4_uld_info cxgb4i_uld_info = {
86         .name = DRV_MODULE_NAME,
87         .nrxq = MAX_ULD_QSETS,
88         .rxq_size = 1024,
89         .lro = false,
90         .add = t4_uld_add,
91         .rx_handler = t4_uld_rx_handler,
92         .state_change = t4_uld_state_change,
93 };
94
95 static struct scsi_host_template cxgb4i_host_template = {
96         .module         = THIS_MODULE,
97         .name           = DRV_MODULE_NAME,
98         .proc_name      = DRV_MODULE_NAME,
99         .can_queue      = CXGB4I_SCSI_HOST_QDEPTH,
100         .queuecommand   = iscsi_queuecommand,
101         .change_queue_depth = scsi_change_queue_depth,
102         .sg_tablesize   = SG_ALL,
103         .max_sectors    = 0xFFFF,
104         .cmd_per_lun    = ISCSI_DEF_CMD_PER_LUN,
105         .eh_abort_handler = iscsi_eh_abort,
106         .eh_device_reset_handler = iscsi_eh_device_reset,
107         .eh_target_reset_handler = iscsi_eh_recover_target,
108         .target_alloc   = iscsi_target_alloc,
109         .use_clustering = DISABLE_CLUSTERING,
110         .this_id        = -1,
111         .track_queue_depth = 1,
112 };
113
114 static struct iscsi_transport cxgb4i_iscsi_transport = {
115         .owner          = THIS_MODULE,
116         .name           = DRV_MODULE_NAME,
117         .caps           = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
118                                 CAP_DATADGST | CAP_DIGEST_OFFLOAD |
119                                 CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
120         .attr_is_visible        = cxgbi_attr_is_visible,
121         .get_host_param = cxgbi_get_host_param,
122         .set_host_param = cxgbi_set_host_param,
123         /* session management */
124         .create_session = cxgbi_create_session,
125         .destroy_session        = cxgbi_destroy_session,
126         .get_session_param = iscsi_session_get_param,
127         /* connection management */
128         .create_conn    = cxgbi_create_conn,
129         .bind_conn              = cxgbi_bind_conn,
130         .destroy_conn   = iscsi_tcp_conn_teardown,
131         .start_conn             = iscsi_conn_start,
132         .stop_conn              = iscsi_conn_stop,
133         .get_conn_param = iscsi_conn_get_param,
134         .set_param      = cxgbi_set_conn_param,
135         .get_stats      = cxgbi_get_conn_stats,
136         /* pdu xmit req from user space */
137         .send_pdu       = iscsi_conn_send_pdu,
138         /* task */
139         .init_task      = iscsi_tcp_task_init,
140         .xmit_task      = iscsi_tcp_task_xmit,
141         .cleanup_task   = cxgbi_cleanup_task,
142         /* pdu */
143         .alloc_pdu      = cxgbi_conn_alloc_pdu,
144         .init_pdu       = cxgbi_conn_init_pdu,
145         .xmit_pdu       = cxgbi_conn_xmit_pdu,
146         .parse_pdu_itt  = cxgbi_parse_pdu_itt,
147         /* TCP connect/disconnect */
148         .get_ep_param   = cxgbi_get_ep_param,
149         .ep_connect     = cxgbi_ep_connect,
150         .ep_poll        = cxgbi_ep_poll,
151         .ep_disconnect  = cxgbi_ep_disconnect,
152         /* Error recovery timeout call */
153         .session_recovery_timedout = iscsi_session_recovery_timedout,
154 };
155
156 static struct scsi_transport_template *cxgb4i_stt;
157
158 /*
159  * CPL (Chelsio Protocol Language) defines a message-passing interface between
160  * the host driver and the Chelsio ASIC.
161  * The section below implements the CPLs related to iSCSI TCP connection
162  * open/close/abort and data send/receive.
163  */
164
165 #define RCV_BUFSIZ_MASK         0x3FFU
166 #define MAX_IMM_TX_PKT_LEN      256
167
168 static int push_tx_frames(struct cxgbi_sock *, int);
169
170 /*
171  * is_ofld_imm - check whether a packet can be sent as immediate data
172  * @skb: the packet
173  *
174  * Returns true if a packet can be sent as an offload WR with immediate
175  * data.  We currently use the same limit as for Ethernet packets.
176  */
177 static inline bool is_ofld_imm(const struct sk_buff *skb)
178 {
179         int len = skb->len;
180
181         if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
182                 len += sizeof(struct fw_ofld_tx_data_wr);
183
184         return len <= MAX_IMM_TX_PKT_LEN;
185 }
186
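/*
 * Build and send an active-open request (struct cpl_act_open_req on T4,
 * struct cpl_t5_act_open_req on T5) through the connection's L2T entry to
 * start an offloaded TCP connection.
 */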
187 static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
188                                 struct l2t_entry *e)
189 {
190         struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
191         int t4 = is_t4(lldi->adapter_type);
192         int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
193         unsigned long long opt0;
194         unsigned int opt2;
195         unsigned int qid_atid = ((unsigned int)csk->atid) |
196                                  (((unsigned int)csk->rss_qid) << 14);
197
198         opt0 = KEEP_ALIVE_F |
199                 WND_SCALE_V(wscale) |
200                 MSS_IDX_V(csk->mss_idx) |
201                 L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
202                 TX_CHAN_V(csk->tx_chan) |
203                 SMAC_SEL_V(csk->smac_idx) |
204                 ULP_MODE_V(ULP_MODE_ISCSI) |
205                 RCV_BUFSIZ_V(csk->rcv_win >> 10);
206
207         opt2 = RX_CHANNEL_V(0) |
208                 RSS_QUEUE_VALID_F |
209                 RSS_QUEUE_V(csk->rss_qid);
210
211         if (is_t4(lldi->adapter_type)) {
212                 struct cpl_act_open_req *req =
213                                 (struct cpl_act_open_req *)skb->head;
214
215                 INIT_TP_WR(req, 0);
216                 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
217                                         qid_atid));
218                 req->local_port = csk->saddr.sin_port;
219                 req->peer_port = csk->daddr.sin_port;
220                 req->local_ip = csk->saddr.sin_addr.s_addr;
221                 req->peer_ip = csk->daddr.sin_addr.s_addr;
222                 req->opt0 = cpu_to_be64(opt0);
223                 req->params = cpu_to_be32(cxgb4_select_ntuple(
224                                         csk->cdev->ports[csk->port_id],
225                                         csk->l2t));
226                 opt2 |= RX_FC_VALID_F;
227                 req->opt2 = cpu_to_be32(opt2);
228
229                 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
230                         "csk t4 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
231                         csk, &req->local_ip, ntohs(req->local_port),
232                         &req->peer_ip, ntohs(req->peer_port),
233                         csk->atid, csk->rss_qid);
234         } else {
235                 struct cpl_t5_act_open_req *req =
236                                 (struct cpl_t5_act_open_req *)skb->head;
237                 u32 isn = (prandom_u32() & ~7UL) - 1;
238
239                 INIT_TP_WR(req, 0);
240                 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
241                                         qid_atid));
242                 req->local_port = csk->saddr.sin_port;
243                 req->peer_port = csk->daddr.sin_port;
244                 req->local_ip = csk->saddr.sin_addr.s_addr;
245                 req->peer_ip = csk->daddr.sin_addr.s_addr;
246                 req->opt0 = cpu_to_be64(opt0);
247                 req->params = cpu_to_be64(FILTER_TUPLE_V(
248                                 cxgb4_select_ntuple(
249                                         csk->cdev->ports[csk->port_id],
250                                         csk->l2t)));
251                 req->rsvd = cpu_to_be32(isn);
252                 opt2 |= T5_ISS_VALID;
253                 opt2 |= T5_OPT_2_VALID_F;
254
255                 req->opt2 = cpu_to_be32(opt2);
256
257                 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
258                         "csk t5 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
259                         csk, &req->local_ip, ntohs(req->local_port),
260                         &req->peer_ip, ntohs(req->peer_port),
261                         csk->atid, csk->rss_qid);
262         }
263
264         set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
265
266         pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n",
267                        (&csk->saddr), (&csk->daddr), t4 ? 4 : 5, csk,
268                        csk->state, csk->flags, csk->atid, csk->rss_qid);
269
270         cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
271 }
272
273 #if IS_ENABLED(CONFIG_IPV6)
274 static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
275                                struct l2t_entry *e)
276 {
277         struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
278         int t4 = is_t4(lldi->adapter_type);
279         int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
280         unsigned long long opt0;
281         unsigned int opt2;
282         unsigned int qid_atid = ((unsigned int)csk->atid) |
283                                  (((unsigned int)csk->rss_qid) << 14);
284
285         opt0 = KEEP_ALIVE_F |
286                 WND_SCALE_V(wscale) |
287                 MSS_IDX_V(csk->mss_idx) |
288                 L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
289                 TX_CHAN_V(csk->tx_chan) |
290                 SMAC_SEL_V(csk->smac_idx) |
291                 ULP_MODE_V(ULP_MODE_ISCSI) |
292                 RCV_BUFSIZ_V(csk->rcv_win >> 10);
293
294         opt2 = RX_CHANNEL_V(0) |
295                 RSS_QUEUE_VALID_F |
296                 RX_FC_DISABLE_F |
297                 RSS_QUEUE_V(csk->rss_qid);
298
299         if (t4) {
300                 struct cpl_act_open_req6 *req =
301                             (struct cpl_act_open_req6 *)skb->head;
302
303                 INIT_TP_WR(req, 0);
304                 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
305                                                             qid_atid));
306                 req->local_port = csk->saddr6.sin6_port;
307                 req->peer_port = csk->daddr6.sin6_port;
308
309                 req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
310                 req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
311                                                                     8);
312                 req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
313                 req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
314                                                                     8);
315
316                 req->opt0 = cpu_to_be64(opt0);
317
318                 opt2 |= RX_FC_VALID_F;
319                 req->opt2 = cpu_to_be32(opt2);
320
321                 req->params = cpu_to_be32(cxgb4_select_ntuple(
322                                           csk->cdev->ports[csk->port_id],
323                                           csk->l2t));
324         } else {
325                 struct cpl_t5_act_open_req6 *req =
326                                 (struct cpl_t5_act_open_req6 *)skb->head;
327
328                 INIT_TP_WR(req, 0);
329                 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
330                                                             qid_atid));
331                 req->local_port = csk->saddr6.sin6_port;
332                 req->peer_port = csk->daddr6.sin6_port;
333                 req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
334                 req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
335                                                                         8);
336                 req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
337                 req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
338                                                                         8);
339                 req->opt0 = cpu_to_be64(opt0);
340
341                 opt2 |= T5_OPT_2_VALID_F;
342                 req->opt2 = cpu_to_be32(opt2);
343
344                 req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
345                                           csk->cdev->ports[csk->port_id],
346                                           csk->l2t)));
347         }
348
349         set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
350
351         pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n",
352                 t4 ? 4 : 5, csk, csk->state, csk->flags, csk->atid,
353                 &csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port),
354                 &csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port),
355                 csk->rss_qid);
356
357         cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
358 }
359 #endif
360
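/*
 * Queue the connection's pre-allocated CPL_CLOSE_CON_REQ on the write queue
 * and push it out if the connection is already established.
 */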
361 static void send_close_req(struct cxgbi_sock *csk)
362 {
363         struct sk_buff *skb = csk->cpl_close;
364         struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
365         unsigned int tid = csk->tid;
366
367         log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
368                 "csk 0x%p,%u,0x%lx, tid %u.\n",
369                 csk, csk->state, csk->flags, csk->tid);
370         csk->cpl_close = NULL;
371         set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
372         INIT_TP_WR(req, tid);
373         OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
374         req->rsvd = 0;
375
376         cxgbi_sock_skb_entail(csk, skb);
377         if (csk->state >= CTP_ESTABLISHED)
378                 push_tx_frames(csk, 1);
379 }
380
381 static void abort_arp_failure(void *handle, struct sk_buff *skb)
382 {
383         struct cxgbi_sock *csk = (struct cxgbi_sock *)handle;
384         struct cpl_abort_req *req;
385
386         log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
387                 "csk 0x%p,%u,0x%lx, tid %u, abort.\n",
388                 csk, csk->state, csk->flags, csk->tid);
389         req = (struct cpl_abort_req *)skb->data;
390         req->cmd = CPL_ABORT_NO_RST;
391         cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
392 }
393
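/*
 * Abort the connection: purge any unsent data, move the socket to the
 * ABORTING state and send a CPL_ABORT_REQ asking the hardware to RST the
 * peer.
 */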
394 static void send_abort_req(struct cxgbi_sock *csk)
395 {
396         struct cpl_abort_req *req;
397         struct sk_buff *skb = csk->cpl_abort_req;
398
399         if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
400                 return;
401
402         if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
403                 send_tx_flowc_wr(csk);
404                 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
405         }
406
407         cxgbi_sock_set_state(csk, CTP_ABORTING);
408         cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
409         cxgbi_sock_purge_write_queue(csk);
410
411         csk->cpl_abort_req = NULL;
412         req = (struct cpl_abort_req *)skb->head;
413         set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
414         req->cmd = CPL_ABORT_SEND_RST;
415         t4_set_arp_err_handler(skb, csk, abort_arp_failure);
416         INIT_TP_WR(req, csk->tid);
417         OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
418         req->rsvd0 = htonl(csk->snd_nxt);
419         req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);
420
421         log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
422                 "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
423                 csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
424                 req->rsvd1);
425
426         cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
427 }
428
429 static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
430 {
431         struct sk_buff *skb = csk->cpl_abort_rpl;
432         struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;
433
434         log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
435                 "csk 0x%p,%u,0x%lx,%u, status %d.\n",
436                 csk, csk->state, csk->flags, csk->tid, rst_status);
437
438         csk->cpl_abort_rpl = NULL;
439         set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
440         INIT_TP_WR(rpl, csk->tid);
441         OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
442         rpl->cmd = rst_status;
443         cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
444 }
445
446 /*
447  * CPL connection rx data ack: host ->
448  * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
449  * credits sent.
450  */
451 static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
452 {
453         struct sk_buff *skb;
454         struct cpl_rx_data_ack *req;
455
456         log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
457                 "csk 0x%p,%u,0x%lx,%u, credit %u.\n",
458                 csk, csk->state, csk->flags, csk->tid, credits);
459
460         skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
461         if (!skb) {
462                 pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
463                 return 0;
464         }
465         req = (struct cpl_rx_data_ack *)skb->head;
466
467         set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
468         INIT_TP_WR(req, csk->tid);
469         OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
470                                       csk->tid));
471         req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits)
472                                        | RX_FORCE_ACK_F);
473         cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
474         return credits;
475 }
476
477 /*
478  * sgl_len - calculates the size of an SGL of the given capacity
479  * @n: the number of SGL entries
480  * Calculates the number of flits needed for a scatter/gather list that
481  * can hold the given number of entries.
482  */
483 static inline unsigned int sgl_len(unsigned int n)
484 {
485         n--;
486         return (3 * n) / 2 + (n & 1) + 2;
487 }
488
489 /*
490  * calc_tx_flits_ofld - calculate # of flits for an offload packet
491  * @skb: the packet
492  *
493  * Returns the number of flits needed for the given offload packet.
494  * These packets are already fully constructed and no additional headers
495  * will be added.
496  */
497 static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
498 {
499         unsigned int flits, cnt;
500
501         if (is_ofld_imm(skb))
502                 return DIV_ROUND_UP(skb->len, 8);
503         flits = skb_transport_offset(skb) / 8;
504         cnt = skb_shinfo(skb)->nr_frags;
505         if (skb_tail_pointer(skb) != skb_transport_header(skb))
506                 cnt++;
507         return flits + sgl_len(cnt);
508 }
509
510 #define FLOWC_WR_NPARAMS_MIN    9
511 static inline int tx_flowc_wr_credits(int *nparamsp, int *flowclenp)
512 {
513         int nparams, flowclen16, flowclen;
514
515         nparams = FLOWC_WR_NPARAMS_MIN;
516         flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
517         flowclen16 = DIV_ROUND_UP(flowclen, 16);
518         flowclen = flowclen16 * 16;
519         /*
520          * Return the number of 16-byte credits used by the FlowC request.
521          * Pass back the nparams and actual FlowC length if requested.
522          */
523         if (nparamsp)
524                 *nparamsp = nparams;
525         if (flowclenp)
526                 *flowclenp = flowclen;
527
528         return flowclen16;
529 }
530
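/*
 * Send a FW_FLOWC_WR carrying the connection's flow-control parameters
 * (PF/VF, channel, port, ingress queue, sequence numbers, send buffer size
 * and MSS) to the firmware.  Returns the number of 16-byte credits consumed.
 */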
531 static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
532 {
533         struct sk_buff *skb;
534         struct fw_flowc_wr *flowc;
535         int nparams, flowclen16, flowclen;
536
537         flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen);
538         skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
539         flowc = (struct fw_flowc_wr *)skb->head;
540         flowc->op_to_nparams =
541                 htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(nparams));
542         flowc->flowid_len16 =
543                 htonl(FW_WR_LEN16_V(flowclen16) | FW_WR_FLOWID_V(csk->tid));
544         flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
545         flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
546         flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
547         flowc->mnemval[1].val = htonl(csk->tx_chan);
548         flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
549         flowc->mnemval[2].val = htonl(csk->tx_chan);
550         flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
551         flowc->mnemval[3].val = htonl(csk->rss_qid);
552         flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
553         flowc->mnemval[4].val = htonl(csk->snd_nxt);
554         flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
555         flowc->mnemval[5].val = htonl(csk->rcv_nxt);
556         flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
557         flowc->mnemval[6].val = htonl(csk->snd_win);
558         flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
559         flowc->mnemval[7].val = htonl(csk->advmss);
560         flowc->mnemval[8].mnemonic = 0;
561         flowc->mnemval[8].val = 0;
562         flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
563         flowc->mnemval[8].val = 16384;
564
565         set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
566
567         log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
568                 "csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n",
569                 csk, csk->tid, 0, csk->tx_chan, csk->rss_qid,
570                 csk->snd_nxt, csk->rcv_nxt, csk->snd_win,
571                 csk->advmss);
572
573         cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
574
575         return flowclen16;
576 }
577
578 static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
579                                    int dlen, int len, u32 credits, int compl)
580 {
581         struct fw_ofld_tx_data_wr *req;
582         unsigned int submode = cxgbi_skcb_ulp_mode(skb) & 3;
583         unsigned int wr_ulp_mode = 0, val;
584         bool imm = is_ofld_imm(skb);
585
586         req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req));
587
588         if (imm) {
589                 req->op_to_immdlen = htonl(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
590                                         FW_WR_COMPL_F |
591                                         FW_WR_IMMDLEN_V(dlen));
592                 req->flowid_len16 = htonl(FW_WR_FLOWID_V(csk->tid) |
593                                                 FW_WR_LEN16_V(credits));
594         } else {
595                 req->op_to_immdlen =
596                         cpu_to_be32(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
597                                         FW_WR_COMPL_F |
598                                         FW_WR_IMMDLEN_V(0));
599                 req->flowid_len16 =
600                         cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
601                                         FW_WR_LEN16_V(credits));
602         }
603         if (submode)
604                 wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP2_MODE_ISCSI) |
605                                 FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);
606         val = skb_peek(&csk->write_queue) ? 0 : 1;
607         req->tunnel_to_proxy = htonl(wr_ulp_mode |
608                                      FW_OFLD_TX_DATA_WR_SHOVE_V(val));
609         req->plen = htonl(len);
610         if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT))
611                 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
612 }
613
614 static void arp_failure_skb_discard(void *handle, struct sk_buff *skb)
615 {
616         kfree_skb(skb);
617 }
618
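/*
 * Drain the socket's write queue while work-request credits are available:
 * charge 16-byte credits for each skb (sending the FlowC WR first if no data
 * has gone out yet), prepend a FW_OFLD_TX_DATA_WR header where needed and
 * hand the skb to the L2T transmit path.  Returns the total truesize of the
 * skbs sent.
 */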
619 static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
620 {
621         int total_size = 0;
622         struct sk_buff *skb;
623
624         if (unlikely(csk->state < CTP_ESTABLISHED ||
625                 csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
626                 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK |
627                          1 << CXGBI_DBG_PDU_TX,
628                         "csk 0x%p,%u,0x%lx,%u, in closing state.\n",
629                         csk, csk->state, csk->flags, csk->tid);
630                 return 0;
631         }
632
633         while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
634                 int dlen = skb->len;
635                 int len = skb->len;
636                 unsigned int credits_needed;
637                 int flowclen16 = 0;
638
639                 skb_reset_transport_header(skb);
640                 if (is_ofld_imm(skb))
641                         credits_needed = DIV_ROUND_UP(dlen, 16);
642                 else
643                         credits_needed = DIV_ROUND_UP(
644                                                 8 * calc_tx_flits_ofld(skb),
645                                                 16);
646
647                 if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
648                         credits_needed += DIV_ROUND_UP(
649                                         sizeof(struct fw_ofld_tx_data_wr),
650                                         16);
651
652                 /*
653                  * Assumes the initial credits are large enough to support
654                  * the fw_flowc_wr plus the largest possible first payload.
655                  */
656                 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
657                         flowclen16 = send_tx_flowc_wr(csk);
658                         csk->wr_cred -= flowclen16;
659                         csk->wr_una_cred += flowclen16;
660                         cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
661                 }
662
663                 if (csk->wr_cred < credits_needed) {
664                         log_debug(1 << CXGBI_DBG_PDU_TX,
665                                 "csk 0x%p, skb %u/%u, wr %d < %u.\n",
666                                 csk, skb->len, skb->data_len,
667                                 credits_needed, csk->wr_cred);
668                         break;
669                 }
670                 __skb_unlink(skb, &csk->write_queue);
671                 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
672                 skb->csum = credits_needed + flowclen16;
673                 csk->wr_cred -= credits_needed;
674                 csk->wr_una_cred += credits_needed;
675                 cxgbi_sock_enqueue_wr(csk, skb);
676
677                 log_debug(1 << CXGBI_DBG_PDU_TX,
678                         "csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
679                         csk, skb->len, skb->data_len, credits_needed,
680                         csk->wr_cred, csk->wr_una_cred);
681
682                 if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
683                         len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
684                         make_tx_data_wr(csk, skb, dlen, len, credits_needed,
685                                         req_completion);
686                         csk->snd_nxt += len;
687                         cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
688                 } else if (cxgbi_skcb_test_flag(skb, SKCBF_TX_FLAG_COMPL) &&
689                            (csk->wr_una_cred >= (csk->wr_max_cred / 2))) {
690                         struct cpl_close_con_req *req =
691                                 (struct cpl_close_con_req *)skb->data;
692                         req->wr.wr_hi |= htonl(FW_WR_COMPL_F);
693                 }
694                 total_size += skb->truesize;
695                 t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);
696
697                 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
698                         "csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n",
699                         csk, csk->state, csk->flags, csk->tid, skb, len);
700
701                 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
702         }
703         return total_size;
704 }
705
706 static inline void free_atid(struct cxgbi_sock *csk)
707 {
708         struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
709
710         if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
711                 cxgb4_free_atid(lldi->tids, csk->atid);
712                 cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
713                 cxgbi_sock_put(csk);
714         }
715 }
716
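/*
 * CPL_ACT_ESTABLISH: the active open completed.  Move the socket from its
 * atid to the hardware-assigned tid, pick up the negotiated TCP options and
 * push any data that was queued while the connection was being set up.
 */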
717 static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
718 {
719         struct cxgbi_sock *csk;
720         struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data;
721         unsigned short tcp_opt = ntohs(req->tcp_opt);
722         unsigned int tid = GET_TID(req);
723         unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
724         struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
725         struct tid_info *t = lldi->tids;
726         u32 rcv_isn = be32_to_cpu(req->rcv_isn);
727
728         csk = lookup_atid(t, atid);
729         if (unlikely(!csk)) {
730                 pr_err("NO conn. for atid %u, cdev 0x%p.\n", atid, cdev);
731                 goto rel_skb;
732         }
733
734         if (csk->atid != atid) {
735                 pr_err("bad conn atid %u, csk 0x%p,%u,0x%lx,tid %u, atid %u.\n",
736                         atid, csk, csk->state, csk->flags, csk->tid, csk->atid);
737                 goto rel_skb;
738         }
739
740         pr_info_ipaddr("atid 0x%x, tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
741                        (&csk->saddr), (&csk->daddr),
742                        atid, tid, csk, csk->state, csk->flags, rcv_isn);
743
744         module_put(THIS_MODULE);
745
746         cxgbi_sock_get(csk);
747         csk->tid = tid;
748         cxgb4_insert_tid(lldi->tids, csk, tid);
749         cxgbi_sock_set_flag(csk, CTPF_HAS_TID);
750
751         free_atid(csk);
752
753         spin_lock_bh(&csk->lock);
754         if (unlikely(csk->state != CTP_ACTIVE_OPEN))
755                 pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
756                         csk, csk->state, csk->flags, csk->tid);
757
758         if (csk->retry_timer.function) {
759                 del_timer(&csk->retry_timer);
760                 csk->retry_timer.function = NULL;
761         }
762
763         csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
764         /*
765          * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
766          * pass through opt0.
767          */
768         if (csk->rcv_win > (RCV_BUFSIZ_MASK << 10))
769                 csk->rcv_wup -= csk->rcv_win - (RCV_BUFSIZ_MASK << 10);
770
771         csk->advmss = lldi->mtus[TCPOPT_MSS_G(tcp_opt)] - 40;
772         if (TCPOPT_TSTAMP_G(tcp_opt))
773                 csk->advmss -= 12;
774         if (csk->advmss < 128)
775                 csk->advmss = 128;
776
777         log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
778                 "csk 0x%p, mss_idx %u, advmss %u.\n",
779                         csk, TCPOPT_MSS_G(tcp_opt), csk->advmss);
780
781         cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));
782
783         if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
784                 send_abort_req(csk);
785         else {
786                 if (skb_queue_len(&csk->write_queue))
787                         push_tx_frames(csk, 0);
788                 cxgbi_conn_tx_open(csk);
789         }
790         spin_unlock_bh(&csk->lock);
791
792 rel_skb:
793         __kfree_skb(skb);
794 }
795
796 static int act_open_rpl_status_to_errno(int status)
797 {
798         switch (status) {
799         case CPL_ERR_CONN_RESET:
800                 return -ECONNREFUSED;
801         case CPL_ERR_ARP_MISS:
802                 return -EHOSTUNREACH;
803         case CPL_ERR_CONN_TIMEDOUT:
804                 return -ETIMEDOUT;
805         case CPL_ERR_TCAM_FULL:
806                 return -ENOMEM;
807         case CPL_ERR_CONN_EXIST:
808                 return -EADDRINUSE;
809         default:
810                 return -EIO;
811         }
812 }
813
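/*
 * Retry timer armed by do_act_open_rpl() on CPL_ERR_CONN_EXIST: allocate a
 * fresh work request and re-send the active-open request.
 */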
814 static void csk_act_open_retry_timer(unsigned long data)
815 {
816         struct sk_buff *skb = NULL;
817         struct cxgbi_sock *csk = (struct cxgbi_sock *)data;
818         struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
819         void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *,
820                                    struct l2t_entry *);
821         int t4 = is_t4(lldi->adapter_type), size, size6;
822
823         log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
824                 "csk 0x%p,%u,0x%lx,%u.\n",
825                 csk, csk->state, csk->flags, csk->tid);
826
827         cxgbi_sock_get(csk);
828         spin_lock_bh(&csk->lock);
829
830         if (t4) {
831                 size = sizeof(struct cpl_act_open_req);
832                 size6 = sizeof(struct cpl_act_open_req6);
833         } else {
834                 size = sizeof(struct cpl_t5_act_open_req);
835                 size6 = sizeof(struct cpl_t5_act_open_req6);
836         }
837
838         if (csk->csk_family == AF_INET) {
839                 send_act_open_func = send_act_open_req;
840                 skb = alloc_wr(size, 0, GFP_ATOMIC);
841 #if IS_ENABLED(CONFIG_IPV6)
842         } else {
843                 send_act_open_func = send_act_open_req6;
844                 skb = alloc_wr(size6, 0, GFP_ATOMIC);
845 #endif
846         }
847
848         if (!skb)
849                 cxgbi_sock_fail_act_open(csk, -ENOMEM);
850         else {
851                 skb->sk = (struct sock *)csk;
852                 t4_set_arp_err_handler(skb, csk,
853                                        cxgbi_sock_act_open_req_arp_failure);
854                 send_act_open_func(csk, skb, csk->l2t);
855         }
856
857         spin_unlock_bh(&csk->lock);
858         cxgbi_sock_put(csk);
859
860 }
861
862 static inline bool is_neg_adv(unsigned int status)
863 {
864         return status == CPL_ERR_RTX_NEG_ADVICE ||
865                 status == CPL_ERR_KEEPALV_NEG_ADVICE ||
866                 status == CPL_ERR_PERSIST_NEG_ADVICE;
867 }
868
869 static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
870 {
871         struct cxgbi_sock *csk;
872         struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data;
873         unsigned int tid = GET_TID(rpl);
874         unsigned int atid =
875                 TID_TID_G(AOPEN_ATID_G(be32_to_cpu(rpl->atid_status)));
876         unsigned int status = AOPEN_STATUS_G(be32_to_cpu(rpl->atid_status));
877         struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
878         struct tid_info *t = lldi->tids;
879
880         csk = lookup_atid(t, atid);
881         if (unlikely(!csk)) {
882                 pr_err("NO matching conn. atid %u, tid %u.\n", atid, tid);
883                 goto rel_skb;
884         }
885
886         pr_info_ipaddr("tid %u/%u, status %u.\n"
887                        "csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr),
888                        atid, tid, status, csk, csk->state, csk->flags);
889
890         if (is_neg_adv(status))
891                 goto rel_skb;
892
893         module_put(THIS_MODULE);
894
895         if (status && status != CPL_ERR_TCAM_FULL &&
896             status != CPL_ERR_CONN_EXIST &&
897             status != CPL_ERR_ARP_MISS)
898                 cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl));
899
900         cxgbi_sock_get(csk);
901         spin_lock_bh(&csk->lock);
902
903         if (status == CPL_ERR_CONN_EXIST &&
904             csk->retry_timer.function != csk_act_open_retry_timer) {
905                 csk->retry_timer.function = csk_act_open_retry_timer;
906                 mod_timer(&csk->retry_timer, jiffies + HZ / 2);
907         } else
908                 cxgbi_sock_fail_act_open(csk,
909                                         act_open_rpl_status_to_errno(status));
910
911         spin_unlock_bh(&csk->lock);
912         cxgbi_sock_put(csk);
913 rel_skb:
914         __kfree_skb(skb);
915 }
916
917 static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb)
918 {
919         struct cxgbi_sock *csk;
920         struct cpl_peer_close *req = (struct cpl_peer_close *)skb->data;
921         unsigned int tid = GET_TID(req);
922         struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
923         struct tid_info *t = lldi->tids;
924
925         csk = lookup_tid(t, tid);
926         if (unlikely(!csk)) {
927                 pr_err("can't find connection for tid %u.\n", tid);
928                 goto rel_skb;
929         }
930         pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
931                        (&csk->saddr), (&csk->daddr),
932                        csk, csk->state, csk->flags, csk->tid);
933         cxgbi_sock_rcv_peer_close(csk);
934 rel_skb:
935         __kfree_skb(skb);
936 }
937
938 static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
939 {
940         struct cxgbi_sock *csk;
941         struct cpl_close_con_rpl *rpl = (struct cpl_close_con_rpl *)skb->data;
942         unsigned int tid = GET_TID(rpl);
943         struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
944         struct tid_info *t = lldi->tids;
945
946         csk = lookup_tid(t, tid);
947         if (unlikely(!csk)) {
948                 pr_err("can't find connection for tid %u.\n", tid);
949                 goto rel_skb;
950         }
951         pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
952                        (&csk->saddr), (&csk->daddr),
953                        csk, csk->state, csk->flags, csk->tid);
954         cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
955 rel_skb:
956         __kfree_skb(skb);
957 }
958
959 static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
960                                                                 int *need_rst)
961 {
962         switch (abort_reason) {
963         case CPL_ERR_BAD_SYN: /* fall through */
964         case CPL_ERR_CONN_RESET:
965                 return csk->state > CTP_ESTABLISHED ?
966                         -EPIPE : -ECONNRESET;
967         case CPL_ERR_XMIT_TIMEDOUT:
968         case CPL_ERR_PERSIST_TIMEDOUT:
969         case CPL_ERR_FINWAIT2_TIMEDOUT:
970         case CPL_ERR_KEEPALIVE_TIMEDOUT:
971                 return -ETIMEDOUT;
972         default:
973                 return -EIO;
974         }
975 }
976
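/*
 * CPL_ABORT_REQ_RSS: the connection was aborted by the peer or the hardware.
 * Reply with CPL_ABORT_RPL and tear the connection down unless an abort we
 * initiated is still pending.
 */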
977 static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
978 {
979         struct cxgbi_sock *csk;
980         struct cpl_abort_req_rss *req = (struct cpl_abort_req_rss *)skb->data;
981         unsigned int tid = GET_TID(req);
982         struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
983         struct tid_info *t = lldi->tids;
984         int rst_status = CPL_ABORT_NO_RST;
985
986         csk = lookup_tid(t, tid);
987         if (unlikely(!csk)) {
988                 pr_err("can't find connection for tid %u.\n", tid);
989                 goto rel_skb;
990         }
991
992         pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
993                        (&csk->saddr), (&csk->daddr),
994                        csk, csk->state, csk->flags, csk->tid, req->status);
995
996         if (is_neg_adv(req->status))
997                 goto rel_skb;
998
999         cxgbi_sock_get(csk);
1000         spin_lock_bh(&csk->lock);
1001
1002         cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);
1003
1004         if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
1005                 send_tx_flowc_wr(csk);
1006                 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
1007         }
1008
1009         cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
1010         cxgbi_sock_set_state(csk, CTP_ABORTING);
1011
1012         send_abort_rpl(csk, rst_status);
1013
1014         if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
1015                 csk->err = abort_status_to_errno(csk, req->status, &rst_status);
1016                 cxgbi_sock_closed(csk);
1017         }
1018
1019         spin_unlock_bh(&csk->lock);
1020         cxgbi_sock_put(csk);
1021 rel_skb:
1022         __kfree_skb(skb);
1023 }
1024
1025 static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
1026 {
1027         struct cxgbi_sock *csk;
1028         struct cpl_abort_rpl_rss *rpl = (struct cpl_abort_rpl_rss *)skb->data;
1029         unsigned int tid = GET_TID(rpl);
1030         struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1031         struct tid_info *t = lldi->tids;
1032
1033         csk = lookup_tid(t, tid);
1034         if (!csk)
1035                 goto rel_skb;
1036
1037         if (csk)
1038                 pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
1039                                (&csk->saddr), (&csk->daddr), csk,
1040                                csk->state, csk->flags, csk->tid, rpl->status);
1041
1042         if (rpl->status == CPL_ERR_ABORT_FAILED)
1043                 goto rel_skb;
1044
1045         cxgbi_sock_rcv_abort_rpl(csk);
1046 rel_skb:
1047         __kfree_skb(skb);
1048 }
1049
1050 static void do_rx_data(struct cxgbi_device *cdev, struct sk_buff *skb)
1051 {
1052         struct cxgbi_sock *csk;
1053         struct cpl_rx_data *cpl = (struct cpl_rx_data *)skb->data;
1054         unsigned int tid = GET_TID(cpl);
1055         struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1056         struct tid_info *t = lldi->tids;
1057
1058         csk = lookup_tid(t, tid);
1059         if (!csk) {
1060                 pr_err("can't find connection for tid %u.\n", tid);
1061         } else {
1062                 /* not expecting this, reset the connection. */
1063                 pr_err("csk 0x%p, tid %u, rcv cpl_rx_data.\n", csk, tid);
1064                 spin_lock_bh(&csk->lock);
1065                 send_abort_req(csk);
1066                 spin_unlock_bh(&csk->lock);
1067         }
1068         __kfree_skb(skb);
1069 }
1070
1071 static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
1072 {
1073         struct cxgbi_sock *csk;
1074         struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
1075         unsigned short pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
1076         unsigned int tid = GET_TID(cpl);
1077         struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1078         struct tid_info *t = lldi->tids;
1079
1080         csk = lookup_tid(t, tid);
1081         if (unlikely(!csk)) {
1082                 pr_err("can't find conn. for tid %u.\n", tid);
1083                 goto rel_skb;
1084         }
1085
1086         log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
1087                 "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
1088                 csk, csk->state, csk->flags, csk->tid, skb, skb->len,
1089                 pdu_len_ddp);
1090
1091         spin_lock_bh(&csk->lock);
1092
1093         if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
1094                 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1095                         "csk 0x%p,%u,0x%lx,%u, bad state.\n",
1096                         csk, csk->state, csk->flags, csk->tid);
1097                 if (csk->state != CTP_ABORTING)
1098                         goto abort_conn;
1099                 else
1100                         goto discard;
1101         }
1102
1103         cxgbi_skcb_tcp_seq(skb) = ntohl(cpl->seq);
1104         cxgbi_skcb_flags(skb) = 0;
1105
1106         skb_reset_transport_header(skb);
1107         __skb_pull(skb, sizeof(*cpl));
1108         __pskb_trim(skb, ntohs(cpl->len));
1109
1110         if (!csk->skb_ulp_lhdr) {
1111                 unsigned char *bhs;
1112                 unsigned int hlen, dlen, plen;
1113
1114                 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
1115                         "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n",
1116                         csk, csk->state, csk->flags, csk->tid, skb);
1117                 csk->skb_ulp_lhdr = skb;
1118                 cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);
1119
1120                 if (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt) {
1121                         pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n",
1122                                 csk->tid, cxgbi_skcb_tcp_seq(skb),
1123                                 csk->rcv_nxt);
1124                         goto abort_conn;
1125                 }
1126
1127                 bhs = skb->data;
1128                 hlen = ntohs(cpl->len);
1129                 dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF;
1130
1131                 plen = ISCSI_PDU_LEN_G(pdu_len_ddp);
1132                 if (is_t4(lldi->adapter_type))
1133                         plen -= 40;
1134
1135                 if ((hlen + dlen) != plen) {
1136                         pr_info("tid 0x%x, CPL_ISCSI_HDR, pdu len "
1137                                 "mismatch %u != %u + %u, seq 0x%x.\n",
1138                                 csk->tid, plen, hlen, dlen,
1139                                 cxgbi_skcb_tcp_seq(skb));
1140                         goto abort_conn;
1141                 }
1142
1143                 cxgbi_skcb_rx_pdulen(skb) = (hlen + dlen + 3) & (~0x3);
1144                 if (dlen)
1145                         cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len;
1146                 csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb);
1147
1148                 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
1149                         "csk 0x%p, skb 0x%p, 0x%x,%u+%u,0x%x,0x%x.\n",
1150                         csk, skb, *bhs, hlen, dlen,
1151                         ntohl(*((unsigned int *)(bhs + 16))),
1152                         ntohl(*((unsigned int *)(bhs + 24))));
1153
1154         } else {
1155                 struct sk_buff *lskb = csk->skb_ulp_lhdr;
1156
1157                 cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
1158                 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
1159                         "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
1160                         csk, csk->state, csk->flags, skb, lskb);
1161         }
1162
1163         __skb_queue_tail(&csk->receive_queue, skb);
1164         spin_unlock_bh(&csk->lock);
1165         return;
1166
1167 abort_conn:
1168         send_abort_req(csk);
1169 discard:
1170         spin_unlock_bh(&csk->lock);
1171 rel_skb:
1172         __kfree_skb(skb);
1173 }
1174
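/*
 * CPL_RX_DATA_DDP: completion status for the PDU whose header arrived in the
 * preceding CPL_ISCSI_HDR.  Record digest/padding/DDP status on the header
 * skb and hand the completed PDU to libcxgbi.
 */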
1175 static void do_rx_data_ddp(struct cxgbi_device *cdev,
1176                                   struct sk_buff *skb)
1177 {
1178         struct cxgbi_sock *csk;
1179         struct sk_buff *lskb;
1180         struct cpl_rx_data_ddp *rpl = (struct cpl_rx_data_ddp *)skb->data;
1181         unsigned int tid = GET_TID(rpl);
1182         struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1183         struct tid_info *t = lldi->tids;
1184         unsigned int status = ntohl(rpl->ddpvld);
1185
1186         csk = lookup_tid(t, tid);
1187         if (unlikely(!csk)) {
1188                 pr_err("can't find connection for tid %u.\n", tid);
1189                 goto rel_skb;
1190         }
1191
1192         log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
1193                 "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
1194                 csk, csk->state, csk->flags, skb, status, csk->skb_ulp_lhdr);
1195
1196         spin_lock_bh(&csk->lock);
1197
1198         if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
1199                 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1200                         "csk 0x%p,%u,0x%lx,%u, bad state.\n",
1201                         csk, csk->state, csk->flags, csk->tid);
1202                 if (csk->state != CTP_ABORTING)
1203                         goto abort_conn;
1204                 else
1205                         goto discard;
1206         }
1207
1208         if (!csk->skb_ulp_lhdr) {
1209                 pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid);
1210                 goto abort_conn;
1211         }
1212
1213         lskb = csk->skb_ulp_lhdr;
1214         csk->skb_ulp_lhdr = NULL;
1215
1216         cxgbi_skcb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc);
1217
1218         if (ntohs(rpl->len) != cxgbi_skcb_rx_pdulen(lskb))
1219                 pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
1220                         csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));
1221
1222         if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
1223                 pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
1224                         csk, lskb, status, cxgbi_skcb_flags(lskb));
1225                 cxgbi_skcb_set_flag(lskb, SKCBF_RX_HCRC_ERR);
1226         }
1227         if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
1228                 pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
1229                         csk, lskb, status, cxgbi_skcb_flags(lskb));
1230                 cxgbi_skcb_set_flag(lskb, SKCBF_RX_DCRC_ERR);
1231         }
1232         if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
1233                 log_debug(1 << CXGBI_DBG_PDU_RX,
1234                         "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
1235                         csk, lskb, status);
1236                 cxgbi_skcb_set_flag(lskb, SKCBF_RX_PAD_ERR);
1237         }
1238         if ((status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
1239                 !cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA)) {
1240                 log_debug(1 << CXGBI_DBG_PDU_RX,
1241                         "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
1242                         csk, lskb, status);
1243                 cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA_DDPD);
1244         }
1245         log_debug(1 << CXGBI_DBG_PDU_RX,
1246                 "csk 0x%p, lskb 0x%p, f 0x%lx.\n",
1247                 csk, lskb, cxgbi_skcb_flags(lskb));
1248
1249         cxgbi_skcb_set_flag(lskb, SKCBF_RX_STATUS);
1250         cxgbi_conn_pdu_ready(csk);
1251         spin_unlock_bh(&csk->lock);
1252         goto rel_skb;
1253
1254 abort_conn:
1255         send_abort_req(csk);
1256 discard:
1257         spin_unlock_bh(&csk->lock);
1258 rel_skb:
1259         __kfree_skb(skb);
1260 }
1261
1262 static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
1263 {
1264         struct cxgbi_sock *csk;
1265         struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)skb->data;
1266         unsigned int tid = GET_TID(rpl);
1267         struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1268         struct tid_info *t = lldi->tids;
1269
1270         csk = lookup_tid(t, tid);
1271         if (unlikely(!csk))
1272                 pr_err("can't find connection for tid %u.\n", tid);
1273         else {
1274                 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1275                         "csk 0x%p,%u,0x%lx,%u.\n",
1276                         csk, csk->state, csk->flags, csk->tid);
1277                 cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una),
1278                                         rpl->seq_vld);
1279         }
1280         __kfree_skb(skb);
1281 }
1282
1283 static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
1284 {
1285         struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
1286         unsigned int tid = GET_TID(rpl);
1287         struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1288         struct tid_info *t = lldi->tids;
1289         struct cxgbi_sock *csk;
1290
1291         csk = lookup_tid(t, tid);
1292         if (!csk)
1293                 pr_err("can't find conn. for tid %u.\n", tid);
1294
1295         log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1296                 "csk 0x%p,%u,%lx,%u, status 0x%x.\n",
1297                 csk, csk->state, csk->flags, csk->tid, rpl->status);
1298
1299         if (rpl->status != CPL_ERR_NONE)
1300                 pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
1301                         csk, tid, rpl->status);
1302
1303         __kfree_skb(skb);
1304 }
1305
1306 static int alloc_cpls(struct cxgbi_sock *csk)
1307 {
1308         csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req),
1309                                         0, GFP_KERNEL);
1310         if (!csk->cpl_close)
1311                 return -ENOMEM;
1312
1313         csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req),
1314                                         0, GFP_KERNEL);
1315         if (!csk->cpl_abort_req)
1316                 goto free_cpls;
1317
1318         csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl),
1319                                         0, GFP_KERNEL);
1320         if (!csk->cpl_abort_rpl)
1321                 goto free_cpls;
1322         return 0;
1323
1324 free_cpls:
1325         cxgbi_sock_free_cpl_skbs(csk);
1326         return -ENOMEM;
1327 }
1328
1329 static inline void l2t_put(struct cxgbi_sock *csk)
1330 {
1331         if (csk->l2t) {
1332                 cxgb4_l2t_release(csk->l2t);
1333                 csk->l2t = NULL;
1334                 cxgbi_sock_put(csk);
1335         }
1336 }
1337
1338 static void release_offload_resources(struct cxgbi_sock *csk)
1339 {
1340         struct cxgb4_lld_info *lldi;
1341 #if IS_ENABLED(CONFIG_IPV6)
1342         struct net_device *ndev = csk->cdev->ports[csk->port_id];
1343 #endif
1344
1345         log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1346                 "csk 0x%p,%u,0x%lx,%u.\n",
1347                 csk, csk->state, csk->flags, csk->tid);
1348
1349         cxgbi_sock_free_cpl_skbs(csk);
1350         if (csk->wr_cred != csk->wr_max_cred) {
1351                 cxgbi_sock_purge_wr_queue(csk);
1352                 cxgbi_sock_reset_wr_list(csk);
1353         }
1354
1355         l2t_put(csk);
1356 #if IS_ENABLED(CONFIG_IPV6)
1357         if (csk->csk_family == AF_INET6)
1358                 cxgb4_clip_release(ndev,
1359                                    (const u32 *)&csk->saddr6.sin6_addr, 1);
1360 #endif
1361
1362         if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
1363                 free_atid(csk);
1364         else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
1365                 lldi = cxgbi_cdev_priv(csk->cdev);
1366                 cxgb4_remove_tid(lldi->tids, 0, csk->tid);
1367                 cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
1368                 cxgbi_sock_put(csk);
1369         }
1370         csk->dst = NULL;
1371         csk->cdev = NULL;
1372 }
1373
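/*
 * Allocate the offload resources for an active-open connection (atid,
 * L2T entry, IPv6 CLIP entry), select the tx/rx queues and TCP window
 * sizes, and send the active open request to the adapter.
 */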
1374 static int init_act_open(struct cxgbi_sock *csk)
1375 {
1376         struct cxgbi_device *cdev = csk->cdev;
1377         struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1378         struct net_device *ndev = cdev->ports[csk->port_id];
1379         struct sk_buff *skb = NULL;
1380         struct neighbour *n = NULL;
1381         void *daddr;
1382         unsigned int step;
1383         unsigned int size, size6;
1384         int t4 = is_t4(lldi->adapter_type);
1385         unsigned int linkspeed;
1386         unsigned int rcv_winf, snd_winf;
1387
1388         log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1389                 "csk 0x%p,%u,0x%lx,%u.\n",
1390                 csk, csk->state, csk->flags, csk->tid);
1391
1392         if (csk->csk_family == AF_INET)
1393                 daddr = &csk->daddr.sin_addr.s_addr;
1394 #if IS_ENABLED(CONFIG_IPV6)
1395         else if (csk->csk_family == AF_INET6)
1396                 daddr = &csk->daddr6.sin6_addr;
1397 #endif
1398         else {
1399                 pr_err("address family 0x%x not supported\n", csk->csk_family);
1400                 goto rel_resource;
1401         }
1402
1403         n = dst_neigh_lookup(csk->dst, daddr);
1404
1405         if (!n) {
1406                 pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
1407                 goto rel_resource;
1408         }
1409
1410         csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
1411         if (csk->atid < 0) {
1412                 pr_err("%s, NO atid available.\n", ndev->name);
1413                 goto rel_resource_without_clip;
1414         }
1415         cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
1416         cxgbi_sock_get(csk);
1417
1418         csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
1419         if (!csk->l2t) {
1420                 pr_err("%s, cannot alloc l2t.\n", ndev->name);
1421                 goto rel_resource_without_clip;
1422         }
1423         cxgbi_sock_get(csk);
1424
1425 #if IS_ENABLED(CONFIG_IPV6)
1426         if (csk->csk_family == AF_INET6)
1427                 cxgb4_clip_get(ndev, (const u32 *)&csk->saddr6.sin6_addr, 1);
1428 #endif
1429
1430         if (t4) {
1431                 size = sizeof(struct cpl_act_open_req);
1432                 size6 = sizeof(struct cpl_act_open_req6);
1433         } else {
1434                 size = sizeof(struct cpl_t5_act_open_req);
1435                 size6 = sizeof(struct cpl_t5_act_open_req6);
1436         }
1437
1438         if (csk->csk_family == AF_INET)
1439                 skb = alloc_wr(size, 0, GFP_NOIO);
1440 #if IS_ENABLED(CONFIG_IPV6)
1441         else
1442                 skb = alloc_wr(size6, 0, GFP_NOIO);
1443 #endif
1444
1445         if (!skb)
1446                 goto rel_resource;
1447         skb->sk = (struct sock *)csk;
1448         t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure);
1449
1450         if (!csk->mtu)
1451                 csk->mtu = dst_mtu(csk->dst);
1452         cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
1453         csk->tx_chan = cxgb4_port_chan(ndev);
1454         /* SMT two entries per row */
1455         csk->smac_idx = ((cxgb4_port_viid(ndev) & 0x7F)) << 1;
1456         step = lldi->ntxq / lldi->nchan;
1457         csk->txq_idx = cxgb4_port_idx(ndev) * step;
1458         step = lldi->nrxq / lldi->nchan;
1459         csk->rss_qid = lldi->rxq_ids[cxgb4_port_idx(ndev) * step];
1460         linkspeed = ((struct port_info *)netdev_priv(ndev))->link_cfg.speed;
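             /*
              * If the send/receive window module parameters are left unset,
              * scale the 10G defaults up with the port's link speed.
              */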
1461         csk->snd_win = cxgb4i_snd_win;
1462         csk->rcv_win = cxgb4i_rcv_win;
1463         if (cxgb4i_rcv_win <= 0) {
1464                 csk->rcv_win = CXGB4I_DEFAULT_10G_RCV_WIN;
1465                 rcv_winf = linkspeed / SPEED_10000;
1466                 if (rcv_winf)
1467                         csk->rcv_win *= rcv_winf;
1468         }
1469         if (cxgb4i_snd_win <= 0) {
1470                 csk->snd_win = CXGB4I_DEFAULT_10G_SND_WIN;
1471                 snd_winf = linkspeed / SPEED_10000;
1472                 if (snd_winf)
1473                         csk->snd_win *= snd_winf;
1474         }
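             /* Keep enough WR credits in reserve to send an abort request. */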
1475         csk->wr_cred = lldi->wr_cred -
1476                        DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
1477         csk->wr_max_cred = csk->wr_cred;
1478         csk->wr_una_cred = 0;
1479         cxgbi_sock_reset_wr_list(csk);
1480         csk->err = 0;
1481
1482         pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u,%u,%u, mtu %u,%u, smac %u.\n",
1483                        (&csk->saddr), (&csk->daddr), csk, csk->state,
1484                        csk->flags, csk->tx_chan, csk->txq_idx, csk->rss_qid,
1485                        csk->mtu, csk->mss_idx, csk->smac_idx);
1486
1487         /* must wait for either an act_open_rpl or an act_establish */
1488         try_module_get(THIS_MODULE);
1489         cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
1490         if (csk->csk_family == AF_INET)
1491                 send_act_open_req(csk, skb, csk->l2t);
1492 #if IS_ENABLED(CONFIG_IPV6)
1493         else
1494                 send_act_open_req6(csk, skb, csk->l2t);
1495 #endif
1496         neigh_release(n);
1497
1498         return 0;
1499
1500 rel_resource:
1501 #if IS_ENABLED(CONFIG_IPV6)
1502         if (csk->csk_family == AF_INET6)
1503                 cxgb4_clip_release(ndev,
1504                                    (const u32 *)&csk->saddr6.sin6_addr, 1);
1505 #endif
1506 rel_resource_without_clip:
1507         if (n)
1508                 neigh_release(n);
1509         if (skb)
1510                 __kfree_skb(skb);
1511         return -EINVAL;
1512 }
1513
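/*
 * CPL opcode -> handler dispatch table; t4_uld_rx_handler() uses it to
 * route each received CPL message to the routine that handles it.
 */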
1514 static cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
1515         [CPL_ACT_ESTABLISH] = do_act_establish,
1516         [CPL_ACT_OPEN_RPL] = do_act_open_rpl,
1517         [CPL_PEER_CLOSE] = do_peer_close,
1518         [CPL_ABORT_REQ_RSS] = do_abort_req_rss,
1519         [CPL_ABORT_RPL_RSS] = do_abort_rpl_rss,
1520         [CPL_CLOSE_CON_RPL] = do_close_con_rpl,
1521         [CPL_FW4_ACK] = do_fw4_ack,
1522         [CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
1523         [CPL_ISCSI_DATA] = do_rx_iscsi_hdr,
1524         [CPL_SET_TCB_RPL] = do_set_tcb_rpl,
1525         [CPL_RX_DATA_DDP] = do_rx_data_ddp,
1526         [CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
1527         [CPL_RX_DATA] = do_rx_data,
1528 };
1529
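/*
 * Set up connection offload for this device: create the source port
 * map and install the connection management callbacks.
 */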
1530 static int cxgb4i_ofld_init(struct cxgbi_device *cdev)
1531 {
1532         int rc;
1533
1534         if (cxgb4i_max_connect > CXGB4I_MAX_CONN)
1535                 cxgb4i_max_connect = CXGB4I_MAX_CONN;
1536
1537         rc = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base,
1538                                         cxgb4i_max_connect);
1539         if (rc < 0)
1540                 return rc;
1541
1542         cdev->csk_release_offload_resources = release_offload_resources;
1543         cdev->csk_push_tx_frames = push_tx_frames;
1544         cdev->csk_send_abort_req = send_abort_req;
1545         cdev->csk_send_close_req = send_close_req;
1546         cdev->csk_send_rx_credits = send_rx_credits;
1547         cdev->csk_alloc_cpls = alloc_cpls;
1548         cdev->csk_init_act_open = init_act_open;
1549
1550         pr_info("cdev 0x%p, offload up, added.\n", cdev);
1551         return 0;
1552 }
1553
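/*
 * Build the ULP_TX memory-write work request header (followed by an
 * immediate-data sub-command) used to write pagepods into the
 * adapter's iSCSI memory region.
 */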
1554 static inline void
1555 ulp_mem_io_set_hdr(struct cxgbi_device *cdev,
1556                    struct ulp_mem_io *req,
1557                    unsigned int wr_len, unsigned int dlen,
1558                    unsigned int pm_addr,
1559                    int tid)
1560 {
1561         struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1562         struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);
1563
1564         INIT_ULPTX_WR(req, wr_len, 0, tid);
1565         req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) |
1566                 FW_WR_ATOMIC_V(0));
1567         req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
1568                 ULP_MEMIO_ORDER_V(is_t4(lldi->adapter_type)) |
1569                 T5_ULP_MEMIO_IMM_V(!is_t4(lldi->adapter_type)));
1570         req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
1571         req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
1572         req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));
1573
1574         idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
1575         idata->len = htonl(dlen);
1576 }
1577
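/*
 * Allocate an skb large enough for 'npods' pagepods plus the
 * memory-write header, and initialize that header for the pagepod
 * region starting at index 'idx'.
 */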
1578 static struct sk_buff *
1579 ddp_ppod_init_idata(struct cxgbi_device *cdev,
1580                     struct cxgbi_ppm *ppm,
1581                     unsigned int idx, unsigned int npods,
1582                     unsigned int tid)
1583 {
1584         unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
1585         unsigned int dlen = npods << PPOD_SIZE_SHIFT;
1586         unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
1587                                 sizeof(struct ulptx_idata) + dlen, 16);
1588         struct sk_buff *skb = alloc_wr(wr_len, 0, GFP_ATOMIC);
1589
1590         if (!skb) {
1591                 pr_err("%s: %s idx %u, npods %u, OOM.\n",
1592                        __func__, ppm->ndev->name, idx, npods);
1593                 return NULL;
1594         }
1595
1596         ulp_mem_io_set_hdr(cdev, (struct ulp_mem_io *)skb->head, wr_len, dlen,
1597                            pm_addr, tid);
1598
1599         return skb;
1600 }
1601
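/*
 * Fill one memory-write request with up to ULPMEM_IDATA_MAX_NPPODS
 * pagepods as immediate data and queue it on the connection's write
 * queue.
 */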
1602 static int ddp_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
1603                                 struct cxgbi_task_tag_info *ttinfo,
1604                                 unsigned int idx, unsigned int npods,
1605                                 struct scatterlist **sg_pp,
1606                                 unsigned int *sg_off)
1607 {
1608         struct cxgbi_device *cdev = csk->cdev;
1609         struct sk_buff *skb = ddp_ppod_init_idata(cdev, ppm, idx, npods,
1610                                                   csk->tid);
1611         struct ulp_mem_io *req;
1612         struct ulptx_idata *idata;
1613         struct cxgbi_pagepod *ppod;
1614         int i;
1615
1616         if (!skb)
1617                 return -ENOMEM;
1618
1619         req = (struct ulp_mem_io *)skb->head;
1620         idata = (struct ulptx_idata *)(req + 1);
1621         ppod = (struct cxgbi_pagepod *)(idata + 1);
1622
1623         for (i = 0; i < npods; i++, ppod++)
1624                 cxgbi_ddp_set_one_ppod(ppod, ttinfo, sg_pp, sg_off);
1625
1626         cxgbi_skcb_set_flag(skb, SKCBF_TX_MEM_WRITE);
1627         cxgbi_skcb_set_flag(skb, SKCBF_TX_FLAG_COMPL);
1628         set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
1629
1630         spin_lock_bh(&csk->lock);
1631         cxgbi_sock_skb_entail(csk, skb);
1632         spin_unlock_bh(&csk->lock);
1633
1634         return 0;
1635 }
1636
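/*
 * Program all pagepods for a DDP mapping, splitting the update into
 * chunks that fit within a single immediate-data work request.
 */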
1637 static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
1638                        struct cxgbi_task_tag_info *ttinfo)
1639 {
1640         unsigned int pidx = ttinfo->idx;
1641         unsigned int npods = ttinfo->npods;
1642         unsigned int i, cnt;
1643         int err = 0;
1644         struct scatterlist *sg = ttinfo->sgl;
1645         unsigned int offset = 0;
1646
1647         ttinfo->cid = csk->port_id;
1648
1649         for (i = 0; i < npods; i += cnt, pidx += cnt) {
1650                 cnt = npods - i;
1651
1652                 if (cnt > ULPMEM_IDATA_MAX_NPPODS)
1653                         cnt = ULPMEM_IDATA_MAX_NPPODS;
1654                 err = ddp_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt,
1655                                            &sg, &offset);
1656                 if (err < 0)
1657                         break;
1658         }
1659
1660         return err;
1661 }
1662
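/*
 * Program the DDP page-size index into the connection's TCB with a
 * CPL_SET_TCB_FIELD request.
 */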
1663 static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
1664                                 int pg_idx, bool reply)
1665 {
1666         struct sk_buff *skb;
1667         struct cpl_set_tcb_field *req;
1668
1669         if (!pg_idx || pg_idx >= DDP_PGIDX_MAX)
1670                 return 0;
1671
1672         skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
1673         if (!skb)
1674                 return -ENOMEM;
1675
1676         /*  set up ulp page size */
1677         req = (struct cpl_set_tcb_field *)skb->head;
1678         INIT_TP_WR(req, csk->tid);
1679         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
1680         req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
1681         req->word_cookie = htons(0);
1682         req->mask = cpu_to_be64(0x3 << 8);
1683         req->val = cpu_to_be64(pg_idx << 8);
1684         set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
1685
1686         log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1687                 "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);
1688
1689         cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
1690         return 0;
1691 }
1692
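/*
 * Enable or disable iSCSI header/data digest offload by updating the
 * ULP submode bits in the connection's TCB.
 */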
1693 static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
1694                                  int hcrc, int dcrc, int reply)
1695 {
1696         struct sk_buff *skb;
1697         struct cpl_set_tcb_field *req;
1698
1699         if (!hcrc && !dcrc)
1700                 return 0;
1701
1702         skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
1703         if (!skb)
1704                 return -ENOMEM;
1705
1706         csk->hcrc_len = (hcrc ? 4 : 0);
1707         csk->dcrc_len = (dcrc ? 4 : 0);
1708         /*  set up ulp submode */
1709         req = (struct cpl_set_tcb_field *)skb->head;
1710         INIT_TP_WR(req, tid);
1711         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
1712         req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
1713         req->word_cookie = htons(0);
1714         req->mask = cpu_to_be64(0x3 << 4);
1715         req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
1716                                 (dcrc ? ULP_CRC_DATA : 0)) << 4);
1717         set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
1718
1719         log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1720                 "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);
1721
1722         cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
1723         return 0;
1724 }
1725
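/* Return the iSCSI page-pod manager kept in the LLD private info. */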
1726 static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
1727 {
1728         return (struct cxgbi_ppm *)(*((struct cxgb4_lld_info *)
1729                                        (cxgbi_cdev_priv(cdev)))->iscsi_ppm);
1730 }
1731
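/*
 * Initialize DDP: size the pagepod region from the adapter's iSCSI
 * memory window, record the supported page-size orders and tag format,
 * and hook up the DDP callbacks and PDU payload limits.
 */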
1732 static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
1733 {
1734         struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1735         struct net_device *ndev = cdev->ports[0];
1736         struct cxgbi_tag_format tformat;
1737         unsigned int ppmax;
1738         int i;
1739
1740         if (!lldi->vr->iscsi.size) {
1741                 pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name);
1742                 return -EACCES;
1743         }
1744
1745         cdev->flags |= CXGBI_FLAG_USE_PPOD_OFLDQ;
1746         ppmax = lldi->vr->iscsi.size >> PPOD_SIZE_SHIFT;
1747
1748         memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
1749         for (i = 0; i < 4; i++)
1750                 tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3))
1751                                          & 0xF;
1752         cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat);
1753
1754         cxgbi_ddp_ppm_setup(lldi->iscsi_ppm, cdev, &tformat, ppmax,
1755                             lldi->iscsi_llimit, lldi->vr->iscsi.start, 2);
1756
1757         cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
1758         cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
1759         cdev->csk_ddp_set_map = ddp_set_map;
1760         cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
1761                                   lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN);
1762         cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
1763                                   lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN);
1764         cdev->cdev2ppm = cdev2ppm;
1765
1766         return 0;
1767 }
1768
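/*
 * ULD add callback, invoked by cxgb4 for each adapter: register a
 * cxgbi device, copy the lower-level driver info, and initialize DDP,
 * connection offload and the iSCSI hosts for all ports.
 */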
1769 static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
1770 {
1771         struct cxgbi_device *cdev;
1772         struct port_info *pi;
1773         int i, rc;
1774
1775         cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports);
1776         if (!cdev) {
1777                 pr_info("t4 device 0x%p, register failed.\n", lldi);
1778                 return NULL;
1779         }
1780         pr_info("0x%p,0x%x, ports %u,%s, chan %u, q %u,%u, wr %u.\n",
1781                 cdev, lldi->adapter_type, lldi->nports,
1782                 lldi->ports[0]->name, lldi->nchan, lldi->ntxq,
1783                 lldi->nrxq, lldi->wr_cred);
1784         for (i = 0; i < lldi->nrxq; i++)
1785                 log_debug(1 << CXGBI_DBG_DEV,
1786                         "t4 0x%p, rxq id #%d: %u.\n",
1787                         cdev, i, lldi->rxq_ids[i]);
1788
1789         memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi));
1790         cdev->flags = CXGBI_FLAG_DEV_T4;
1791         cdev->pdev = lldi->pdev;
1792         cdev->ports = lldi->ports;
1793         cdev->nports = lldi->nports;
1794         cdev->mtus = lldi->mtus;
1795         cdev->nmtus = NMTUS;
1796         cdev->rx_credit_thres = cxgb4i_rx_credit_thres;
1797         cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
1798         cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
1799         cdev->itp = &cxgb4i_iscsi_transport;
1800
1801         cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
1802                         << FW_VIID_PFN_S;
1803         pr_info("cdev 0x%p,%s, pfvf %u.\n",
1804                 cdev, lldi->ports[0]->name, cdev->pfvf);
1805
1806         rc = cxgb4i_ddp_init(cdev);
1807         if (rc) {
1808                 pr_info("t4 0x%p ddp init failed.\n", cdev);
1809                 goto err_out;
1810         }
1811         rc = cxgb4i_ofld_init(cdev);
1812         if (rc) {
1813                 pr_info("t4 0x%p ofld init failed.\n", cdev);
1814                 goto err_out;
1815         }
1816
1817         rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, CXGBI_MAX_CONN,
1818                                 &cxgb4i_host_template, cxgb4i_stt);
1819         if (rc)
1820                 goto err_out;
1821
1822         for (i = 0; i < cdev->nports; i++) {
1823                 pi = netdev_priv(lldi->ports[i]);
1824                 cdev->hbas[i]->port_id = pi->port_id;
1825         }
1826         return cdev;
1827
1828 err_out:
1829         cxgbi_device_unregister(cdev);
1830         return ERR_PTR(-ENOMEM);
1831 }
1832
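/*
 * ULD rx callback: copy the CPL message out of the response descriptor
 * (or the gather list, if present) into an skb and dispatch it through
 * cxgb4i_cplhandlers[] by opcode.
 */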
1833 #define RX_PULL_LEN     128
1834 static int t4_uld_rx_handler(void *handle, const __be64 *rsp,
1835                                 const struct pkt_gl *pgl)
1836 {
1837         const struct cpl_act_establish *rpl;
1838         struct sk_buff *skb;
1839         unsigned int opc;
1840         struct cxgbi_device *cdev = handle;
1841
1842         if (pgl == NULL) {
1843                 unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;
1844
1845                 skb = alloc_wr(len, 0, GFP_ATOMIC);
1846                 if (!skb)
1847                         goto nomem;
1848                 skb_copy_to_linear_data(skb, &rsp[1], len);
1849         } else {
1850                 if (unlikely(*(u8 *)rsp != *(u8 *)pgl->va)) {
1851                         pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
1852                                 pgl->va, be64_to_cpu(*rsp),
1853                                 be64_to_cpu(*(u64 *)pgl->va),
1854                                 pgl->tot_len);
1855                         return 0;
1856                 }
1857                 skb = cxgb4_pktgl_to_skb(pgl, RX_PULL_LEN, RX_PULL_LEN);
1858                 if (unlikely(!skb))
1859                         goto nomem;
1860         }
1861
1862         rpl = (struct cpl_act_establish *)skb->data;
1863         opc = rpl->ot.opcode;
1864         log_debug(1 << CXGBI_DBG_TOE,
1865                 "cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
1866                  cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid), skb);
1867         if (cxgb4i_cplhandlers[opc])
1868                 cxgb4i_cplhandlers[opc](cdev, skb);
1869         else {
1870                 pr_err("No handler for opcode 0x%x.\n", opc);
1871                 __kfree_skb(skb);
1872         }
1873         return 0;
1874 nomem:
1875         log_debug(1 << CXGBI_DBG_TOE, "OOM bailing out.\n");
1876         return 1;
1877 }
1878
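/*
 * ULD state-change callback: only CXGB4_STATE_DETACH needs action
 * (unregister the cxgbi device); the other transitions are just logged.
 */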
1879 static int t4_uld_state_change(void *handle, enum cxgb4_state state)
1880 {
1881         struct cxgbi_device *cdev = handle;
1882
1883         switch (state) {
1884         case CXGB4_STATE_UP:
1885                 pr_info("cdev 0x%p, UP.\n", cdev);
1886                 break;
1887         case CXGB4_STATE_START_RECOVERY:
1888                 pr_info("cdev 0x%p, RECOVERY.\n", cdev);
1889                 /* close all connections */
1890                 break;
1891         case CXGB4_STATE_DOWN:
1892                 pr_info("cdev 0x%p, DOWN.\n", cdev);
1893                 break;
1894         case CXGB4_STATE_DETACH:
1895                 pr_info("cdev 0x%p, DETACH.\n", cdev);
1896                 cxgbi_device_unregister(cdev);
1897                 break;
1898         default:
1899                 pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
1900                 break;
1901         }
1902         return 0;
1903 }
1904
1905 static int __init cxgb4i_init_module(void)
1906 {
1907         int rc;
1908
1909         printk(KERN_INFO "%s", version);
1910
1911         rc = cxgbi_iscsi_init(&cxgb4i_iscsi_transport, &cxgb4i_stt);
1912         if (rc < 0)
1913                 return rc;
1914         cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);
1915
1916         return 0;
1917 }
1918
1919 static void __exit cxgb4i_exit_module(void)
1920 {
1921         cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
1922         cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
1923         cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
1924 }
1925
1926 module_init(cxgb4i_init_module);
1927 module_exit(cxgb4i_exit_module);