/*
 * cxgb4i.c: Chelsio T4 iSCSI driver.
 *
 * Copyright (c) 2010 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by:	Karen Xie (kxie@chelsio.com)
 *		Rakesh Ranjan (rranjan@chelsio.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <scsi/scsi_host.h>
#include <net/tcp.h>
#include <net/dst.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include "t4_regs.h"
#include "t4_msg.h"
#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4fw_api.h"
#include "l2t.h"
#include "cxgb4i.h"

static unsigned int dbg_level;

#include "../libcxgbi.h"

#define	DRV_MODULE_NAME		"cxgb4i"
#define	DRV_MODULE_DESC		"Chelsio T4/T5 iSCSI Driver"
#define	DRV_MODULE_VERSION	"0.9.4"

static char version[] =
	DRV_MODULE_DESC " " DRV_MODULE_NAME
	" v" DRV_MODULE_VERSION "\n";

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)");

static int cxgb4i_rcv_win = 256 * 1024;
module_param(cxgb4i_rcv_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP receive window in bytes");

static int cxgb4i_snd_win = 128 * 1024;
module_param(cxgb4i_snd_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes");

static int cxgb4i_rx_credit_thres = 10 * 1024;
module_param(cxgb4i_rx_credit_thres, int, 0644);
MODULE_PARM_DESC(cxgb4i_rx_credit_thres,
		 "RX credits return threshold in bytes (default=10KB)");

static unsigned int cxgb4i_max_connect = (8 * 1024);
module_param(cxgb4i_max_connect, uint, 0644);
MODULE_PARM_DESC(cxgb4i_max_connect, "Maximum number of connections");

static unsigned short cxgb4i_sport_base = 20000;
module_param(cxgb4i_sport_base, ushort, 0644);
MODULE_PARM_DESC(cxgb4i_sport_base, "Starting port number (default 20000)");

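/*
 * Illustrative usage (not from the original source): all of the above are
 * ordinary module parameters, so they can be set at load time, e.g.
 *
 *	modprobe cxgb4i cxgb4i_rcv_win=524288 cxgb4i_snd_win=262144
 *
 * and, since they are created with mode 0644, adjusted afterwards through
 * /sys/module/cxgb4i/parameters/.
 */
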
typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *);

static void *t4_uld_add(const struct cxgb4_lld_info *);
static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *);
static int t4_uld_state_change(void *, enum cxgb4_state state);
static inline int send_tx_flowc_wr(struct cxgbi_sock *);

static const struct cxgb4_uld_info cxgb4i_uld_info = {
	.name = DRV_MODULE_NAME,
	.add = t4_uld_add,
	.rx_handler = t4_uld_rx_handler,
	.state_change = t4_uld_state_change,
};

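/*
 * Note (added for clarity): once cxgb4_register_uld(CXGB4_ULD_ISCSI, ...)
 * runs in the module init below, the cxgb4 core is expected to call .add
 * once per T4/T5 adapter (t4_uld_add) and deliver each offloaded message
 * to .rx_handler (t4_uld_rx_handler), which dispatches on the CPL opcode
 * through the cxgb4i_cplhandlers[] table further down.
 */
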
static struct scsi_host_template cxgb4i_host_template = {
	.module		= THIS_MODULE,
	.name		= DRV_MODULE_NAME,
	.proc_name	= DRV_MODULE_NAME,
	.can_queue	= CXGB4I_SCSI_HOST_QDEPTH,
	.queuecommand	= iscsi_queuecommand,
	.change_queue_depth = scsi_change_queue_depth,
	.sg_tablesize	= SG_ALL,
	.max_sectors	= 0xFFFF,
	.cmd_per_lun	= ISCSI_DEF_CMD_PER_LUN,
	.eh_abort_handler = iscsi_eh_abort,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_recover_target,
	.target_alloc	= iscsi_target_alloc,
	.use_clustering	= DISABLE_CLUSTERING,
	.this_id	= -1,
	.track_queue_depth = 1,
};

static struct iscsi_transport cxgb4i_iscsi_transport = {
	.owner		= THIS_MODULE,
	.name		= DRV_MODULE_NAME,
	.caps		= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
				CAP_DATADGST | CAP_DIGEST_OFFLOAD |
				CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
	.attr_is_visible	= cxgbi_attr_is_visible,
	.get_host_param	= cxgbi_get_host_param,
	.set_host_param	= cxgbi_set_host_param,
	/* session management */
	.create_session	= cxgbi_create_session,
	.destroy_session	= cxgbi_destroy_session,
	.get_session_param = iscsi_session_get_param,
	/* connection management */
	.create_conn	= cxgbi_create_conn,
	.bind_conn	= cxgbi_bind_conn,
	.destroy_conn	= iscsi_tcp_conn_teardown,
	.start_conn	= iscsi_conn_start,
	.stop_conn	= iscsi_conn_stop,
	.get_conn_param	= iscsi_conn_get_param,
	.set_param	= cxgbi_set_conn_param,
	.get_stats	= cxgbi_get_conn_stats,
	/* pdu xmit req from user space */
	.send_pdu	= iscsi_conn_send_pdu,
	/* task */
	.init_task	= iscsi_tcp_task_init,
	.xmit_task	= iscsi_tcp_task_xmit,
	.cleanup_task	= cxgbi_cleanup_task,
	/* pdu */
	.alloc_pdu	= cxgbi_conn_alloc_pdu,
	.init_pdu	= cxgbi_conn_init_pdu,
	.xmit_pdu	= cxgbi_conn_xmit_pdu,
	.parse_pdu_itt	= cxgbi_parse_pdu_itt,
	/* TCP connect/disconnect */
	.get_ep_param	= cxgbi_get_ep_param,
	.ep_connect	= cxgbi_ep_connect,
	.ep_poll	= cxgbi_ep_poll,
	.ep_disconnect	= cxgbi_ep_disconnect,
	/* Error recovery timeout call */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};

static struct scsi_transport_template *cxgb4i_stt;

/*
 * CPL (Chelsio Protocol Language) defines a message passing interface between
 * the host driver and the Chelsio ASIC.
 * The section below implements CPLs related to iSCSI TCP connection
 * open/close/abort and data send/receive.
 */

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define RCV_BUFSIZ_MASK		0x3FFU
#define MAX_IMM_TX_PKT_LEN	128

static int push_tx_frames(struct cxgbi_sock *, int);

/*
 * is_ofld_imm - check whether a packet can be sent as immediate data
 * @skb: the packet
 *
 * Returns true if a packet can be sent as an offload WR with immediate
 * data. We currently use the same limit as for Ethernet packets.
 */
static inline bool is_ofld_imm(const struct sk_buff *skb)
{
	int len = skb->len;

	if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
		len += sizeof(struct fw_ofld_tx_data_wr);

	return len <= MAX_IMM_TX_PKT_LEN;
}

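/*
 * Worked example (struct size assumed, added for illustration): with a
 * 16-byte struct fw_ofld_tx_data_wr, a 96-byte PDU that still needs its
 * WR header gives len = 96 + 16 = 112 <= 128, so it travels as immediate
 * data inside the work request; a 200-byte PDU (216 > 128) is instead
 * fetched by the hardware through a gather list.
 */
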
static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
			      struct l2t_entry *e)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	int t4 = is_t4(lldi->adapter_type);
	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
	unsigned long long opt0;
	unsigned int opt2;
	unsigned int qid_atid = ((unsigned int)csk->atid) |
				(((unsigned int)csk->rss_qid) << 14);

	opt0 = KEEP_ALIVE_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(csk->mss_idx) |
		L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
		TX_CHAN_V(csk->tx_chan) |
		SMAC_SEL_V(csk->smac_idx) |
		ULP_MODE_V(ULP_MODE_ISCSI) |
		RCV_BUFSIZ_V(cxgb4i_rcv_win >> 10);
	opt2 = RX_CHANNEL_V(0) |
		RSS_QUEUE_VALID_F |
		RSS_QUEUE_V(csk->rss_qid);

	if (is_t4(lldi->adapter_type)) {
		struct cpl_act_open_req *req =
				(struct cpl_act_open_req *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
							    qid_atid));
		req->local_port = csk->saddr.sin_port;
		req->peer_port = csk->daddr.sin_port;
		req->local_ip = csk->saddr.sin_addr.s_addr;
		req->peer_ip = csk->daddr.sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);
		req->params = cpu_to_be32(cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t));
		opt2 |= RX_FC_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk t4 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
			  csk, &req->local_ip, ntohs(req->local_port),
			  &req->peer_ip, ntohs(req->peer_port),
			  csk->atid, csk->rss_qid);
	} else {
		struct cpl_t5_act_open_req *req =
				(struct cpl_t5_act_open_req *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
							    qid_atid));
		req->local_port = csk->saddr.sin_port;
		req->peer_port = csk->daddr.sin_port;
		req->local_ip = csk->saddr.sin_addr.s_addr;
		req->peer_ip = csk->daddr.sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);
		req->params = cpu_to_be64(FILTER_TUPLE_V(
				cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t)));
		opt2 |= T5_OPT_2_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk t5 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
			  csk, &req->local_ip, ntohs(req->local_port),
			  &req->peer_ip, ntohs(req->peer_port),
			  csk->atid, csk->rss_qid);
	}

	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);

	pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n",
		       (&csk->saddr), (&csk->daddr), t4 ? 4 : 5, csk,
		       csk->state, csk->flags, csk->atid, csk->rss_qid);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}

#if IS_ENABLED(CONFIG_IPV6)
static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
			       struct l2t_entry *e)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	int t4 = is_t4(lldi->adapter_type);
	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
	unsigned long long opt0;
	unsigned int opt2;
	unsigned int qid_atid = ((unsigned int)csk->atid) |
				(((unsigned int)csk->rss_qid) << 14);

	opt0 = KEEP_ALIVE_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(csk->mss_idx) |
		L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
		TX_CHAN_V(csk->tx_chan) |
		SMAC_SEL_V(csk->smac_idx) |
		ULP_MODE_V(ULP_MODE_ISCSI) |
		RCV_BUFSIZ_V(cxgb4i_rcv_win >> 10);

	opt2 = RX_CHANNEL_V(0) |
		RSS_QUEUE_VALID_F |
		RSS_QUEUE_V(csk->rss_qid);

	if (t4) {
		struct cpl_act_open_req6 *req =
			(struct cpl_act_open_req6 *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
							    qid_atid));
		req->local_port = csk->saddr6.sin6_port;
		req->peer_port = csk->daddr6.sin6_port;

		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
								    8);
		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
								    8);

		req->opt0 = cpu_to_be64(opt0);

		opt2 |= RX_FC_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		req->params = cpu_to_be32(cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t));
	} else {
		struct cpl_t5_act_open_req6 *req =
			(struct cpl_t5_act_open_req6 *)skb->head;

		INIT_TP_WR(req, 0);
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
							    qid_atid));
		req->local_port = csk->saddr6.sin6_port;
		req->peer_port = csk->daddr6.sin6_port;
		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
								    8);
		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
								    8);
		req->opt0 = cpu_to_be64(opt0);

		opt2 |= T5_OPT_2_VALID_F;
		req->opt2 = cpu_to_be32(opt2);

		req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
					csk->cdev->ports[csk->port_id],
					csk->l2t)));
	}

	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);

	pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n",
		t4 ? 4 : 5, csk, csk->state, csk->flags, csk->atid,
		&csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port),
		&csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port),
		csk->rss_qid);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
#endif

static void send_close_req(struct cxgbi_sock *csk)
{
	struct sk_buff *skb = csk->cpl_close;
	struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
	unsigned int tid = csk->tid;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx, tid %u.\n",
		  csk, csk->state, csk->flags, csk->tid);
	csk->cpl_close = NULL;
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	req->rsvd = 0;

	cxgbi_sock_skb_entail(csk, skb);
	if (csk->state >= CTP_ESTABLISHED)
		push_tx_frames(csk, 1);
}

static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cxgbi_sock *csk = (struct cxgbi_sock *)handle;
	struct cpl_abort_req *req;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx, tid %u, abort.\n",
		  csk, csk->state, csk->flags, csk->tid);
	req = (struct cpl_abort_req *)skb->data;
	req->cmd = CPL_ABORT_NO_RST;
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}

static void send_abort_req(struct cxgbi_sock *csk)
{
	struct cpl_abort_req *req;
	struct sk_buff *skb = csk->cpl_abort_req;

	if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
		return;

	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
		send_tx_flowc_wr(csk);
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
	}

	cxgbi_sock_set_state(csk, CTP_ABORTING);
	cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
	cxgbi_sock_purge_write_queue(csk);

	csk->cpl_abort_req = NULL;
	req = (struct cpl_abort_req *)skb->head;
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
	req->cmd = CPL_ABORT_SEND_RST;
	t4_set_arp_err_handler(skb, csk, abort_arp_failure);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
	req->rsvd0 = htonl(csk->snd_nxt);
	req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
		  req->rsvd1);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}

static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
{
	struct sk_buff *skb = csk->cpl_abort_rpl;
	struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u, status %d.\n",
		  csk, csk->state, csk->flags, csk->tid, rst_status);

	csk->cpl_abort_rpl = NULL;
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
	INIT_TP_WR(rpl, csk->tid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
	rpl->cmd = rst_status;
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}

/*
 * CPL connection rx data ack: host ->
 * Send RX credits through an RX_DATA_ACK CPL message. Returns the number
 * of credits sent.
 */
static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
{
	struct sk_buff *skb;
	struct cpl_rx_data_ack *req;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx,%u, credit %u.\n",
		  csk, csk->state, csk->flags, csk->tid, credits);

	skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
	if (!skb) {
		pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
		return 0;
	}
	req = (struct cpl_rx_data_ack *)skb->head;

	set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
						    csk->tid));
	req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits)
				       | RX_FORCE_ACK_F);
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return credits;
}

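/*
 * Note (added for clarity): this routine is wired up as
 * cdev->csk_send_rx_credits in cxgb4i_ofld_init() below; the common
 * libcxgbi code is expected to call it once the bytes consumed by the
 * application exceed cxgb4i_rx_credit_thres (10KB by default) and to
 * advance csk->rcv_wup by the returned credit count.
 */
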
/*
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 *
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}

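/*
 * Worked example (added for illustration): a ULPTX SGL carries the first
 * address/length pair alongside the header and then packs two pairs per
 * 3 flits, so for n = 4 entries: n-- leaves 3, and
 * (3 * 3) / 2 + (3 & 1) + 2 = 4 + 1 + 2 = 7 flits (56 bytes).
 */
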
/*
 * calc_tx_flits_ofld - calculate # of flits for an offload packet
 * @skb: the packet
 *
 * Returns the number of flits needed for the given offload packet.
 * These packets are already fully constructed and no additional headers
 * will be added.
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	if (is_ofld_imm(skb))
		return DIV_ROUND_UP(skb->len, 8);
	flits = skb_transport_offset(skb) / 8;
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb_tail_pointer(skb) != skb_transport_header(skb))
		cnt++;
	return flits + sgl_len(cnt);
}

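/*
 * Worked example (added for illustration): for a non-immediate skb with
 * skb_transport_offset() of 16 bytes, 3 page fragments, and linear data
 * past the transport header (so cnt becomes 4), this returns
 * 16 / 8 + sgl_len(4) = 2 + 7 = 9 flits, i.e. 72 bytes of descriptors.
 */
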
#define FLOWC_WR_NPARAMS_MIN	9
static inline int tx_flowc_wr_credits(int *nparamsp, int *flowclenp)
{
	int nparams, flowclen16, flowclen;

	nparams = FLOWC_WR_NPARAMS_MIN;
	flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
	flowclen16 = DIV_ROUND_UP(flowclen, 16);
	flowclen = flowclen16 * 16;
	/*
	 * Return the number of 16-byte credits used by the FlowC request.
	 * Pass back the nparams and actual FlowC length if requested.
	 */
	if (nparamsp)
		*nparamsp = nparams;
	if (flowclenp)
		*flowclenp = flowclen;

	return flowclen16;
}

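/*
 * Worked example (struct sizes assumed, added for illustration): with an
 * 8-byte fw_flowc_wr header and 8-byte mnemonic/value pairs, nparams = 9
 * gives flowclen = 8 + 9 * 8 = 80 bytes, so flowclen16 =
 * DIV_ROUND_UP(80, 16) = 5, i.e. the FlowC WR costs 5 of the 16-byte tx
 * credits that push_tx_frames() charges against csk->wr_cred.
 */
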
static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
{
	struct sk_buff *skb;
	struct fw_flowc_wr *flowc;
	int nparams, flowclen16, flowclen;

	flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen);
	skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
	flowc = (struct fw_flowc_wr *)skb->head;
	flowc->op_to_nparams =
		htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(nparams));
	flowc->flowid_len16 =
		htonl(FW_WR_LEN16_V(flowclen16) | FW_WR_FLOWID_V(csk->tid));
	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = htonl(csk->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = htonl(csk->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = htonl(csk->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = htonl(csk->snd_nxt);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = htonl(csk->rcv_nxt);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = htonl(cxgb4i_snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = htonl(csk->advmss);
	flowc->mnemval[8].mnemonic = 0;
	flowc->mnemval[8].val = 0;
	flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
	flowc->mnemval[8].val = 16384;

	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n",
		  csk, csk->tid, 0, csk->tx_chan, csk->rss_qid,
		  csk->snd_nxt, csk->rcv_nxt, cxgb4i_snd_win,
		  csk->advmss);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);

	return flowclen16;
}

static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
				   int dlen, int len, u32 credits, int compl)
{
	struct fw_ofld_tx_data_wr *req;
	unsigned int submode = cxgbi_skcb_ulp_mode(skb) & 3;
	unsigned int wr_ulp_mode = 0, val;
	bool imm = is_ofld_imm(skb);

	req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req));

	if (imm) {
		req->op_to_immdlen = htonl(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
					   FW_WR_COMPL_F |
					   FW_WR_IMMDLEN_V(dlen));
		req->flowid_len16 = htonl(FW_WR_FLOWID_V(csk->tid) |
					  FW_WR_LEN16_V(credits));
	} else {
		req->op_to_immdlen =
			cpu_to_be32(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
				    FW_WR_COMPL_F |
				    FW_WR_IMMDLEN_V(0));
		req->flowid_len16 =
			cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
				    FW_WR_LEN16_V(credits));
	}
	if (submode)
		wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP2_MODE_ISCSI) |
			      FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);
	val = skb_peek(&csk->write_queue) ? 0 : 1;
	req->tunnel_to_proxy = htonl(wr_ulp_mode |
				     FW_OFLD_TX_DATA_WR_SHOVE_V(val));
	req->plen = htonl(len);
	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT))
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
}

static void arp_failure_skb_discard(void *handle, struct sk_buff *skb)
{
	kfree_skb(skb);
}

static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
{
	int total_size = 0;
	struct sk_buff *skb;

	if (unlikely(csk->state < CTP_ESTABLISHED ||
		csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK |
			  1 << CXGBI_DBG_PDU_TX,
			  "csk 0x%p,%u,0x%lx,%u, in closing state.\n",
			  csk, csk->state, csk->flags, csk->tid);
		return 0;
	}

	while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
		int dlen = skb->len;
		int len = skb->len;
		unsigned int credits_needed;
		int flowclen16 = 0;

		skb_reset_transport_header(skb);
		if (is_ofld_imm(skb))
			credits_needed = DIV_ROUND_UP(dlen, 16);
		else
			credits_needed = DIV_ROUND_UP(
						8 * calc_tx_flits_ofld(skb),
						16);

		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
			credits_needed += DIV_ROUND_UP(
					sizeof(struct fw_ofld_tx_data_wr),
					16);

		/*
		 * Assumes the initial credits is large enough to support
		 * fw_flowc_wr plus largest possible first payload
		 */
		if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
			flowclen16 = send_tx_flowc_wr(csk);
			csk->wr_cred -= flowclen16;
			csk->wr_una_cred += flowclen16;
			cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
		}

		if (csk->wr_cred < credits_needed) {
			log_debug(1 << CXGBI_DBG_PDU_TX,
				  "csk 0x%p, skb %u/%u, wr %d < %u.\n",
				  csk, skb->len, skb->data_len,
				  credits_needed, csk->wr_cred);
			break;
		}
		__skb_unlink(skb, &csk->write_queue);
		set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
		skb->csum = credits_needed + flowclen16;
		csk->wr_cred -= credits_needed;
		csk->wr_una_cred += credits_needed;
		cxgbi_sock_enqueue_wr(csk, skb);

		log_debug(1 << CXGBI_DBG_PDU_TX,
			  "csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
			  csk, skb->len, skb->data_len, credits_needed,
			  csk->wr_cred, csk->wr_una_cred);

		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
			len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
			make_tx_data_wr(csk, skb, dlen, len, credits_needed,
					req_completion);
			csk->snd_nxt += len;
			cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
		}
		total_size += skb->truesize;
		t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
			  "csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n",
			  csk, csk->state, csk->flags, csk->tid, skb, len);

		cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
	}
	return total_size;
}

static inline void free_atid(struct cxgbi_sock *csk)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);

	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
		cxgb4_free_atid(lldi->tids, csk->atid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
		cxgbi_sock_put(csk);
	}
}

static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data;
	unsigned short tcp_opt = ntohs(req->tcp_opt);
	unsigned int tid = GET_TID(req);
	unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	u32 rcv_isn = be32_to_cpu(req->rcv_isn);

	csk = lookup_atid(t, atid);
	if (unlikely(!csk)) {
		pr_err("NO conn. for atid %u, cdev 0x%p.\n", atid, cdev);
		goto rel_skb;
	}

	if (csk->atid != atid) {
		pr_err("bad conn atid %u, csk 0x%p,%u,0x%lx,tid %u, atid %u.\n",
		       atid, csk, csk->state, csk->flags, csk->tid, csk->atid);
		goto rel_skb;
	}

	pr_info_ipaddr("atid 0x%x, tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
		       (&csk->saddr), (&csk->daddr),
		       atid, tid, csk, csk->state, csk->flags, rcv_isn);

	module_put(THIS_MODULE);

	cxgbi_sock_get(csk);
	csk->tid = tid;
	cxgb4_insert_tid(lldi->tids, csk, tid);
	cxgbi_sock_set_flag(csk, CTPF_HAS_TID);

	free_atid(csk);

	spin_lock_bh(&csk->lock);
	if (unlikely(csk->state != CTP_ACTIVE_OPEN))
		pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
			csk, csk->state, csk->flags, csk->tid);

	if (csk->retry_timer.function) {
		del_timer(&csk->retry_timer);
		csk->retry_timer.function = NULL;
	}

	csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
	/*
	 * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
	 * pass through opt0.
	 */
	if (cxgb4i_rcv_win > (RCV_BUFSIZ_MASK << 10))
		csk->rcv_wup -= cxgb4i_rcv_win - (RCV_BUFSIZ_MASK << 10);

	csk->advmss = lldi->mtus[GET_TCPOPT_MSS(tcp_opt)] - 40;
	if (GET_TCPOPT_TSTAMP(tcp_opt))
		csk->advmss -= 12;
	if (csk->advmss < 128)
		csk->advmss = 128;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, mss_idx %u, advmss %u.\n",
		  csk, GET_TCPOPT_MSS(tcp_opt), csk->advmss);

	cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));

	if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
		send_abort_req(csk);
	else {
		if (skb_queue_len(&csk->write_queue))
			push_tx_frames(csk, 0);
		cxgbi_conn_tx_open(csk);
	}
	spin_unlock_bh(&csk->lock);

rel_skb:
	__kfree_skb(skb);
}

static int act_open_rpl_status_to_errno(int status)
{
	switch (status) {
	case CPL_ERR_CONN_RESET:
		return -ECONNREFUSED;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

static void csk_act_open_retry_timer(unsigned long data)
{
	struct sk_buff *skb = NULL;
	struct cxgbi_sock *csk = (struct cxgbi_sock *)data;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
	void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *,
				   struct l2t_entry *);
	int t4 = is_t4(lldi->adapter_type), size, size6;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (t4) {
		size = sizeof(struct cpl_act_open_req);
		size6 = sizeof(struct cpl_act_open_req6);
	} else {
		size = sizeof(struct cpl_t5_act_open_req);
		size6 = sizeof(struct cpl_t5_act_open_req6);
	}

	if (csk->csk_family == AF_INET) {
		send_act_open_func = send_act_open_req;
		skb = alloc_wr(size, 0, GFP_ATOMIC);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		send_act_open_func = send_act_open_req6;
		skb = alloc_wr(size6, 0, GFP_ATOMIC);
#endif
	}

	if (!skb)
		cxgbi_sock_fail_act_open(csk, -ENOMEM);
	else {
		skb->sk = (struct sock *)csk;
		t4_set_arp_err_handler(skb, csk,
				       cxgbi_sock_act_open_req_arp_failure);
		send_act_open_func(csk, skb, csk->l2t);
	}

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}

static inline bool is_neg_adv(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
		status == CPL_ERR_KEEPALV_NEG_ADVICE ||
		status == CPL_ERR_PERSIST_NEG_ADVICE;
}

static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	unsigned int atid =
		GET_TID_TID(GET_AOPEN_ATID(be32_to_cpu(rpl->atid_status)));
	unsigned int status = GET_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_atid(t, atid);
	if (unlikely(!csk)) {
		pr_err("NO matching conn. atid %u, tid %u.\n", atid, tid);
		goto rel_skb;
	}

	pr_info_ipaddr("tid %u/%u, status %u.\n"
		       "csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr),
		       atid, tid, status, csk, csk->state, csk->flags);

	if (is_neg_adv(status))
		goto rel_skb;

	module_put(THIS_MODULE);

	if (status && status != CPL_ERR_TCAM_FULL &&
	    status != CPL_ERR_CONN_EXIST &&
	    status != CPL_ERR_ARP_MISS)
		cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl));

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (status == CPL_ERR_CONN_EXIST &&
	    csk->retry_timer.function != csk_act_open_retry_timer) {
		csk->retry_timer.function = csk_act_open_retry_timer;
		mod_timer(&csk->retry_timer, jiffies + HZ / 2);
	} else
		cxgbi_sock_fail_act_open(csk,
					 act_open_rpl_status_to_errno(status));

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
rel_skb:
	__kfree_skb(skb);
}

static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_peer_close *req = (struct cpl_peer_close *)skb->data;
	unsigned int tid = GET_TID(req);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}
	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
		       (&csk->saddr), (&csk->daddr),
		       csk, csk->state, csk->flags, csk->tid);
	cxgbi_sock_rcv_peer_close(csk);
rel_skb:
	__kfree_skb(skb);
}

static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_close_con_rpl *rpl = (struct cpl_close_con_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}
	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
		       (&csk->saddr), (&csk->daddr),
		       csk, csk->state, csk->flags, csk->tid);
	cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
rel_skb:
	__kfree_skb(skb);
}

static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
				 int *need_rst)
{
	switch (abort_reason) {
	case CPL_ERR_BAD_SYN:	/* fall through */
	case CPL_ERR_CONN_RESET:
		return csk->state > CTP_ESTABLISHED ?
			-EPIPE : -ECONNRESET;
	case CPL_ERR_XMIT_TIMEDOUT:
	case CPL_ERR_PERSIST_TIMEDOUT:
	case CPL_ERR_FINWAIT2_TIMEDOUT:
	case CPL_ERR_KEEPALIVE_TIMEDOUT:
		return -ETIMEDOUT;
	default:
		return -EIO;
	}
}

static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_abort_req_rss *req = (struct cpl_abort_req_rss *)skb->data;
	unsigned int tid = GET_TID(req);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	int rst_status = CPL_ABORT_NO_RST;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
		       (&csk->saddr), (&csk->daddr),
		       csk, csk->state, csk->flags, csk->tid, req->status);

	if (is_neg_adv(req->status))
		goto rel_skb;

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);

	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
		send_tx_flowc_wr(csk);
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
	}

	cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
	cxgbi_sock_set_state(csk, CTP_ABORTING);

	send_abort_rpl(csk, rst_status);

	if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
		csk->err = abort_status_to_errno(csk, req->status, &rst_status);
		cxgbi_sock_closed(csk);
	}

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
rel_skb:
	__kfree_skb(skb);
}

static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_abort_rpl_rss *rpl = (struct cpl_abort_rpl_rss *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (!csk)
		goto rel_skb;

	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
		       (&csk->saddr), (&csk->daddr), csk,
		       csk->state, csk->flags, csk->tid, rpl->status);

	if (rpl->status == CPL_ERR_ABORT_FAILED)
		goto rel_skb;

	cxgbi_sock_rcv_abort_rpl(csk);
rel_skb:
	__kfree_skb(skb);
}

static void do_rx_data(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_rx_data *cpl = (struct cpl_rx_data *)skb->data;
	unsigned int tid = GET_TID(cpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (!csk) {
		pr_err("can't find connection for tid %u.\n", tid);
	} else {
		/* not expecting this, reset the connection. */
		pr_err("csk 0x%p, tid %u, rcv cpl_rx_data.\n", csk, tid);
		spin_lock_bh(&csk->lock);
		send_abort_req(csk);
		spin_unlock_bh(&csk->lock);
	}
	__kfree_skb(skb);
}

static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
	unsigned short pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
	unsigned int tid = GET_TID(cpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, skb, skb->len,
		  pdu_len_ddp);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = ntohl(cpl->seq);
	cxgbi_skcb_flags(skb) = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*cpl));
	__pskb_trim(skb, ntohs(cpl->len));

	if (!csk->skb_ulp_lhdr) {
		unsigned char *bhs;
		unsigned int hlen, dlen, plen;

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n",
			  csk, csk->state, csk->flags, csk->tid, skb);
		csk->skb_ulp_lhdr = skb;
		cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);

		if (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt) {
			pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n",
				csk->tid, cxgbi_skcb_tcp_seq(skb),
				csk->rcv_nxt);
			goto abort_conn;
		}

		bhs = skb->data;
		hlen = ntohs(cpl->len);
		dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF;

		plen = ISCSI_PDU_LEN(pdu_len_ddp);
		if (is_t4(lldi->adapter_type))
			plen -= 40;

		if ((hlen + dlen) != plen) {
			pr_info("tid 0x%x, CPL_ISCSI_HDR, pdu len "
				"mismatch %u != %u + %u, seq 0x%x.\n",
				csk->tid, plen, hlen, dlen,
				cxgbi_skcb_tcp_seq(skb));
			goto abort_conn;
		}

		cxgbi_skcb_rx_pdulen(skb) = (hlen + dlen + 3) & (~0x3);
		if (dlen)
			cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len;
		csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p, skb 0x%p, 0x%x,%u+%u,0x%x,0x%x.\n",
			  csk, skb, *bhs, hlen, dlen,
			  ntohl(*((unsigned int *)(bhs + 16))),
			  ntohl(*((unsigned int *)(bhs + 24))));
	} else {
		struct sk_buff *lskb = csk->skb_ulp_lhdr;

		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
			  csk, csk->state, csk->flags, skb, lskb);
	}

	__skb_queue_tail(&csk->receive_queue, skb);
	spin_unlock_bh(&csk->lock);
	return;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}

static void do_rx_data_ddp(struct cxgbi_device *cdev,
			   struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct sk_buff *lskb;
	struct cpl_rx_data_ddp *rpl = (struct cpl_rx_data_ddp *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	unsigned int status = ntohl(rpl->ddpvld);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
		  csk, csk->state, csk->flags, skb, status, csk->skb_ulp_lhdr);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u, bad state.\n",
			  csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	if (!csk->skb_ulp_lhdr) {
		pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid);
		goto abort_conn;
	}

	lskb = csk->skb_ulp_lhdr;
	csk->skb_ulp_lhdr = NULL;

	cxgbi_skcb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc);

	if (ntohs(rpl->len) != cxgbi_skcb_rx_pdulen(lskb))
		pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
			csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));

	if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
			csk, lskb, status, cxgbi_skcb_flags(lskb));
		cxgbi_skcb_set_flag(lskb, SKCBF_RX_HCRC_ERR);
	}
	if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
			csk, lskb, status, cxgbi_skcb_flags(lskb));
		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DCRC_ERR);
	}
	if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
			  csk, lskb, status);
		cxgbi_skcb_set_flag(lskb, SKCBF_RX_PAD_ERR);
	}
	if ((status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
	    !cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			  "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
			  csk, lskb, status);
		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA_DDPD);
	}
	log_debug(1 << CXGBI_DBG_PDU_RX,
		  "csk 0x%p, lskb 0x%p, f 0x%lx.\n",
		  csk, lskb, cxgbi_skcb_flags(lskb));

	cxgbi_skcb_set_flag(lskb, SKCBF_RX_STATUS);
	cxgbi_conn_pdu_ready(csk);
	spin_unlock_bh(&csk->lock);
	return;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}

static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk))
		pr_err("can't find connection for tid %u.\n", tid);
	else {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			  "csk 0x%p,%u,0x%lx,%u.\n",
			  csk, csk->state, csk->flags, csk->tid);
		cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una),
				      rpl->seq_vld);
	}
	__kfree_skb(skb);
}

static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	struct cxgbi_sock *csk;

	csk = lookup_tid(t, tid);
	if (!csk) {
		/* bail out before the logging below dereferences csk */
		pr_err("can't find conn. for tid %u.\n", tid);
		__kfree_skb(skb);
		return;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,%lx,%u, status 0x%x.\n",
		  csk, csk->state, csk->flags, csk->tid, rpl->status);

	if (rpl->status != CPL_ERR_NONE)
		pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
		       csk, tid, rpl->status);

	__kfree_skb(skb);
}

static int alloc_cpls(struct cxgbi_sock *csk)
{
	csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req),
				  0, GFP_KERNEL);
	if (!csk->cpl_close)
		return -ENOMEM;

	csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req),
				      0, GFP_KERNEL);
	if (!csk->cpl_abort_req)
		goto free_cpls;

	csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl),
				      0, GFP_KERNEL);
	if (!csk->cpl_abort_rpl)
		goto free_cpls;
	return 0;

free_cpls:
	cxgbi_sock_free_cpl_skbs(csk);
	return -ENOMEM;
}

static inline void l2t_put(struct cxgbi_sock *csk)
{
	if (csk->l2t) {
		cxgb4_l2t_release(csk->l2t);
		csk->l2t = NULL;
		cxgbi_sock_put(csk);
	}
}

static void release_offload_resources(struct cxgbi_sock *csk)
{
	struct cxgb4_lld_info *lldi;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_free_cpl_skbs(csk);
	if (csk->wr_cred != csk->wr_max_cred) {
		cxgbi_sock_purge_wr_queue(csk);
		cxgbi_sock_reset_wr_list(csk);
	}

	l2t_put(csk);
	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
		free_atid(csk);
	else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
		lldi = cxgbi_cdev_priv(csk->cdev);
		cxgb4_remove_tid(lldi->tids, 0, csk->tid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
		cxgbi_sock_put(csk);
	}
	csk->dst = NULL;
	csk->cdev = NULL;
}

static int init_act_open(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct net_device *ndev = cdev->ports[csk->port_id];
	struct sk_buff *skb = NULL;
	struct neighbour *n = NULL;
	void *daddr;
	unsigned int step;
	unsigned int size, size6;
	int t4 = is_t4(lldi->adapter_type);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p,%u,0x%lx,%u.\n",
		  csk, csk->state, csk->flags, csk->tid);

	if (csk->csk_family == AF_INET)
		daddr = &csk->daddr.sin_addr.s_addr;
#if IS_ENABLED(CONFIG_IPV6)
	else if (csk->csk_family == AF_INET6)
		daddr = &csk->daddr6.sin6_addr;
#endif
	else {
		pr_err("address family 0x%x not supported\n", csk->csk_family);
		goto rel_resource;
	}

	n = dst_neigh_lookup(csk->dst, daddr);

	if (!n) {
		pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
		goto rel_resource;
	}

	csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
	if (csk->atid < 0) {
		pr_err("%s, NO atid available.\n", ndev->name);
		goto rel_resource;
	}
	cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
	cxgbi_sock_get(csk);

	csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
	if (!csk->l2t) {
		pr_err("%s, cannot alloc l2t.\n", ndev->name);
		goto rel_resource;
	}
	cxgbi_sock_get(csk);

	if (t4) {
		size = sizeof(struct cpl_act_open_req);
		size6 = sizeof(struct cpl_act_open_req6);
	} else {
		size = sizeof(struct cpl_t5_act_open_req);
		size6 = sizeof(struct cpl_t5_act_open_req6);
	}

	if (csk->csk_family == AF_INET)
		skb = alloc_wr(size, 0, GFP_NOIO);
#if IS_ENABLED(CONFIG_IPV6)
	else
		skb = alloc_wr(size6, 0, GFP_NOIO);
#endif

	if (!skb)
		goto rel_resource;
	skb->sk = (struct sock *)csk;
	t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure);

	csk->mtu = dst_mtu(csk->dst);
	cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
	csk->tx_chan = cxgb4_port_chan(ndev);
	/* SMT two entries per row */
	csk->smac_idx = ((cxgb4_port_viid(ndev) & 0x7F)) << 1;
	step = lldi->ntxq / lldi->nchan;
	csk->txq_idx = cxgb4_port_idx(ndev) * step;
	step = lldi->nrxq / lldi->nchan;
	csk->rss_qid = lldi->rxq_ids[cxgb4_port_idx(ndev) * step];
	csk->wr_cred = lldi->wr_cred -
		       DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
	csk->wr_max_cred = csk->wr_cred;
	csk->wr_una_cred = 0;
	cxgbi_sock_reset_wr_list(csk);

	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u,%u,%u, mtu %u,%u, smac %u.\n",
		       (&csk->saddr), (&csk->daddr), csk, csk->state,
		       csk->flags, csk->tx_chan, csk->txq_idx, csk->rss_qid,
		       csk->mtu, csk->mss_idx, csk->smac_idx);

	/* must wait for either a act_open_rpl or act_open_establish */
	try_module_get(THIS_MODULE);
	cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
	if (csk->csk_family == AF_INET)
		send_act_open_req(csk, skb, csk->l2t);
#if IS_ENABLED(CONFIG_IPV6)
	else
		send_act_open_req6(csk, skb, csk->l2t);
#endif
	neigh_release(n);

	return 0;

rel_resource:
	if (n)
		neigh_release(n);
	if (skb)
		__kfree_skb(skb);
	return -EINVAL;
}

static cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = do_act_establish,
	[CPL_ACT_OPEN_RPL] = do_act_open_rpl,
	[CPL_PEER_CLOSE] = do_peer_close,
	[CPL_ABORT_REQ_RSS] = do_abort_req_rss,
	[CPL_ABORT_RPL_RSS] = do_abort_rpl_rss,
	[CPL_CLOSE_CON_RPL] = do_close_con_rpl,
	[CPL_FW4_ACK] = do_fw4_ack,
	[CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
	[CPL_ISCSI_DATA] = do_rx_iscsi_hdr,
	[CPL_SET_TCB_RPL] = do_set_tcb_rpl,
	[CPL_RX_DATA_DDP] = do_rx_data_ddp,
	[CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
	[CPL_RX_DATA] = do_rx_data,
};

int cxgb4i_ofld_init(struct cxgbi_device *cdev)
{
	int rc;

	if (cxgb4i_max_connect > CXGB4I_MAX_CONN)
		cxgb4i_max_connect = CXGB4I_MAX_CONN;

	rc = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base,
					 cxgb4i_max_connect);
	if (rc < 0)
		return rc;

	cdev->csk_release_offload_resources = release_offload_resources;
	cdev->csk_push_tx_frames = push_tx_frames;
	cdev->csk_send_abort_req = send_abort_req;
	cdev->csk_send_close_req = send_close_req;
	cdev->csk_send_rx_credits = send_rx_credits;
	cdev->csk_alloc_cpls = alloc_cpls;
	cdev->csk_init_act_open = init_act_open;

	pr_info("cdev 0x%p, offload up, added.\n", cdev);
	return 0;
}

/*
 * functions to program the pagepod in h/w
 */
#define ULPMEM_IDATA_MAX_NPPODS	4 /* 256/PPOD_SIZE */
static inline void ulp_mem_io_set_hdr(struct cxgb4_lld_info *lldi,
				      struct ulp_mem_io *req,
				      unsigned int wr_len, unsigned int dlen,
				      unsigned int pm_addr)
{
	struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);

	INIT_ULPTX_WR(req, wr_len, 0, 0);
	if (is_t4(lldi->adapter_type))
		req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
				 (ULP_MEMIO_ORDER_F));
	else
		req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
				 (T5_ULP_MEMIO_IMM_F));
	req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
	req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));

	idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
	idata->len = htonl(dlen);
}

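/*
 * Worked example (added for illustration): with 64-byte pagepods,
 * ULPMEM_IDATA_MAX_NPPODS caps a single ULP_TX_MEM_WRITE at 4 pagepods,
 * i.e. dlen = 256 bytes of immediate data; the header above then encodes
 * dlen >> 5 = 8 and pm_addr >> 5 in the 32-byte units the hardware
 * expects.
 */
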
static int ddp_ppod_write_idata(struct cxgbi_device *cdev, unsigned int port_id,
				struct cxgbi_pagepod_hdr *hdr, unsigned int idx,
				unsigned int npods,
				struct cxgbi_gather_list *gl,
				unsigned int gl_pidx)
{
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct sk_buff *skb;
	struct ulp_mem_io *req;
	struct ulptx_idata *idata;
	struct cxgbi_pagepod *ppod;
	unsigned int pm_addr = idx * PPOD_SIZE + ddp->llimit;
	unsigned int dlen = PPOD_SIZE * npods;
	unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
				      sizeof(struct ulptx_idata) + dlen, 16);
	unsigned int i;

	skb = alloc_wr(wr_len, 0, GFP_ATOMIC);
	if (!skb) {
		pr_err("cdev 0x%p, idx %u, npods %u, OOM.\n",
		       cdev, idx, npods);
		return -ENOMEM;
	}
	req = (struct ulp_mem_io *)skb->head;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	ulp_mem_io_set_hdr(lldi, req, wr_len, dlen, pm_addr);
	idata = (struct ulptx_idata *)(req + 1);
	ppod = (struct cxgbi_pagepod *)(idata + 1);

	for (i = 0; i < npods; i++, ppod++, gl_pidx += PPOD_PAGES_MAX) {
		if (!hdr && !gl)
			cxgbi_ddp_ppod_clear(ppod);
		else
			cxgbi_ddp_ppod_set(ppod, hdr, gl, gl_pidx);
	}

	cxgb4_ofld_send(cdev->ports[port_id], skb);
	return 0;
}

static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr,
		       unsigned int idx, unsigned int npods,
		       struct cxgbi_gather_list *gl)
{
	unsigned int i, cnt;
	int err = 0;

	for (i = 0; i < npods; i += cnt, idx += cnt) {
		cnt = npods - i;
		if (cnt > ULPMEM_IDATA_MAX_NPPODS)
			cnt = ULPMEM_IDATA_MAX_NPPODS;
		err = ddp_ppod_write_idata(csk->cdev, csk->port_id, hdr,
					   idx, cnt, gl, 4 * i);
		if (err < 0)
			break;
	}
	return err;
}

static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag,
			  unsigned int idx, unsigned int npods)
{
	unsigned int i, cnt;
	int err;

	for (i = 0; i < npods; i += cnt, idx += cnt) {
		cnt = npods - i;
		if (cnt > ULPMEM_IDATA_MAX_NPPODS)
			cnt = ULPMEM_IDATA_MAX_NPPODS;
		err = ddp_ppod_write_idata(chba->cdev, chba->port_id, NULL,
					   idx, cnt, NULL, 0);
		if (err < 0)
			break;
	}
}

static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
				int pg_idx, bool reply)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!pg_idx || pg_idx >= DDP_PGIDX_MAX)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* set up ulp page size */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
	req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 8);
	req->val = cpu_to_be64(pg_idx << 8);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return 0;
}

static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
				 int hcrc, int dcrc, int reply)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!hcrc && !dcrc)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	csk->hcrc_len = (hcrc ? 4 : 0);
	csk->dcrc_len = (dcrc ? 4 : 0);
	/* set up ulp submode */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 4);
	req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
				(dcrc ? ULP_CRC_DATA : 0)) << 4);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		  "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return 0;
}

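/*
 * Usage note (added for clarity): enabling both digests, e.g.
 * ddp_setup_conn_digest(csk, csk->tid, 1, 1, 0), programs
 * (ULP_CRC_HEADER | ULP_CRC_DATA) << 4 into the TCB so the hardware
 * generates and verifies the CRC32C header/data digests, while the
 * 4-byte hcrc_len/dcrc_len let the rx path account for the digest bytes
 * in each PDU.
 */
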
static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	unsigned int tagmask, pgsz_factor[4];
	int err;

	if (ddp) {
		kref_get(&ddp->refcnt);
		pr_warn("cdev 0x%p, ddp 0x%p already set up.\n",
			cdev, cdev->ddp);
		return -EALREADY;
	}

	err = cxgbi_ddp_init(cdev, lldi->vr->iscsi.start,
			lldi->vr->iscsi.start + lldi->vr->iscsi.size - 1,
			lldi->iscsi_iolen, lldi->iscsi_iolen);
	if (err < 0)
		return err;

	ddp = cdev->ddp;

	tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
	cxgbi_ddp_page_size_factor(pgsz_factor);
	cxgb4_iscsi_init(lldi->ports[0], tagmask, pgsz_factor);

	cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
	cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
	cdev->csk_ddp_set = ddp_set_map;
	cdev->csk_ddp_clear = ddp_clear_map;

	pr_info("cxgb4i 0x%p tag: sw %u, rsvd %u,%u, mask 0x%x.\n",
		cdev, cdev->tag_format.sw_bits, cdev->tag_format.rsvd_bits,
		cdev->tag_format.rsvd_shift, cdev->tag_format.rsvd_mask);
	pr_info("cxgb4i 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u, "
		"%u/%u.\n",
		cdev, ddp->nppods, ddp->idx_bits, ddp->idx_mask,
		ddp->rsvd_tag_mask, ddp->max_txsz, lldi->iscsi_iolen,
		ddp->max_rxsz, lldi->iscsi_iolen);
	pr_info("cxgb4i 0x%p max payload size: %u/%u, %u/%u.\n",
		cdev, cdev->tx_max_size, ddp->max_txsz, cdev->rx_max_size,
		ddp->max_rxsz);
	return 0;
}

static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
{
	struct cxgbi_device *cdev;
	struct port_info *pi;
	int i, rc;

	cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports);
	if (!cdev) {
		pr_info("t4 device 0x%p, register failed.\n", lldi);
		return NULL;
	}
	pr_info("0x%p,0x%x, ports %u,%s, chan %u, q %u,%u, wr %u.\n",
		cdev, lldi->adapter_type, lldi->nports,
		lldi->ports[0]->name, lldi->nchan, lldi->ntxq,
		lldi->nrxq, lldi->wr_cred);
	for (i = 0; i < lldi->nrxq; i++)
		log_debug(1 << CXGBI_DBG_DEV,
			  "t4 0x%p, rxq id #%d: %u.\n",
			  cdev, i, lldi->rxq_ids[i]);

	memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi));
	cdev->flags = CXGBI_FLAG_DEV_T4;
	cdev->pdev = lldi->pdev;
	cdev->ports = lldi->ports;
	cdev->nports = lldi->nports;
	cdev->mtus = lldi->mtus;
	cdev->nmtus = NMTUS;
	cdev->snd_win = cxgb4i_snd_win;
	cdev->rcv_win = cxgb4i_rcv_win;
	cdev->rx_credit_thres = cxgb4i_rx_credit_thres;
	cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
	cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
	cdev->itp = &cxgb4i_iscsi_transport;

	cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
			<< FW_VIID_PFN_S;
	pr_info("cdev 0x%p,%s, pfvf %u.\n",
		cdev, lldi->ports[0]->name, cdev->pfvf);

	rc = cxgb4i_ddp_init(cdev);
	if (rc) {
		pr_info("t4 0x%p ddp init failed.\n", cdev);
		goto err_out;
	}
	rc = cxgb4i_ofld_init(cdev);
	if (rc) {
		pr_info("t4 0x%p ofld init failed.\n", cdev);
		goto err_out;
	}

	rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, CXGBI_MAX_CONN,
			    &cxgb4i_host_template, cxgb4i_stt);
	if (rc)
		goto err_out;

	for (i = 0; i < cdev->nports; i++) {
		pi = netdev_priv(lldi->ports[i]);
		cdev->hbas[i]->port_id = pi->port_id;
	}
	return cdev;

err_out:
	cxgbi_device_unregister(cdev);
	return ERR_PTR(-ENOMEM);
}

#define RX_PULL_LEN	128
static int t4_uld_rx_handler(void *handle, const __be64 *rsp,
			     const struct pkt_gl *pgl)
{
	const struct cpl_act_establish *rpl;
	struct sk_buff *skb;
	unsigned int opc;
	struct cxgbi_device *cdev = handle;

	if (pgl == NULL) {
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_wr(len, 0, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else {
		if (unlikely(*(u8 *)rsp != *(u8 *)pgl->va)) {
			pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
				pgl->va, be64_to_cpu(*rsp),
				be64_to_cpu(*(u64 *)pgl->va),
				pgl->tot_len);
			return 0;
		}
		skb = cxgb4_pktgl_to_skb(pgl, RX_PULL_LEN, RX_PULL_LEN);
		if (unlikely(!skb))
			goto nomem;
	}

	rpl = (struct cpl_act_establish *)skb->data;
	opc = rpl->ot.opcode;
	log_debug(1 << CXGBI_DBG_TOE,
		  "cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
		  cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid), skb);
	if (cxgb4i_cplhandlers[opc])
		cxgb4i_cplhandlers[opc](cdev, skb);
	else {
		pr_err("No handler for opcode 0x%x.\n", opc);
		__kfree_skb(skb);
	}
	return 0;
nomem:
	log_debug(1 << CXGBI_DBG_TOE, "OOM bailing out.\n");
	return 1;
}

static int t4_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct cxgbi_device *cdev = handle;

	switch (state) {
	case CXGB4_STATE_UP:
		pr_info("cdev 0x%p, UP.\n", cdev);
		break;
	case CXGB4_STATE_START_RECOVERY:
		pr_info("cdev 0x%p, RECOVERY.\n", cdev);
		/* close all connections */
		break;
	case CXGB4_STATE_DOWN:
		pr_info("cdev 0x%p, DOWN.\n", cdev);
		break;
	case CXGB4_STATE_DETACH:
		pr_info("cdev 0x%p, DETACH.\n", cdev);
		cxgbi_device_unregister(cdev);
		break;
	default:
		pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
		break;
	}
	return 0;
}

static int __init cxgb4i_init_module(void)
{
	int rc;

	printk(KERN_INFO "%s", version);

	rc = cxgbi_iscsi_init(&cxgb4i_iscsi_transport, &cxgb4i_stt);
	if (rc < 0)
		return rc;
	cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);

	return 0;
}

static void __exit cxgb4i_exit_module(void)
{
	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
	cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
	cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
}

module_init(cxgb4i_init_module);
module_exit(cxgb4i_exit_module);