1 /*
2  * libcxgbi.c: Chelsio common library for T3/T4 iSCSI driver.
3  *
4  * Copyright (c) 2010 Chelsio Communications, Inc.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  *
10  * Written by: Karen Xie (kxie@chelsio.com)
11  * Written by: Rakesh Ranjan (rranjan@chelsio.com)
12  */
13
14 #define pr_fmt(fmt)     KBUILD_MODNAME ":%s: " fmt, __func__
15
16 #include <linux/skbuff.h>
17 #include <linux/crypto.h>
18 #include <linux/scatterlist.h>
19 #include <linux/pci.h>
20 #include <scsi/scsi.h>
21 #include <scsi/scsi_cmnd.h>
22 #include <scsi/scsi_host.h>
23 #include <linux/if_vlan.h>
24 #include <linux/inet.h>
25 #include <net/dst.h>
26 #include <net/route.h>
27 #include <net/ipv6.h>
28 #include <net/ip6_route.h>
29 #include <net/addrconf.h>
30
31 #include <linux/inetdevice.h>   /* ip_dev_find */
32 #include <linux/module.h>
33 #include <net/tcp.h>
34
35 static unsigned int dbg_level;
36
37 #include "libcxgbi.h"
38
39 #define DRV_MODULE_NAME         "libcxgbi"
40 #define DRV_MODULE_DESC         "Chelsio iSCSI driver library"
41 #define DRV_MODULE_VERSION      "0.9.0"
42 #define DRV_MODULE_RELDATE      "Jun. 2010"
43
44 MODULE_AUTHOR("Chelsio Communications, Inc.");
45 MODULE_DESCRIPTION(DRV_MODULE_DESC);
46 MODULE_VERSION(DRV_MODULE_VERSION);
47 MODULE_LICENSE("GPL");
48
49 module_param(dbg_level, uint, 0644);
50 MODULE_PARM_DESC(dbg_level, "libcxgbi debug level (default=0)");
51
52
53 /*
54  * cxgbi device management
55  * maintains a list of the cxgbi devices
56  */
57 static LIST_HEAD(cdev_list);
58 static DEFINE_MUTEX(cdev_mutex);
59
60 static LIST_HEAD(cdev_rcu_list);
61 static DEFINE_SPINLOCK(cdev_rcu_lock);
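/*
 * Two bookkeeping lists are kept for registered cxgbi devices: cdev_list,
 * walked under cdev_mutex from process context (register, unregister and
 * the netdev/lldev lookups below), and cdev_rcu_list, walked under
 * rcu_read_lock() by cxgbi_device_find_by_netdev_rcu() so a device can be
 * looked up without taking cdev_mutex.
 */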
62
63 int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base,
64                                 unsigned int max_conn)
65 {
66         struct cxgbi_ports_map *pmap = &cdev->pmap;
67
68         pmap->port_csk = cxgbi_alloc_big_mem(max_conn *
69                                              sizeof(struct cxgbi_sock *),
70                                              GFP_KERNEL);
71         if (!pmap->port_csk) {
72                 pr_warn("cdev 0x%p, portmap OOM %u.\n", cdev, max_conn);
73                 return -ENOMEM;
74         }
75
76         pmap->max_connect = max_conn;
77         pmap->sport_base = base;
78         spin_lock_init(&pmap->lock);
79         return 0;
80 }
81 EXPORT_SYMBOL_GPL(cxgbi_device_portmap_create);
82
83 void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev)
84 {
85         struct cxgbi_ports_map *pmap = &cdev->pmap;
86         struct cxgbi_sock *csk;
87         int i;
88
89         for (i = 0; i < pmap->max_connect; i++) {
90                 if (pmap->port_csk[i]) {
91                         csk = pmap->port_csk[i];
92                         pmap->port_csk[i] = NULL;
93                         log_debug(1 << CXGBI_DBG_SOCK,
94                                 "csk 0x%p, cdev 0x%p, offload down.\n",
95                                 csk, cdev);
96                         spin_lock_bh(&csk->lock);
97                         cxgbi_sock_set_flag(csk, CTPF_OFFLOAD_DOWN);
98                         cxgbi_sock_closed(csk);
99                         spin_unlock_bh(&csk->lock);
100                         cxgbi_sock_put(csk);
101                 }
102         }
103 }
104 EXPORT_SYMBOL_GPL(cxgbi_device_portmap_cleanup);
105
106 static inline void cxgbi_device_destroy(struct cxgbi_device *cdev)
107 {
108         log_debug(1 << CXGBI_DBG_DEV,
109                 "cdev 0x%p, p# %u.\n", cdev, cdev->nports);
110         cxgbi_hbas_remove(cdev);
111         cxgbi_device_portmap_cleanup(cdev);
112         if (cdev->dev_ddp_cleanup)
113                 cdev->dev_ddp_cleanup(cdev);
114         else
115                 cxgbi_ddp_cleanup(cdev);
116         if (cdev->ddp)
117                 cxgbi_ddp_cleanup(cdev);
118         if (cdev->pmap.max_connect)
119                 cxgbi_free_big_mem(cdev->pmap.port_csk);
120         kfree(cdev);
121 }
122
123 struct cxgbi_device *cxgbi_device_register(unsigned int extra,
124                                            unsigned int nports)
125 {
126         struct cxgbi_device *cdev;
127
128         cdev = kzalloc(sizeof(*cdev) + extra + nports *
129                         (sizeof(struct cxgbi_hba *) +
130                          sizeof(struct net_device *)),
131                         GFP_KERNEL);
132         if (!cdev) {
133                 pr_warn("nport %d, OOM.\n", nports);
134                 return NULL;
135         }
136         cdev->ports = (struct net_device **)(cdev + 1);
137         cdev->hbas = (struct cxgbi_hba **)(((char *)cdev->ports) + nports *
138                                                 sizeof(struct net_device *));
139         if (extra)
140                 cdev->dd_data = ((char *)cdev->hbas) +
141                                 nports * sizeof(struct cxgbi_hba *);
142         spin_lock_init(&cdev->pmap.lock);
143
144         mutex_lock(&cdev_mutex);
145         list_add_tail(&cdev->list_head, &cdev_list);
146         mutex_unlock(&cdev_mutex);
147
148         spin_lock(&cdev_rcu_lock);
149         list_add_tail_rcu(&cdev->rcu_node, &cdev_rcu_list);
150         spin_unlock(&cdev_rcu_lock);
151
152         log_debug(1 << CXGBI_DBG_DEV,
153                 "cdev 0x%p, p# %u.\n", cdev, nports);
154         return cdev;
155 }
156 EXPORT_SYMBOL_GPL(cxgbi_device_register);
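/*
 * Illustrative registration sequence for a lower-level driver (a sketch
 * only; "struct lld_priv" and the setup steps are hypothetical, not taken
 * from a specific LLD):
 *
 *	cdev = cxgbi_device_register(sizeof(struct lld_priv), nports);
 *	if (!cdev)
 *		return -ENOMEM;
 *	(then set cdev->nports, cdev->pdev, cdev->ports[], the csk_*
 *	 callbacks, call cxgbi_hbas_add(), and on teardown call
 *	 cxgbi_device_unregister().)
 *
 * The "extra" bytes requested here are reachable through cdev->dd_data
 * and hold the LLD's private per-device state.
 */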
157
158 void cxgbi_device_unregister(struct cxgbi_device *cdev)
159 {
160         log_debug(1 << CXGBI_DBG_DEV,
161                 "cdev 0x%p, p# %u,%s.\n",
162                 cdev, cdev->nports, cdev->nports ? cdev->ports[0]->name : "");
163
164         mutex_lock(&cdev_mutex);
165         list_del(&cdev->list_head);
166         mutex_unlock(&cdev_mutex);
167
168         spin_lock(&cdev_rcu_lock);
169         list_del_rcu(&cdev->rcu_node);
170         spin_unlock(&cdev_rcu_lock);
171         synchronize_rcu();
172
173         cxgbi_device_destroy(cdev);
174 }
175 EXPORT_SYMBOL_GPL(cxgbi_device_unregister);
176
177 void cxgbi_device_unregister_all(unsigned int flag)
178 {
179         struct cxgbi_device *cdev, *tmp;
180
181         mutex_lock(&cdev_mutex);
182         list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
183                 if ((cdev->flags & flag) == flag) {
184                         mutex_unlock(&cdev_mutex);
185                         cxgbi_device_unregister(cdev);
186                         mutex_lock(&cdev_mutex);
187                 }
188         }
189         mutex_unlock(&cdev_mutex);
190 }
191 EXPORT_SYMBOL_GPL(cxgbi_device_unregister_all);
192
193 struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev)
194 {
195         struct cxgbi_device *cdev, *tmp;
196
197         mutex_lock(&cdev_mutex);
198         list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
199                 if (cdev->lldev == lldev) {
200                         mutex_unlock(&cdev_mutex);
201                         return cdev;
202                 }
203         }
204         mutex_unlock(&cdev_mutex);
205
206         log_debug(1 << CXGBI_DBG_DEV,
207                 "lldev 0x%p, NO match found.\n", lldev);
208         return NULL;
209 }
210 EXPORT_SYMBOL_GPL(cxgbi_device_find_by_lldev);
211
212 struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev,
213                                                  int *port)
214 {
215         struct net_device *vdev = NULL;
216         struct cxgbi_device *cdev, *tmp;
217         int i;
218
219         if (ndev->priv_flags & IFF_802_1Q_VLAN) {
220                 vdev = ndev;
221                 ndev = vlan_dev_real_dev(ndev);
222                 log_debug(1 << CXGBI_DBG_DEV,
223                         "vlan dev %s -> %s.\n", vdev->name, ndev->name);
224         }
225
226         mutex_lock(&cdev_mutex);
227         list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
228                 for (i = 0; i < cdev->nports; i++) {
229                         if (ndev == cdev->ports[i]) {
230                                 cdev->hbas[i]->vdev = vdev;
231                                 mutex_unlock(&cdev_mutex);
232                                 if (port)
233                                         *port = i;
234                                 return cdev;
235                         }
236                 }
237         }
238         mutex_unlock(&cdev_mutex);
239         log_debug(1 << CXGBI_DBG_DEV,
240                 "ndev 0x%p, %s, NO match found.\n", ndev, ndev->name);
241         return NULL;
242 }
243 EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev);
244
245 struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *ndev,
246                                                      int *port)
247 {
248         struct net_device *vdev = NULL;
249         struct cxgbi_device *cdev;
250         int i;
251
252         if (ndev->priv_flags & IFF_802_1Q_VLAN) {
253                 vdev = ndev;
254                 ndev = vlan_dev_real_dev(ndev);
255                 pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name);
256         }
257
258         rcu_read_lock();
259         list_for_each_entry_rcu(cdev, &cdev_rcu_list, rcu_node) {
260                 for (i = 0; i < cdev->nports; i++) {
261                         if (ndev == cdev->ports[i]) {
262                                 cdev->hbas[i]->vdev = vdev;
263                                 rcu_read_unlock();
264                                 if (port)
265                                         *port = i;
266                                 return cdev;
267                         }
268                 }
269         }
270         rcu_read_unlock();
271
272         log_debug(1 << CXGBI_DBG_DEV,
273                   "ndev 0x%p, %s, NO match found.\n", ndev, ndev->name);
274         return NULL;
275 }
276 EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev_rcu);
277
278 #if IS_ENABLED(CONFIG_IPV6)
279 static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev,
280                                                      int *port)
281 {
282         struct net_device *vdev = NULL;
283         struct cxgbi_device *cdev, *tmp;
284         int i;
285
286         if (ndev->priv_flags & IFF_802_1Q_VLAN) {
287                 vdev = ndev;
288                 ndev = vlan_dev_real_dev(ndev);
289                 pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name);
290         }
291
292         mutex_lock(&cdev_mutex);
293         list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
294                 for (i = 0; i < cdev->nports; i++) {
295                         if (!memcmp(ndev->dev_addr, cdev->ports[i]->dev_addr,
296                                     MAX_ADDR_LEN)) {
297                                 cdev->hbas[i]->vdev = vdev;
298                                 mutex_unlock(&cdev_mutex);
299                                 if (port)
300                                         *port = i;
301                                 return cdev;
302                         }
303                 }
304         }
305         mutex_unlock(&cdev_mutex);
306         log_debug(1 << CXGBI_DBG_DEV,
307                   "ndev 0x%p, %s, NO match mac found.\n",
308                   ndev, ndev->name);
309         return NULL;
310 }
311 #endif
312
313 void cxgbi_hbas_remove(struct cxgbi_device *cdev)
314 {
315         int i;
316         struct cxgbi_hba *chba;
317
318         log_debug(1 << CXGBI_DBG_DEV,
319                 "cdev 0x%p, p#%u.\n", cdev, cdev->nports);
320
321         for (i = 0; i < cdev->nports; i++) {
322                 chba = cdev->hbas[i];
323                 if (chba) {
324                         cdev->hbas[i] = NULL;
325                         iscsi_host_remove(chba->shost);
326                         pci_dev_put(cdev->pdev);
327                         iscsi_host_free(chba->shost);
328                 }
329         }
330 }
331 EXPORT_SYMBOL_GPL(cxgbi_hbas_remove);
332
333 int cxgbi_hbas_add(struct cxgbi_device *cdev, u64 max_lun,
334                 unsigned int max_id, struct scsi_host_template *sht,
335                 struct scsi_transport_template *stt)
336 {
337         struct cxgbi_hba *chba;
338         struct Scsi_Host *shost;
339         int i, err;
340
341         log_debug(1 << CXGBI_DBG_DEV, "cdev 0x%p, p#%u.\n", cdev, cdev->nports);
342
343         for (i = 0; i < cdev->nports; i++) {
344                 shost = iscsi_host_alloc(sht, sizeof(*chba), 1);
345                 if (!shost) {
346                         pr_info("0x%p, p%d, %s, host alloc failed.\n",
347                                 cdev, i, cdev->ports[i]->name);
348                         err = -ENOMEM;
349                         goto err_out;
350                 }
351
352                 shost->transportt = stt;
353                 shost->max_lun = max_lun;
354                 shost->max_id = max_id;
355                 shost->max_channel = 0;
356                 shost->max_cmd_len = 16;
357
358                 chba = iscsi_host_priv(shost);
359                 chba->cdev = cdev;
360                 chba->ndev = cdev->ports[i];
361                 chba->shost = shost;
362
363                 log_debug(1 << CXGBI_DBG_DEV,
364                         "cdev 0x%p, p#%d %s: chba 0x%p.\n",
365                         cdev, i, cdev->ports[i]->name, chba);
366
367                 pci_dev_get(cdev->pdev);
368                 err = iscsi_host_add(shost, &cdev->pdev->dev);
369                 if (err) {
370                         pr_info("cdev 0x%p, p#%d %s, host add failed.\n",
371                                 cdev, i, cdev->ports[i]->name);
372                         pci_dev_put(cdev->pdev);
373                         scsi_host_put(shost);
374                         goto err_out;
375                 }
376
377                 cdev->hbas[i] = chba;
378         }
379
380         return 0;
381
382 err_out:
383         cxgbi_hbas_remove(cdev);
384         return err;
385 }
386 EXPORT_SYMBOL_GPL(cxgbi_hbas_add);
387
388 /*
389  * iSCSI offload
390  *
391  * - source port management
392  *   To find a free source port in the port allocation map we use a very simple
393  *   rotor scheme to look for the next free port.
394  *
395  *   If a source port has been specified, make sure that it doesn't collide
396  *   with our normal source port allocation map.  If it's outside the range
397  *   of our allocation/deallocation scheme, just let the caller use it.
398  *
399  *   If the source port is outside our allocation range, the caller is
400  *   responsible for keeping track of its own port usage.
401  */
402
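/*
 * Worked example of the rotor in sock_get_port() (numbers are purely
 * illustrative): with sport_base = 1024, max_connect = 8 and next = 0, the
 * first connection probes slot 1 and is bound to source port 1025, the next
 * one gets 1026, and so on; slot 0 (port 1024) is only handed out once the
 * rotor wraps around.  sock_put_port() later clears port_csk[idx] and
 * decrements "used" so the slot can be reused.
 */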
403 static struct cxgbi_sock *find_sock_on_port(struct cxgbi_device *cdev,
404                                             unsigned char port_id)
405 {
406         struct cxgbi_ports_map *pmap = &cdev->pmap;
407         unsigned int i;
408         unsigned int used;
409
410         if (!pmap->max_connect || !pmap->used)
411                 return NULL;
412
413         spin_lock_bh(&pmap->lock);
414         used = pmap->used;
415         for (i = 0; used && i < pmap->max_connect; i++) {
416                 struct cxgbi_sock *csk = pmap->port_csk[i];
417
418                 if (csk) {
419                         if (csk->port_id == port_id) {
420                                 spin_unlock_bh(&pmap->lock);
421                                 return csk;
422                         }
423                         used--;
424                 }
425         }
426         spin_unlock_bh(&pmap->lock);
427
428         return NULL;
429 }
430
431 static int sock_get_port(struct cxgbi_sock *csk)
432 {
433         struct cxgbi_device *cdev = csk->cdev;
434         struct cxgbi_ports_map *pmap = &cdev->pmap;
435         unsigned int start;
436         int idx;
437         __be16 *port;
438
439         if (!pmap->max_connect) {
440                 pr_err("cdev 0x%p, p#%u %s, NO port map.\n",
441                            cdev, csk->port_id, cdev->ports[csk->port_id]->name);
442                 return -EADDRNOTAVAIL;
443         }
444
445         if (csk->csk_family == AF_INET)
446                 port = &csk->saddr.sin_port;
447         else /* ipv6 */
448                 port = &csk->saddr6.sin6_port;
449
450         if (*port) {
451                 pr_err("source port NON-ZERO %u.\n",
452                         ntohs(*port));
453                 return -EADDRINUSE;
454         }
455
456         spin_lock_bh(&pmap->lock);
457         if (pmap->used >= pmap->max_connect) {
458                 spin_unlock_bh(&pmap->lock);
459                 pr_info("cdev 0x%p, p#%u %s, ALL ports used.\n",
460                         cdev, csk->port_id, cdev->ports[csk->port_id]->name);
461                 return -EADDRNOTAVAIL;
462         }
463
464         start = idx = pmap->next;
465         do {
466                 if (++idx >= pmap->max_connect)
467                         idx = 0;
468                 if (!pmap->port_csk[idx]) {
469                         pmap->used++;
470                         *port = htons(pmap->sport_base + idx);
471                         pmap->next = idx;
472                         pmap->port_csk[idx] = csk;
473                         spin_unlock_bh(&pmap->lock);
474                         cxgbi_sock_get(csk);
475                         log_debug(1 << CXGBI_DBG_SOCK,
476                                 "cdev 0x%p, p#%u %s, p %u, %u.\n",
477                                 cdev, csk->port_id,
478                                 cdev->ports[csk->port_id]->name,
479                                 pmap->sport_base + idx, pmap->next);
480                         return 0;
481                 }
482         } while (idx != start);
483         spin_unlock_bh(&pmap->lock);
484
485         /* should not happen */
486         pr_warn("cdev 0x%p, p#%u %s, next %u?\n",
487                 cdev, csk->port_id, cdev->ports[csk->port_id]->name,
488                 pmap->next);
489         return -EADDRNOTAVAIL;
490 }
491
492 static void sock_put_port(struct cxgbi_sock *csk)
493 {
494         struct cxgbi_device *cdev = csk->cdev;
495         struct cxgbi_ports_map *pmap = &cdev->pmap;
496         __be16 *port;
497
498         if (csk->csk_family == AF_INET)
499                 port = &csk->saddr.sin_port;
500         else /* ipv6 */
501                 port = &csk->saddr6.sin6_port;
502
503         if (*port) {
504                 int idx = ntohs(*port) - pmap->sport_base;
505
506                 *port = 0;
507                 if (idx < 0 || idx >= pmap->max_connect) {
508                         pr_err("cdev 0x%p, p#%u %s, port %u OOR.\n",
509                                 cdev, csk->port_id,
510                                 cdev->ports[csk->port_id]->name,
511                                 ntohs(*port));
512                         return;
513                 }
514
515                 spin_lock_bh(&pmap->lock);
516                 pmap->port_csk[idx] = NULL;
517                 pmap->used--;
518                 spin_unlock_bh(&pmap->lock);
519
520                 log_debug(1 << CXGBI_DBG_SOCK,
521                         "cdev 0x%p, p#%u %s, release %u.\n",
522                         cdev, csk->port_id, cdev->ports[csk->port_id]->name,
523                         pmap->sport_base + idx);
524
525                 cxgbi_sock_put(csk);
526         }
527 }
528
529 /*
530  * iscsi tcp connection
531  */
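/*
 * Broadly, a cxgbi_sock starts life in CTP_CLOSED (cxgbi_sock_create()),
 * moves to CTP_ESTABLISHED once the hardware reports the connection up
 * (cxgbi_sock_established()), and is torn down through the
 * CTP_ACTIVE_CLOSE/CTP_PASSIVE_CLOSE/CTP_CLOSE_WAIT_* states driven by
 * need_active_close() and the cxgbi_sock_rcv_*() handlers below.  The
 * structure is reference counted via csk->refcnt (cxgbi_sock_get/put).
 */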
532 void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *csk)
533 {
534         if (csk->cpl_close) {
535                 kfree_skb(csk->cpl_close);
536                 csk->cpl_close = NULL;
537         }
538         if (csk->cpl_abort_req) {
539                 kfree_skb(csk->cpl_abort_req);
540                 csk->cpl_abort_req = NULL;
541         }
542         if (csk->cpl_abort_rpl) {
543                 kfree_skb(csk->cpl_abort_rpl);
544                 csk->cpl_abort_rpl = NULL;
545         }
546 }
547 EXPORT_SYMBOL_GPL(cxgbi_sock_free_cpl_skbs);
548
549 static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
550 {
551         struct cxgbi_sock *csk = kzalloc(sizeof(*csk), GFP_NOIO);
552
553         if (!csk) {
554                 pr_info("alloc csk %zu failed.\n", sizeof(*csk));
555                 return NULL;
556         }
557
558         if (cdev->csk_alloc_cpls(csk) < 0) {
559                 pr_info("csk 0x%p, alloc cpls failed.\n", csk);
560                 kfree(csk);
561                 return NULL;
562         }
563
564         spin_lock_init(&csk->lock);
565         kref_init(&csk->refcnt);
566         skb_queue_head_init(&csk->receive_queue);
567         skb_queue_head_init(&csk->write_queue);
568         setup_timer(&csk->retry_timer, NULL, (unsigned long)csk);
569         rwlock_init(&csk->callback_lock);
570         csk->cdev = cdev;
571         csk->flags = 0;
572         cxgbi_sock_set_state(csk, CTP_CLOSED);
573
574         log_debug(1 << CXGBI_DBG_SOCK, "cdev 0x%p, new csk 0x%p.\n", cdev, csk);
575
576         return csk;
577 }
578
579 static struct rtable *find_route_ipv4(struct flowi4 *fl4,
580                                       __be32 saddr, __be32 daddr,
581                                       __be16 sport, __be16 dport, u8 tos)
582 {
583         struct rtable *rt;
584
585         rt = ip_route_output_ports(&init_net, fl4, NULL, daddr, saddr,
586                                    dport, sport, IPPROTO_TCP, tos, 0);
587         if (IS_ERR(rt))
588                 return NULL;
589
590         return rt;
591 }
592
593 static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
594 {
595         struct sockaddr_in *daddr = (struct sockaddr_in *)dst_addr;
596         struct dst_entry *dst;
597         struct net_device *ndev;
598         struct cxgbi_device *cdev;
599         struct rtable *rt = NULL;
600         struct neighbour *n;
601         struct flowi4 fl4;
602         struct cxgbi_sock *csk = NULL;
603         unsigned int mtu = 0;
604         int port = 0xFFFF;
605         int err = 0;
606
607         rt = find_route_ipv4(&fl4, 0, daddr->sin_addr.s_addr, 0, daddr->sin_port, 0);
608         if (!rt) {
609                 pr_info("no route to ipv4 0x%x, port %u.\n",
610                         be32_to_cpu(daddr->sin_addr.s_addr),
611                         be16_to_cpu(daddr->sin_port));
612                 err = -ENETUNREACH;
613                 goto err_out;
614         }
615         dst = &rt->dst;
616         n = dst_neigh_lookup(dst, &daddr->sin_addr.s_addr);
617         if (!n) {
618                 err = -ENODEV;
619                 goto rel_rt;
620         }
621         ndev = n->dev;
622
623         if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
624                 pr_info("multi-cast route %pI4, port %u, dev %s.\n",
625                         &daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
626                         ndev->name);
627                 err = -ENETUNREACH;
628                 goto rel_neigh;
629         }
630
631         if (ndev->flags & IFF_LOOPBACK) {
632                 ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
633                 mtu = ndev->mtu;
634                 pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
635                         n->dev->name, ndev->name, mtu);
636         }
637
638         cdev = cxgbi_device_find_by_netdev(ndev, &port);
639         if (!cdev) {
640                 pr_info("dst %pI4, %s, NOT cxgbi device.\n",
641                         &daddr->sin_addr.s_addr, ndev->name);
642                 err = -ENETUNREACH;
643                 goto rel_neigh;
644         }
645         log_debug(1 << CXGBI_DBG_SOCK,
646                 "route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n",
647                 &daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
648                            port, ndev->name, cdev);
649
650         csk = cxgbi_sock_create(cdev);
651         if (!csk) {
652                 err = -ENOMEM;
653                 goto rel_neigh;
654         }
655         csk->cdev = cdev;
656         csk->port_id = port;
657         csk->mtu = mtu;
658         csk->dst = dst;
659
660         csk->csk_family = AF_INET;
661         csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr;
662         csk->daddr.sin_port = daddr->sin_port;
663         csk->daddr.sin_family = daddr->sin_family;
664         csk->saddr.sin_family = daddr->sin_family;
665         csk->saddr.sin_addr.s_addr = fl4.saddr;
666         neigh_release(n);
667
668         return csk;
669
670 rel_neigh:
671         neigh_release(n);
672
673 rel_rt:
674         ip_rt_put(rt);
675         if (csk)
676                 cxgbi_sock_closed(csk);
677 err_out:
678         return ERR_PTR(err);
679 }
680
681 #if IS_ENABLED(CONFIG_IPV6)
682 static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr,
683                                         const struct in6_addr *daddr)
684 {
685         struct flowi6 fl;
686
687         if (saddr)
688                 memcpy(&fl.saddr, saddr, sizeof(struct in6_addr));
689         if (daddr)
690                 memcpy(&fl.daddr, daddr, sizeof(struct in6_addr));
691         return (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
692 }
693
694 static struct cxgbi_sock *cxgbi_check_route6(struct sockaddr *dst_addr)
695 {
696         struct sockaddr_in6 *daddr6 = (struct sockaddr_in6 *)dst_addr;
697         struct dst_entry *dst;
698         struct net_device *ndev;
699         struct cxgbi_device *cdev;
700         struct rt6_info *rt = NULL;
701         struct neighbour *n;
702         struct in6_addr pref_saddr;
703         struct cxgbi_sock *csk = NULL;
704         unsigned int mtu = 0;
705         int port = 0xFFFF;
706         int err = 0;
707
708         rt = find_route_ipv6(NULL, &daddr6->sin6_addr);
709
710         if (!rt) {
711                 pr_info("no route to ipv6 %pI6 port %u\n",
712                         daddr6->sin6_addr.s6_addr,
713                         be16_to_cpu(daddr6->sin6_port));
714                 err = -ENETUNREACH;
715                 goto err_out;
716         }
717
718         dst = &rt->dst;
719
720         n = dst_neigh_lookup(dst, &daddr6->sin6_addr);
721
722         if (!n) {
723                 pr_info("%pI6, port %u, dst no neighbour.\n",
724                         daddr6->sin6_addr.s6_addr,
725                         be16_to_cpu(daddr6->sin6_port));
726                 err = -ENETUNREACH;
727                 goto rel_rt;
728         }
729         ndev = n->dev;
730
731         if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
732                 pr_info("multi-cast route %pI6 port %u, dev %s.\n",
733                         daddr6->sin6_addr.s6_addr,
734                         ntohs(daddr6->sin6_port), ndev->name);
735                 err = -ENETUNREACH;
736                 goto rel_rt;
737         }
738
739         cdev = cxgbi_device_find_by_netdev(ndev, &port);
740         if (!cdev)
741                 cdev = cxgbi_device_find_by_mac(ndev, &port);
742         if (!cdev) {
743                 pr_info("dst %pI6 %s, NOT cxgbi device.\n",
744                         daddr6->sin6_addr.s6_addr, ndev->name);
745                 err = -ENETUNREACH;
746                 goto rel_rt;
747         }
748         log_debug(1 << CXGBI_DBG_SOCK,
749                   "route to %pI6 :%u, ndev p#%d,%s, cdev 0x%p.\n",
750                   daddr6->sin6_addr.s6_addr, ntohs(daddr6->sin6_port), port,
751                   ndev->name, cdev);
752
753         csk = cxgbi_sock_create(cdev);
754         if (!csk) {
755                 err = -ENOMEM;
756                 goto rel_rt;
757         }
758         csk->cdev = cdev;
759         csk->port_id = port;
760         csk->mtu = mtu;
761         csk->dst = dst;
762
763         if (ipv6_addr_any(&rt->rt6i_prefsrc.addr)) {
764                 struct inet6_dev *idev = ip6_dst_idev((struct dst_entry *)rt);
765
766                 err = ipv6_dev_get_saddr(&init_net, idev ? idev->dev : NULL,
767                                          &daddr6->sin6_addr, 0, &pref_saddr);
768                 if (err) {
769                         pr_info("failed to get source address to reach %pI6\n",
770                                 &daddr6->sin6_addr);
771                         goto rel_rt;
772                 }
773         } else {
774                 pref_saddr = rt->rt6i_prefsrc.addr;
775         }
776
777         csk->csk_family = AF_INET6;
778         csk->daddr6.sin6_addr = daddr6->sin6_addr;
779         csk->daddr6.sin6_port = daddr6->sin6_port;
780         csk->daddr6.sin6_family = daddr6->sin6_family;
781         csk->saddr6.sin6_family = daddr6->sin6_family;
782         csk->saddr6.sin6_addr = pref_saddr;
783
784         neigh_release(n);
785         return csk;
786
787 rel_rt:
788         if (n)
789                 neigh_release(n);
790
791         ip6_rt_put(rt);
792         if (csk)
793                 cxgbi_sock_closed(csk);
794 err_out:
795         return ERR_PTR(err);
796 }
797 #endif /* IS_ENABLED(CONFIG_IPV6) */
798
799 void cxgbi_sock_established(struct cxgbi_sock *csk, unsigned int snd_isn,
800                         unsigned int opt)
801 {
802         csk->write_seq = csk->snd_nxt = csk->snd_una = snd_isn;
803         dst_confirm(csk->dst);
804         smp_mb();
805         cxgbi_sock_set_state(csk, CTP_ESTABLISHED);
806 }
807 EXPORT_SYMBOL_GPL(cxgbi_sock_established);
808
809 static void cxgbi_inform_iscsi_conn_closing(struct cxgbi_sock *csk)
810 {
811         log_debug(1 << CXGBI_DBG_SOCK,
812                 "csk 0x%p, state %u, flags 0x%lx, conn 0x%p.\n",
813                 csk, csk->state, csk->flags, csk->user_data);
814
815         if (csk->state != CTP_ESTABLISHED) {
816                 read_lock_bh(&csk->callback_lock);
817                 if (csk->user_data)
818                         iscsi_conn_failure(csk->user_data,
819                                         ISCSI_ERR_CONN_FAILED);
820                 read_unlock_bh(&csk->callback_lock);
821         }
822 }
823
824 void cxgbi_sock_closed(struct cxgbi_sock *csk)
825 {
826         log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
827                 csk, (csk)->state, (csk)->flags, (csk)->tid);
828         cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED);
829         if (csk->state == CTP_ACTIVE_OPEN || csk->state == CTP_CLOSED)
830                 return;
831         if (csk->saddr.sin_port)
832                 sock_put_port(csk);
833         if (csk->dst)
834                 dst_release(csk->dst);
835         csk->cdev->csk_release_offload_resources(csk);
836         cxgbi_sock_set_state(csk, CTP_CLOSED);
837         cxgbi_inform_iscsi_conn_closing(csk);
838         cxgbi_sock_put(csk);
839 }
840 EXPORT_SYMBOL_GPL(cxgbi_sock_closed);
841
842 static void need_active_close(struct cxgbi_sock *csk)
843 {
844         int data_lost;
845         int close_req = 0;
846
847         log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
848                 csk, (csk)->state, (csk)->flags, (csk)->tid);
849         spin_lock_bh(&csk->lock);
850         dst_confirm(csk->dst);
851         data_lost = skb_queue_len(&csk->receive_queue);
852         __skb_queue_purge(&csk->receive_queue);
853
854         if (csk->state == CTP_ACTIVE_OPEN)
855                 cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED);
856         else if (csk->state == CTP_ESTABLISHED) {
857                 close_req = 1;
858                 cxgbi_sock_set_state(csk, CTP_ACTIVE_CLOSE);
859         } else if (csk->state == CTP_PASSIVE_CLOSE) {
860                 close_req = 1;
861                 cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2);
862         }
863
864         if (close_req) {
865                 if (data_lost)
866                         csk->cdev->csk_send_abort_req(csk);
867                 else
868                         csk->cdev->csk_send_close_req(csk);
869         }
870
871         spin_unlock_bh(&csk->lock);
872 }
873
874 void cxgbi_sock_fail_act_open(struct cxgbi_sock *csk, int errno)
875 {
876         pr_info("csk 0x%p,%u,%lx, %pI4:%u-%pI4:%u, err %d.\n",
877                         csk, csk->state, csk->flags,
878                         &csk->saddr.sin_addr.s_addr, csk->saddr.sin_port,
879                         &csk->daddr.sin_addr.s_addr, csk->daddr.sin_port,
880                         errno);
881
882         cxgbi_sock_set_state(csk, CTP_CONNECTING);
883         csk->err = errno;
884         cxgbi_sock_closed(csk);
885 }
886 EXPORT_SYMBOL_GPL(cxgbi_sock_fail_act_open);
887
888 void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb)
889 {
890         struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk;
891
892         log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
893                 csk, (csk)->state, (csk)->flags, (csk)->tid);
894         cxgbi_sock_get(csk);
895         spin_lock_bh(&csk->lock);
896         if (csk->state == CTP_ACTIVE_OPEN)
897                 cxgbi_sock_fail_act_open(csk, -EHOSTUNREACH);
898         spin_unlock_bh(&csk->lock);
899         cxgbi_sock_put(csk);
900         __kfree_skb(skb);
901 }
902 EXPORT_SYMBOL_GPL(cxgbi_sock_act_open_req_arp_failure);
903
904 void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *csk)
905 {
906         cxgbi_sock_get(csk);
907         spin_lock_bh(&csk->lock);
908         if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
909                 if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_RCVD))
910                         cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD);
911                 else {
912                         cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_RCVD);
913                         cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING);
914                         if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD))
915                                 pr_err("csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n",
916                                         csk, csk->state, csk->flags, csk->tid);
917                         cxgbi_sock_closed(csk);
918                 }
919         }
920         spin_unlock_bh(&csk->lock);
921         cxgbi_sock_put(csk);
922 }
923 EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_abort_rpl);
924
925 void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *csk)
926 {
927         log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
928                 csk, (csk)->state, (csk)->flags, (csk)->tid);
929         cxgbi_sock_get(csk);
930         spin_lock_bh(&csk->lock);
931
932         if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING))
933                 goto done;
934
935         switch (csk->state) {
936         case CTP_ESTABLISHED:
937                 cxgbi_sock_set_state(csk, CTP_PASSIVE_CLOSE);
938                 break;
939         case CTP_ACTIVE_CLOSE:
940                 cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2);
941                 break;
942         case CTP_CLOSE_WAIT_1:
943                 cxgbi_sock_closed(csk);
944                 break;
945         case CTP_ABORTING:
946                 break;
947         default:
948                 pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n",
949                         csk, csk->state, csk->flags, csk->tid);
950         }
951         cxgbi_inform_iscsi_conn_closing(csk);
952 done:
953         spin_unlock_bh(&csk->lock);
954         cxgbi_sock_put(csk);
955 }
956 EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_peer_close);
957
958 void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *csk, u32 snd_nxt)
959 {
960         log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
961                 csk, (csk)->state, (csk)->flags, (csk)->tid);
962         cxgbi_sock_get(csk);
963         spin_lock_bh(&csk->lock);
964
965         csk->snd_una = snd_nxt - 1;
966         if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING))
967                 goto done;
968
969         switch (csk->state) {
970         case CTP_ACTIVE_CLOSE:
971                 cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_1);
972                 break;
973         case CTP_CLOSE_WAIT_1:
974         case CTP_CLOSE_WAIT_2:
975                 cxgbi_sock_closed(csk);
976                 break;
977         case CTP_ABORTING:
978                 break;
979         default:
980                 pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n",
981                         csk, csk->state, csk->flags, csk->tid);
982         }
983 done:
984         spin_unlock_bh(&csk->lock);
985         cxgbi_sock_put(csk);
986 }
987 EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_close_conn_rpl);
988
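/*
 * cxgbi_sock_rcv_wr_ack() returns work-request credits to csk->wr_cred as
 * the hardware acknowledges transmitted data.  Each skb on the WR queue
 * carries its own credit count in skb->csum (compared against "credits"
 * below), so fully acked skbs are dequeued and freed, while a partially
 * acked skb just has its remaining credit count reduced.
 */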
989 void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *csk, unsigned int credits,
990                            unsigned int snd_una, int seq_chk)
991 {
992         log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
993                         "csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, snd_una %u,%d.\n",
994                         csk, csk->state, csk->flags, csk->tid, credits,
995                         csk->wr_cred, csk->wr_una_cred, snd_una, seq_chk);
996
997         spin_lock_bh(&csk->lock);
998
999         csk->wr_cred += credits;
1000         if (csk->wr_una_cred > csk->wr_max_cred - csk->wr_cred)
1001                 csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;
1002
1003         while (credits) {
1004                 struct sk_buff *p = cxgbi_sock_peek_wr(csk);
1005
1006                 if (unlikely(!p)) {
1007                         pr_err("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, empty.\n",
1008                                 csk, csk->state, csk->flags, csk->tid, credits,
1009                                 csk->wr_cred, csk->wr_una_cred);
1010                         break;
1011                 }
1012
1013                 if (unlikely(credits < p->csum)) {
1014                         pr_warn("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, < %u.\n",
1015                                 csk, csk->state, csk->flags, csk->tid,
1016                                 credits, csk->wr_cred, csk->wr_una_cred,
1017                                 p->csum);
1018                         p->csum -= credits;
1019                         break;
1020                 } else {
1021                         cxgbi_sock_dequeue_wr(csk);
1022                         credits -= p->csum;
1023                         kfree_skb(p);
1024                 }
1025         }
1026
1027         cxgbi_sock_check_wr_invariants(csk);
1028
1029         if (seq_chk) {
1030                 if (unlikely(before(snd_una, csk->snd_una))) {
1031                         pr_warn("csk 0x%p,%u,0x%lx,%u, snd_una %u/%u.",
1032                                 csk, csk->state, csk->flags, csk->tid, snd_una,
1033                                 csk->snd_una);
1034                         goto done;
1035                 }
1036
1037                 if (csk->snd_una != snd_una) {
1038                         csk->snd_una = snd_una;
1039                         dst_confirm(csk->dst);
1040                 }
1041         }
1042
1043         if (skb_queue_len(&csk->write_queue)) {
1044                 if (csk->cdev->csk_push_tx_frames(csk, 0))
1045                         cxgbi_conn_tx_open(csk);
1046         } else
1047                 cxgbi_conn_tx_open(csk);
1048 done:
1049         spin_unlock_bh(&csk->lock);
1050 }
1051 EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_wr_ack);
1052
1053 static unsigned int cxgbi_sock_find_best_mtu(struct cxgbi_sock *csk,
1054                                              unsigned short mtu)
1055 {
1056         int i = 0;
1057
1058         while (i < csk->cdev->nmtus - 1 && csk->cdev->mtus[i + 1] <= mtu)
1059                 ++i;
1060
1061         return i;
1062 }
1063
1064 unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *csk, unsigned int pmtu)
1065 {
1066         unsigned int idx;
1067         struct dst_entry *dst = csk->dst;
1068
1069         csk->advmss = dst_metric_advmss(dst);
1070
1071         if (csk->advmss > pmtu - 40)
1072                 csk->advmss = pmtu - 40;
1073         if (csk->advmss < csk->cdev->mtus[0] - 40)
1074                 csk->advmss = csk->cdev->mtus[0] - 40;
1075         idx = cxgbi_sock_find_best_mtu(csk, csk->advmss + 40);
1076
1077         return idx;
1078 }
1079 EXPORT_SYMBOL_GPL(cxgbi_sock_select_mss);
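/*
 * The constant 40 above corresponds to a 20-byte IPv4 header plus a 20-byte
 * TCP header.  Example (illustrative): with a path MTU of 1500 and a route
 * advmss larger than 1460, advmss is clamped to 1500 - 40 = 1460, and
 * cxgbi_sock_find_best_mtu() then picks the largest entry in the adapter's
 * MTU table that does not exceed 1500.
 */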
1080
1081 void cxgbi_sock_skb_entail(struct cxgbi_sock *csk, struct sk_buff *skb)
1082 {
1083         cxgbi_skcb_tcp_seq(skb) = csk->write_seq;
1084         __skb_queue_tail(&csk->write_queue, skb);
1085 }
1086 EXPORT_SYMBOL_GPL(cxgbi_sock_skb_entail);
1087
1088 void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *csk)
1089 {
1090         struct sk_buff *skb;
1091
1092         while ((skb = cxgbi_sock_dequeue_wr(csk)) != NULL)
1093                 kfree_skb(skb);
1094 }
1095 EXPORT_SYMBOL_GPL(cxgbi_sock_purge_wr_queue);
1096
1097 void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *csk)
1098 {
1099         int pending = cxgbi_sock_count_pending_wrs(csk);
1100
1101         if (unlikely(csk->wr_cred + pending != csk->wr_max_cred))
1102                 pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
1103                         csk, csk->tid, csk->wr_cred, pending, csk->wr_max_cred);
1104 }
1105 EXPORT_SYMBOL_GPL(cxgbi_sock_check_wr_invariants);
1106
1107 static int cxgbi_sock_send_pdus(struct cxgbi_sock *csk, struct sk_buff *skb)
1108 {
1109         struct cxgbi_device *cdev = csk->cdev;
1110         struct sk_buff *next;
1111         int err, copied = 0;
1112
1113         spin_lock_bh(&csk->lock);
1114
1115         if (csk->state != CTP_ESTABLISHED) {
1116                 log_debug(1 << CXGBI_DBG_PDU_TX,
1117                         "csk 0x%p,%u,0x%lx,%u, EAGAIN.\n",
1118                         csk, csk->state, csk->flags, csk->tid);
1119                 err = -EAGAIN;
1120                 goto out_err;
1121         }
1122
1123         if (csk->err) {
1124                 log_debug(1 << CXGBI_DBG_PDU_TX,
1125                         "csk 0x%p,%u,0x%lx,%u, EPIPE %d.\n",
1126                         csk, csk->state, csk->flags, csk->tid, csk->err);
1127                 err = -EPIPE;
1128                 goto out_err;
1129         }
1130
1131         if (csk->write_seq - csk->snd_una >= cdev->snd_win) {
1132                 log_debug(1 << CXGBI_DBG_PDU_TX,
1133                         "csk 0x%p,%u,0x%lx,%u, FULL %u-%u >= %u.\n",
1134                         csk, csk->state, csk->flags, csk->tid, csk->write_seq,
1135                         csk->snd_una, cdev->snd_win);
1136                 err = -ENOBUFS;
1137                 goto out_err;
1138         }
1139
1140         while (skb) {
1141                 int frags = skb_shinfo(skb)->nr_frags +
1142                                 (skb->len != skb->data_len);
1143
1144                 if (unlikely(skb_headroom(skb) < cdev->skb_tx_rsvd)) {
1145                         pr_err("csk 0x%p, skb head %u < %u.\n",
1146                                 csk, skb_headroom(skb), cdev->skb_tx_rsvd);
1147                         err = -EINVAL;
1148                         goto out_err;
1149                 }
1150
1151                 if (frags >= SKB_WR_LIST_SIZE) {
1152                         pr_err("csk 0x%p, frags %d, %u,%u >%u.\n",
1153                                 csk, skb_shinfo(skb)->nr_frags, skb->len,
1154                                 skb->data_len, (uint)(SKB_WR_LIST_SIZE));
1155                         err = -EINVAL;
1156                         goto out_err;
1157                 }
1158
1159                 next = skb->next;
1160                 skb->next = NULL;
1161                 cxgbi_skcb_set_flag(skb, SKCBF_TX_NEED_HDR);
1162                 cxgbi_sock_skb_entail(csk, skb);
1163                 copied += skb->len;
1164                 csk->write_seq += skb->len +
1165                                 cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
1166                 skb = next;
1167         }
1168 done:
1169         if (likely(skb_queue_len(&csk->write_queue)))
1170                 cdev->csk_push_tx_frames(csk, 1);
1171         spin_unlock_bh(&csk->lock);
1172         return copied;
1173
1174 out_err:
1175         if (copied == 0 && err == -EPIPE)
1176                 copied = csk->err ? csk->err : -EPIPE;
1177         else
1178                 copied = err;
1179         goto done;
1180 }
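/*
 * cxgbi_sock_send_pdus() returns the number of bytes queued on the write
 * queue (the PDUs are pushed toward the hardware via csk_push_tx_frames()),
 * or a negative errno on failure: -EAGAIN while the connection is not yet
 * established, -ENOBUFS when the send window is full, and csk->err (or
 * -EPIPE) once the connection itself has failed.
 */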
1181
1182 /*
1183  * Direct Data Placement -
1184  * Directly place the payload of an iSCSI Data-In or Data-Out PDU into its
1185  * pre-posted, final destination host-memory buffer, selected by the
1186  * Initiator Task Tag (ITT) in Data-In PDUs or the Target Task Tag (TTT) in
1187  * Data-Out PDUs.
1188  * The host memory addresses are programmed into the h/w as pagepod entries.
1189  * The location of the pagepod entry is encoded into the ddp tag, which is
1190  * used as the base for the ITT/TTT.
1191  */
1192
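/*
 * Tag construction, as done in ddp_tag_reserve() below: the sw tag supplied
 * by the iSCSI layer is folded in through cxgbi_ddp_tag_base() and the
 * reserved pagepod index is placed at PPOD_IDX_SHIFT, i.e.
 *
 *	tag = cxgbi_ddp_tag_base(tformat, sw_tag);
 *	tag |= idx << PPOD_IDX_SHIFT;
 *
 * ddp_tag_release() reverses this by extracting the index with
 * (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask.
 */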
1193 static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4};
1194 static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16};
1195 static unsigned char page_idx = DDP_PGIDX_MAX;
1196
1197 static unsigned char sw_tag_idx_bits;
1198 static unsigned char sw_tag_age_bits;
1199
1200 /*
1201  * Direct-Data Placement page size adjustment
1202  */
1203 static int ddp_adjust_page_table(void)
1204 {
1205         int i;
1206         unsigned int base_order, order;
1207
1208         if (PAGE_SIZE < (1UL << ddp_page_shift[0])) {
1209                 pr_info("PAGE_SIZE 0x%lx too small, min 0x%lx\n",
1210                         PAGE_SIZE, 1UL << ddp_page_shift[0]);
1211                 return -EINVAL;
1212         }
1213
1214         base_order = get_order(1UL << ddp_page_shift[0]);
1215         order = get_order(1UL << PAGE_SHIFT);
1216
1217         for (i = 0; i < DDP_PGIDX_MAX; i++) {
1218                 /* entry 0 is the kernel page size; each later entry doubles it */
1219                 ddp_page_order[i] = order - base_order + i;
1220                 ddp_page_shift[i] = PAGE_SHIFT + i;
1221         }
1222         return 0;
1223 }
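/*
 * After adjustment the table simply describes the kernel page size and the
 * next three doublings of it: ddp_page_shift becomes {PAGE_SHIFT,
 * PAGE_SHIFT + 1, PAGE_SHIFT + 2, PAGE_SHIFT + 3}.  This path is only taken
 * when PAGE_SIZE is not one of the sizes already in the default table
 * (4KB, 8KB, 16KB or 64KB); see ddp_setup_host_page_size().
 */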
1224
1225 static int ddp_find_page_index(unsigned long pgsz)
1226 {
1227         int i;
1228
1229         for (i = 0; i < DDP_PGIDX_MAX; i++) {
1230                 if (pgsz == (1UL << ddp_page_shift[i]))
1231                         return i;
1232         }
1233         pr_info("ddp page size %lu not supported.\n", pgsz);
1234         return DDP_PGIDX_MAX;
1235 }
1236
1237 static void ddp_setup_host_page_size(void)
1238 {
1239         if (page_idx == DDP_PGIDX_MAX) {
1240                 page_idx = ddp_find_page_index(PAGE_SIZE);
1241
1242                 if (page_idx == DDP_PGIDX_MAX) {
1243                         pr_info("system PAGE %lu, update hw.\n", PAGE_SIZE);
1244                         if (ddp_adjust_page_table() < 0) {
1245                                 pr_info("PAGE %lu, disable ddp.\n", PAGE_SIZE);
1246                                 return;
1247                         }
1248                         page_idx = ddp_find_page_index(PAGE_SIZE);
1249                 }
1250                 pr_info("system PAGE %lu, ddp idx %u.\n", PAGE_SIZE, page_idx);
1251         }
1252 }
1253
1254 void cxgbi_ddp_page_size_factor(int *pgsz_factor)
1255 {
1256         int i;
1257
1258         for (i = 0; i < DDP_PGIDX_MAX; i++)
1259                 pgsz_factor[i] = ddp_page_order[i];
1260 }
1261 EXPORT_SYMBOL_GPL(cxgbi_ddp_page_size_factor);
1262
1263 /*
1264  * DDP setup & teardown
1265  */
1266
1267 void cxgbi_ddp_ppod_set(struct cxgbi_pagepod *ppod,
1268                         struct cxgbi_pagepod_hdr *hdr,
1269                         struct cxgbi_gather_list *gl, unsigned int gidx)
1270 {
1271         int i;
1272
1273         memcpy(ppod, hdr, sizeof(*hdr));
1274         for (i = 0; i < (PPOD_PAGES_MAX + 1); i++, gidx++) {
1275                 ppod->addr[i] = gidx < gl->nelem ?
1276                                 cpu_to_be64(gl->phys_addr[gidx]) : 0ULL;
1277         }
1278 }
1279 EXPORT_SYMBOL_GPL(cxgbi_ddp_ppod_set);
1280
1281 void cxgbi_ddp_ppod_clear(struct cxgbi_pagepod *ppod)
1282 {
1283         memset(ppod, 0, sizeof(*ppod));
1284 }
1285 EXPORT_SYMBOL_GPL(cxgbi_ddp_ppod_clear);
1286
1287 static inline int ddp_find_unused_entries(struct cxgbi_ddp_info *ddp,
1288                                         unsigned int start, unsigned int max,
1289                                         unsigned int count,
1290                                         struct cxgbi_gather_list *gl)
1291 {
1292         unsigned int i, j, k;
1293
1294         /*  not enough entries */
1295         if ((max - start) < count) {
1296                 log_debug(1 << CXGBI_DBG_DDP,
1297                         "NOT enough entries %u+%u < %u.\n", start, count, max);
1298                 return -EBUSY;
1299         }
1300
1301         max -= count;
1302         spin_lock(&ddp->map_lock);
1303         for (i = start; i < max;) {
1304                 for (j = 0, k = i; j < count; j++, k++) {
1305                         if (ddp->gl_map[k])
1306                                 break;
1307                 }
1308                 if (j == count) {
1309                         for (j = 0, k = i; j < count; j++, k++)
1310                                 ddp->gl_map[k] = gl;
1311                         spin_unlock(&ddp->map_lock);
1312                         return i;
1313                 }
1314                 i += j + 1;
1315         }
1316         spin_unlock(&ddp->map_lock);
1317         log_debug(1 << CXGBI_DBG_DDP,
1318                 "NO suitable entries %u available.\n", count);
1319         return -EBUSY;
1320 }
1321
1322 static inline void ddp_unmark_entries(struct cxgbi_ddp_info *ddp,
1323                                                 int start, int count)
1324 {
1325         spin_lock(&ddp->map_lock);
1326         memset(&ddp->gl_map[start], 0,
1327                 count * sizeof(struct cxgbi_gather_list *));
1328         spin_unlock(&ddp->map_lock);
1329 }
1330
1331 static inline void ddp_gl_unmap(struct pci_dev *pdev,
1332                                         struct cxgbi_gather_list *gl)
1333 {
1334         int i;
1335
1336         for (i = 0; i < gl->nelem; i++)
1337                 dma_unmap_page(&pdev->dev, gl->phys_addr[i], PAGE_SIZE,
1338                                 PCI_DMA_FROMDEVICE);
1339 }
1340
1341 static inline int ddp_gl_map(struct pci_dev *pdev,
1342                                     struct cxgbi_gather_list *gl)
1343 {
1344         int i;
1345
1346         for (i = 0; i < gl->nelem; i++) {
1347                 gl->phys_addr[i] = dma_map_page(&pdev->dev, gl->pages[i], 0,
1348                                                 PAGE_SIZE,
1349                                                 PCI_DMA_FROMDEVICE);
1350                 if (unlikely(dma_mapping_error(&pdev->dev, gl->phys_addr[i]))) {
1351                         log_debug(1 << CXGBI_DBG_DDP,
1352                                 "page %d 0x%p, 0x%p dma mapping err.\n",
1353                                 i, gl->pages[i], pdev);
1354                         goto unmap;
1355                 }
1356         }
1357         return i;
1358 unmap:
1359         if (i) {
1360                 unsigned int nelem = gl->nelem;
1361
1362                 gl->nelem = i;
1363                 ddp_gl_unmap(pdev, gl);
1364                 gl->nelem = nelem;
1365         }
1366         return -EINVAL;
1367 }
1368
1369 static void ddp_release_gl(struct cxgbi_gather_list *gl,
1370                                   struct pci_dev *pdev)
1371 {
1372         ddp_gl_unmap(pdev, gl);
1373         kfree(gl);
1374 }
1375
1376 static struct cxgbi_gather_list *ddp_make_gl(unsigned int xferlen,
1377                                                     struct scatterlist *sgl,
1378                                                     unsigned int sgcnt,
1379                                                     struct pci_dev *pdev,
1380                                                     gfp_t gfp)
1381 {
1382         struct cxgbi_gather_list *gl;
1383         struct scatterlist *sg = sgl;
1384         struct page *sgpage = sg_page(sg);
1385         unsigned int sglen = sg->length;
1386         unsigned int sgoffset = sg->offset;
1387         unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >>
1388                                 PAGE_SHIFT;
1389         int i = 1, j = 0;
1390
1391         if (xferlen < DDP_THRESHOLD) {
1392                 log_debug(1 << CXGBI_DBG_DDP,
1393                         "xfer %u < threshold %u, no ddp.\n",
1394                         xferlen, DDP_THRESHOLD);
1395                 return NULL;
1396         }
1397
1398         gl = kzalloc(sizeof(struct cxgbi_gather_list) +
1399                      npages * (sizeof(dma_addr_t) +
1400                      sizeof(struct page *)), gfp);
1401         if (!gl) {
1402                 log_debug(1 << CXGBI_DBG_DDP,
1403                         "xfer %u, %u pages, OOM.\n", xferlen, npages);
1404                 return NULL;
1405         }
1406
1407          log_debug(1 << CXGBI_DBG_DDP,
1408                 "xfer %u, sgl %u, gl max %u.\n", xferlen, sgcnt, npages);
1409
1410         gl->pages = (struct page **)&gl->phys_addr[npages];
1411         gl->nelem = npages;
1412         gl->length = xferlen;
1413         gl->offset = sgoffset;
1414         gl->pages[0] = sgpage;
1415
1416         for (i = 1, sg = sg_next(sgl), j = 0; i < sgcnt;
1417                 i++, sg = sg_next(sg)) {
1418                 struct page *page = sg_page(sg);
1419
1420                 if (sgpage == page && sg->offset == sgoffset + sglen)
1421                         sglen += sg->length;
1422                 else {
1423                         /* make sure the sgl is suitable for ddp: only the
1424                          * first entry may start at a non-zero offset, and
1425                          * all of the middle pages must be used completely
1426                          */
1427                         if ((j && sgoffset) || ((i != sgcnt - 1) &&
1428                             ((sglen + sgoffset) & ~PAGE_MASK))) {
1429                                 log_debug(1 << CXGBI_DBG_DDP,
1430                                         "page %d/%u, %u + %u.\n",
1431                                         i, sgcnt, sgoffset, sglen);
1432                                 goto error_out;
1433                         }
1434
1435                         j++;
1436                         if (j == gl->nelem || sg->offset) {
1437                                 log_debug(1 << CXGBI_DBG_DDP,
1438                                         "page %d/%u, offset %u.\n",
1439                                         j, gl->nelem, sg->offset);
1440                                 goto error_out;
1441                         }
1442                         gl->pages[j] = page;
1443                         sglen = sg->length;
1444                         sgoffset = sg->offset;
1445                         sgpage = page;
1446                 }
1447         }
1448         gl->nelem = ++j;
1449
1450         if (ddp_gl_map(pdev, gl) < 0)
1451                 goto error_out;
1452
1453         return gl;
1454
1455 error_out:
1456         kfree(gl);
1457         return NULL;
1458 }
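/*
 * ddp_make_gl() turns the scatterlist into a page-based gather list and
 * DMA-maps it.  It returns NULL when the transfer is below DDP_THRESHOLD,
 * when memory allocation fails, or when the sgl layout cannot be expressed
 * as whole pages (see the checks above); DDP is then simply not used for
 * that transfer.
 */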
1459
1460 static void ddp_tag_release(struct cxgbi_hba *chba, u32 tag)
1461 {
1462         struct cxgbi_device *cdev = chba->cdev;
1463         struct cxgbi_ddp_info *ddp = cdev->ddp;
1464         u32 idx;
1465
1466         idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask;
1467         if (idx < ddp->nppods) {
1468                 struct cxgbi_gather_list *gl = ddp->gl_map[idx];
1469                 unsigned int npods;
1470
1471                 if (!gl || !gl->nelem) {
1472                         pr_warn("tag 0x%x, idx %u, gl 0x%p, %u.\n",
1473                                 tag, idx, gl, gl ? gl->nelem : 0);
1474                         return;
1475                 }
1476                 npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
1477                 log_debug(1 << CXGBI_DBG_DDP,
1478                         "tag 0x%x, release idx %u, npods %u.\n",
1479                         tag, idx, npods);
1480                 cdev->csk_ddp_clear(chba, tag, idx, npods);
1481                 ddp_unmark_entries(ddp, idx, npods);
1482                 ddp_release_gl(gl, ddp->pdev);
1483         } else
1484                 pr_warn("tag 0x%x, idx %u > max %u.\n", tag, idx, ddp->nppods);
1485 }
1486
1487 static int ddp_tag_reserve(struct cxgbi_sock *csk, unsigned int tid,
1488                            u32 sw_tag, u32 *tagp, struct cxgbi_gather_list *gl,
1489                            gfp_t gfp)
1490 {
1491         struct cxgbi_device *cdev = csk->cdev;
1492         struct cxgbi_ddp_info *ddp = cdev->ddp;
1493         struct cxgbi_tag_format *tformat = &cdev->tag_format;
1494         struct cxgbi_pagepod_hdr hdr;
1495         unsigned int npods;
1496         int idx = -1;
1497         int err = -ENOMEM;
1498         u32 tag;
1499
1500         npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
1501         if (ddp->idx_last == ddp->nppods)
1502                 idx = ddp_find_unused_entries(ddp, 0, ddp->nppods,
1503                                                         npods, gl);
1504         else {
1505                 idx = ddp_find_unused_entries(ddp, ddp->idx_last + 1,
1506                                                         ddp->nppods, npods,
1507                                                         gl);
1508                 if (idx < 0 && ddp->idx_last >= npods) {
1509                         idx = ddp_find_unused_entries(ddp, 0,
1510                                 min(ddp->idx_last + npods, ddp->nppods),
1511                                                         npods, gl);
1512                 }
1513         }
1514         if (idx < 0) {
1515                 log_debug(1 << CXGBI_DBG_DDP,
1516                         "xferlen %u, gl %u, npods %u NO DDP.\n",
1517                         gl->length, gl->nelem, npods);
1518                 return idx;
1519         }
1520
1521         tag = cxgbi_ddp_tag_base(tformat, sw_tag);
1522         tag |= idx << PPOD_IDX_SHIFT;
1523
1524         hdr.rsvd = 0;
1525         hdr.vld_tid = htonl(PPOD_VALID_FLAG | PPOD_TID(tid));
1526         hdr.pgsz_tag_clr = htonl(tag & ddp->rsvd_tag_mask);
1527         hdr.max_offset = htonl(gl->length);
1528         hdr.page_offset = htonl(gl->offset);
1529
1530         err = cdev->csk_ddp_set(csk, &hdr, idx, npods, gl);
1531         if (err < 0)
1532                 goto unmark_entries;
1533
1534         ddp->idx_last = idx;
1535         log_debug(1 << CXGBI_DBG_DDP,
1536                 "xfer %u, gl %u,%u, tid 0x%x, tag 0x%x->0x%x(%u,%u).\n",
1537                 gl->length, gl->nelem, gl->offset, tid, sw_tag, tag, idx,
1538                 npods);
1539         *tagp = tag;
1540         return 0;
1541
1542 unmark_entries:
1543         ddp_unmark_entries(ddp, idx, npods);
1544         return err;
1545 }
1546
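/*
 * cxgbi_ddp_reserve - try to set up ddp for a data-in transfer. Small
 * transfers (< DDP_THRESHOLD), unusable sw tags or a missing ddp
 * context fall back to the non-ddp path (-EINVAL); otherwise a gather
 * list is built from the scatterlist and a ddp tag is reserved for it.
 */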
1547 int cxgbi_ddp_reserve(struct cxgbi_sock *csk, unsigned int *tagp,
1548                         unsigned int sw_tag, unsigned int xferlen,
1549                         struct scatterlist *sgl, unsigned int sgcnt, gfp_t gfp)
1550 {
1551         struct cxgbi_device *cdev = csk->cdev;
1552         struct cxgbi_tag_format *tformat = &cdev->tag_format;
1553         struct cxgbi_gather_list *gl;
1554         int err;
1555
1556         if (page_idx >= DDP_PGIDX_MAX || !cdev->ddp ||
1557             xferlen < DDP_THRESHOLD) {
1558                 log_debug(1 << CXGBI_DBG_DDP,
1559                         "pgidx %u, xfer %u, NO ddp.\n", page_idx, xferlen);
1560                 return -EINVAL;
1561         }
1562
1563         if (!cxgbi_sw_tag_usable(tformat, sw_tag)) {
1564                 log_debug(1 << CXGBI_DBG_DDP,
1565                         "sw_tag 0x%x NOT usable.\n", sw_tag);
1566                 return -EINVAL;
1567         }
1568
1569         gl = ddp_make_gl(xferlen, sgl, sgcnt, cdev->pdev, gfp);
1570         if (!gl)
1571                 return -ENOMEM;
1572
1573         err = ddp_tag_reserve(csk, csk->tid, sw_tag, tagp, gl, gfp);
1574         if (err < 0)
1575                 ddp_release_gl(gl, cdev->pdev);
1576
1577         return err;
1578 }
1579
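/*
 * ddp_destroy - final kref release of the per-device ddp state: free
 * any gather lists still recorded in gl_map, then the ddp info itself.
 */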
1580 static void ddp_destroy(struct kref *kref)
1581 {
1582         struct cxgbi_ddp_info *ddp = container_of(kref,
1583                                                 struct cxgbi_ddp_info,
1584                                                 refcnt);
1585         struct cxgbi_device *cdev = ddp->cdev;
1586         int i = 0;
1587
1588         pr_info("kref 0, destroy ddp 0x%p, cdev 0x%p.\n", ddp, cdev);
1589
1590         while (i < ddp->nppods) {
1591                 struct cxgbi_gather_list *gl = ddp->gl_map[i];
1592
1593                 if (gl) {
1594                         int npods = (gl->nelem + PPOD_PAGES_MAX - 1)
1595                                         >> PPOD_PAGES_SHIFT;
1596                         pr_info("cdev 0x%p, ddp %d + %d.\n", cdev, i, npods);
1597                         kfree(gl);
1598                         i += npods;
1599                 } else
1600                         i++;
1601         }
1602         cxgbi_free_big_mem(ddp);
1603 }
1604
1605 int cxgbi_ddp_cleanup(struct cxgbi_device *cdev)
1606 {
1607         struct cxgbi_ddp_info *ddp = cdev->ddp;
1608
1609         log_debug(1 << CXGBI_DBG_DDP,
1610                 "cdev 0x%p, release ddp 0x%p.\n", cdev, ddp);
1611         cdev->ddp = NULL;
1612         if (ddp)
1613                 return kref_put(&ddp->refcnt, ddp_destroy);
1614         return 0;
1615 }
1616 EXPORT_SYMBOL_GPL(cxgbi_ddp_cleanup);
1617
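/*
 * cxgbi_ddp_init - size the pagepod region from [llimit, ulimit],
 * allocate the ddp bookkeeping (gl_map plus per-ppod skb pointers),
 * derive the tag format (reserved ppod-index bits vs. sw tag bits) and
 * the maximum tx/rx PDU payload sizes advertised to libiscsi.
 */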
1618 int cxgbi_ddp_init(struct cxgbi_device *cdev,
1619                    unsigned int llimit, unsigned int ulimit,
1620                    unsigned int max_txsz, unsigned int max_rxsz)
1621 {
1622         struct cxgbi_ddp_info *ddp;
1623         unsigned int ppmax, bits;
1624
1625         ppmax = (ulimit - llimit + 1) >> PPOD_SIZE_SHIFT;
1626         bits = __ilog2_u32(ppmax) + 1;
1627         if (bits > PPOD_IDX_MAX_SIZE)
1628                 bits = PPOD_IDX_MAX_SIZE;
1629         ppmax = (1 << (bits - 1)) - 1;
1630
1631         ddp = cxgbi_alloc_big_mem(sizeof(struct cxgbi_ddp_info) +
1632                                 ppmax * (sizeof(struct cxgbi_gather_list *) +
1633                                          sizeof(struct sk_buff *)),
1634                                 GFP_KERNEL);
1635         if (!ddp) {
1636                 pr_warn("cdev 0x%p, ddp ppmax %u OOM.\n", cdev, ppmax);
1637                 return -ENOMEM;
1638         }
1639         ddp->gl_map = (struct cxgbi_gather_list **)(ddp + 1);
1640         cdev->ddp = ddp;
1641
1642         spin_lock_init(&ddp->map_lock);
1643         kref_init(&ddp->refcnt);
1644
1645         ddp->cdev = cdev;
1646         ddp->pdev = cdev->pdev;
1647         ddp->llimit = llimit;
1648         ddp->ulimit = ulimit;
1649         ddp->max_txsz = min_t(unsigned int, max_txsz, ULP2_MAX_PKT_SIZE);
1650         ddp->max_rxsz = min_t(unsigned int, max_rxsz, ULP2_MAX_PKT_SIZE);
1651         ddp->nppods = ppmax;
1652         ddp->idx_last = ppmax;
1653         ddp->idx_bits = bits;
1654         ddp->idx_mask = (1 << bits) - 1;
1655         ddp->rsvd_tag_mask = (1 << (bits + PPOD_IDX_SHIFT)) - 1;
1656
1657         cdev->tag_format.sw_bits = sw_tag_idx_bits + sw_tag_age_bits;
1658         cdev->tag_format.rsvd_bits = ddp->idx_bits;
1659         cdev->tag_format.rsvd_shift = PPOD_IDX_SHIFT;
1660         cdev->tag_format.rsvd_mask = (1 << cdev->tag_format.rsvd_bits) - 1;
1661
1662         pr_info("%s tag format, sw %u, rsvd %u,%u, mask 0x%x.\n",
1663                 cdev->ports[0]->name, cdev->tag_format.sw_bits,
1664                 cdev->tag_format.rsvd_bits, cdev->tag_format.rsvd_shift,
1665                 cdev->tag_format.rsvd_mask);
1666
1667         cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
1668                                 ddp->max_txsz - ISCSI_PDU_NONPAYLOAD_LEN);
1669         cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
1670                                 ddp->max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN);
1671
1672         log_debug(1 << CXGBI_DBG_DDP,
1673                 "%s max payload size: %u/%u, %u/%u.\n",
1674                 cdev->ports[0]->name, cdev->tx_max_size, ddp->max_txsz,
1675                 cdev->rx_max_size, ddp->max_rxsz);
1676         return 0;
1677 }
1678 EXPORT_SYMBOL_GPL(cxgbi_ddp_init);
1679
1680 /*
1681  * APIs interacting with open-iscsi libraries
1682  */
1683
1684 static unsigned char padding[4];
1685
1686 static void task_release_itt(struct iscsi_task *task, itt_t hdr_itt)
1687 {
1688         struct scsi_cmnd *sc = task->sc;
1689         struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
1690         struct cxgbi_conn *cconn = tcp_conn->dd_data;
1691         struct cxgbi_hba *chba = cconn->chba;
1692         struct cxgbi_tag_format *tformat = &chba->cdev->tag_format;
1693         u32 tag = ntohl((__force u32)hdr_itt);
1694
1695         log_debug(1 << CXGBI_DBG_DDP,
1696                    "cdev 0x%p, release tag 0x%x.\n", chba->cdev, tag);
1697         if (sc &&
1698             (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) &&
1699             cxgbi_is_ddp_tag(tformat, tag))
1700                 ddp_tag_release(chba, tag);
1701 }
1702
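/*
 * task_reserve_itt - for read (or bidi) commands try to reserve a ddp
 * tag covering the expected data-in buffer; on failure, or for other
 * commands, encode a plain non-ddp tag. The tag is stored big-endian
 * in the PDU header ITT.
 */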
1703 static int task_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt)
1704 {
1705         struct scsi_cmnd *sc = task->sc;
1706         struct iscsi_conn *conn = task->conn;
1707         struct iscsi_session *sess = conn->session;
1708         struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1709         struct cxgbi_conn *cconn = tcp_conn->dd_data;
1710         struct cxgbi_hba *chba = cconn->chba;
1711         struct cxgbi_tag_format *tformat = &chba->cdev->tag_format;
1712         u32 sw_tag = (sess->age << cconn->task_idx_bits) | task->itt;
1713         u32 tag = 0;
1714         int err = -EINVAL;
1715
1716         if (sc &&
1717             (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE)) {
1718                 err = cxgbi_ddp_reserve(cconn->cep->csk, &tag, sw_tag,
1719                                         scsi_in(sc)->length,
1720                                         scsi_in(sc)->table.sgl,
1721                                         scsi_in(sc)->table.nents,
1722                                         GFP_ATOMIC);
1723                 if (err < 0)
1724                         log_debug(1 << CXGBI_DBG_DDP,
1725                                 "csk 0x%p, R task 0x%p, %u,%u, no ddp.\n",
1726                                 cconn->cep->csk, task, scsi_in(sc)->length,
1727                                 scsi_in(sc)->table.nents);
1728         }
1729
1730         if (err < 0)
1731                 tag = cxgbi_set_non_ddp_tag(tformat, sw_tag);
1732         /*  the itt needs to be sent in big-endian order */
1733         *hdr_itt = (__force itt_t)htonl(tag);
1734
1735         log_debug(1 << CXGBI_DBG_DDP,
1736                 "cdev 0x%p, task 0x%p, 0x%x(0x%x,0x%x)->0x%x/0x%x.\n",
1737                 chba->cdev, task, sw_tag, task->itt, sess->age, tag, *hdr_itt);
1738         return 0;
1739 }
1740
1741 void cxgbi_parse_pdu_itt(struct iscsi_conn *conn, itt_t itt, int *idx, int *age)
1742 {
1743         struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1744         struct cxgbi_conn *cconn = tcp_conn->dd_data;
1745         struct cxgbi_device *cdev = cconn->chba->cdev;
1746         u32 tag = ntohl((__force u32) itt);
1747         u32 sw_bits;
1748
1749         sw_bits = cxgbi_tag_nonrsvd_bits(&cdev->tag_format, tag);
1750         if (idx)
1751                 *idx = sw_bits & ((1 << cconn->task_idx_bits) - 1);
1752         if (age)
1753                 *age = (sw_bits >> cconn->task_idx_bits) & ISCSI_AGE_MASK;
1754
1755         log_debug(1 << CXGBI_DBG_DDP,
1756                 "cdev 0x%p, tag 0x%x/0x%x, -> 0x%x(0x%x,0x%x).\n",
1757                 cdev, tag, itt, sw_bits, idx ? *idx : 0xFFFFF,
1758                 age ? *age : 0xFF);
1759 }
1760 EXPORT_SYMBOL_GPL(cxgbi_parse_pdu_itt);
1761
1762 void cxgbi_conn_tx_open(struct cxgbi_sock *csk)
1763 {
1764         struct iscsi_conn *conn = csk->user_data;
1765
1766         if (conn) {
1767                 log_debug(1 << CXGBI_DBG_SOCK,
1768                         "csk 0x%p, cid %d.\n", csk, conn->id);
1769                 iscsi_conn_queue_work(conn);
1770         }
1771 }
1772 EXPORT_SYMBOL_GPL(cxgbi_conn_tx_open);
1773
1774 /*
1775  * pdu receive, interact with libiscsi_tcp
1776  */
1777 static inline int read_pdu_skb(struct iscsi_conn *conn,
1778                                struct sk_buff *skb,
1779                                unsigned int offset,
1780                                int offloaded)
1781 {
1782         int status = 0;
1783         int bytes_read;
1784
1785         bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status);
1786         switch (status) {
1787         case ISCSI_TCP_CONN_ERR:
1788                 pr_info("skb 0x%p, off %u, %d, TCP_ERR.\n",
1789                           skb, offset, offloaded);
1790                 return -EIO;
1791         case ISCSI_TCP_SUSPENDED:
1792                 log_debug(1 << CXGBI_DBG_PDU_RX,
1793                         "skb 0x%p, off %u, %d, TCP_SUSPEND, rc %d.\n",
1794                         skb, offset, offloaded, bytes_read);
1795                 /* no transfer - just have caller flush queue */
1796                 return bytes_read;
1797         case ISCSI_TCP_SKB_DONE:
1798                 pr_info("skb 0x%p, off %u, %d, TCP_SKB_DONE.\n",
1799                         skb, offset, offloaded);
1800                 /*
1801                  * pdus should always fit in the skb and we should get
1802                  * a segment done notification.
1803                  */
1804                 iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb.");
1805                 return -EFAULT;
1806         case ISCSI_TCP_SEGMENT_DONE:
1807                 log_debug(1 << CXGBI_DBG_PDU_RX,
1808                         "skb 0x%p, off %u, %d, TCP_SEG_DONE, rc %d.\n",
1809                         skb, offset, offloaded, bytes_read);
1810                 return bytes_read;
1811         default:
1812                 pr_info("skb 0x%p, off %u, %d, invalid status %d.\n",
1813                         skb, offset, offloaded, status);
1814                 return -EINVAL;
1815         }
1816 }
1817
1818 static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb)
1819 {
1820         struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1821
1822         log_debug(1 << CXGBI_DBG_PDU_RX,
1823                 "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n",
1824                 conn, skb, skb->len, cxgbi_skcb_flags(skb));
1825
1826         if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) {
1827                 pr_info("conn 0x%p, skb 0x%p, not hdr.\n", conn, skb);
1828                 iscsi_conn_failure(conn, ISCSI_ERR_PROTO);
1829                 return -EIO;
1830         }
1831
1832         if (conn->hdrdgst_en &&
1833             cxgbi_skcb_test_flag(skb, SKCBF_RX_HCRC_ERR)) {
1834                 pr_info("conn 0x%p, skb 0x%p, hcrc.\n", conn, skb);
1835                 iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST);
1836                 return -EIO;
1837         }
1838
1839         return read_pdu_skb(conn, skb, 0, 0);
1840 }
1841
1842 static int skb_read_pdu_data(struct iscsi_conn *conn, struct sk_buff *lskb,
1843                              struct sk_buff *skb, unsigned int offset)
1844 {
1845         struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1846         bool offloaded = false;
1847         int opcode = tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK;
1848
1849         log_debug(1 << CXGBI_DBG_PDU_RX,
1850                 "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n",
1851                 conn, skb, skb->len, cxgbi_skcb_flags(skb));
1852
1853         if (conn->datadgst_en &&
1854             cxgbi_skcb_test_flag(lskb, SKCBF_RX_DCRC_ERR)) {
1855                 pr_info("conn 0x%p, skb 0x%p, dcrc 0x%lx.\n",
1856                         conn, lskb, cxgbi_skcb_flags(lskb));
1857                 iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
1858                 return -EIO;
1859         }
1860
1861         if (iscsi_tcp_recv_segment_is_hdr(tcp_conn))
1862                 return 0;
1863
1864         /* coalesced, add header digest length */
1865         if (lskb == skb && conn->hdrdgst_en)
1866                 offset += ISCSI_DIGEST_SIZE;
1867
1868         if (cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA_DDPD))
1869                 offloaded = true;
1870
1871         if (opcode == ISCSI_OP_SCSI_DATA_IN)
1872                 log_debug(1 << CXGBI_DBG_PDU_RX,
1873                         "skb 0x%p, op 0x%x, itt 0x%x, %u %s ddp'ed.\n",
1874                         skb, opcode, ntohl(tcp_conn->in.hdr->itt),
1875                         tcp_conn->in.datalen, offloaded ? "is" : "not");
1876
1877         return read_pdu_skb(conn, skb, offset, offloaded);
1878 }
1879
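/*
 * csk_return_rx_credits - return rx credits (bytes consumed by the
 * iscsi layer) to the hardware so the receive window can re-open.
 * Credits are only pushed once the per-device threshold is reached or
 * the window is close to exhausted.
 */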
1880 static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied)
1881 {
1882         struct cxgbi_device *cdev = csk->cdev;
1883         int must_send;
1884         u32 credits;
1885
1886         log_debug(1 << CXGBI_DBG_PDU_RX,
1887                 "csk 0x%p,%u,0x%lx,%u, seq %u, wup %u, thre %u, %u.\n",
1888                 csk, csk->state, csk->flags, csk->tid, csk->copied_seq,
1889                 csk->rcv_wup, cdev->rx_credit_thres,
1890                 cdev->rcv_win);
1891
1892         if (csk->state != CTP_ESTABLISHED)
1893                 return;
1894
1895         credits = csk->copied_seq - csk->rcv_wup;
1896         if (unlikely(!credits))
1897                 return;
1898         if (unlikely(cdev->rx_credit_thres == 0))
1899                 return;
1900
1901         must_send = credits + 16384 >= cdev->rcv_win;
1902         if (must_send || credits >= cdev->rx_credit_thres)
1903                 csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits);
1904 }
1905
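/*
 * cxgbi_conn_pdu_ready - drain completed PDUs from csk->receive_queue
 * and feed them to libiscsi_tcp. A coalesced skb carries BHS and data
 * together; otherwise the data (possibly already ddp'ed) follows in a
 * separate skb. Bytes consumed are returned as rx credits.
 */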
1906 void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
1907 {
1908         struct cxgbi_device *cdev = csk->cdev;
1909         struct iscsi_conn *conn = csk->user_data;
1910         struct sk_buff *skb;
1911         unsigned int read = 0;
1912         int err = 0;
1913
1914         log_debug(1 << CXGBI_DBG_PDU_RX,
1915                 "csk 0x%p, conn 0x%p.\n", csk, conn);
1916
1917         if (unlikely(!conn || conn->suspend_rx)) {
1918                 log_debug(1 << CXGBI_DBG_PDU_RX,
1919                         "csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n",
1920                         csk, conn, conn ? conn->id : 0xFF,
1921                         conn ? conn->suspend_rx : 0xFF);
1922                 return;
1923         }
1924
1925         while (!err) {
1926                 skb = skb_peek(&csk->receive_queue);
1927                 if (!skb ||
1928                     !(cxgbi_skcb_test_flag(skb, SKCBF_RX_STATUS))) {
1929                         if (skb)
1930                                 log_debug(1 << CXGBI_DBG_PDU_RX,
1931                                         "skb 0x%p, NOT ready 0x%lx.\n",
1932                                         skb, cxgbi_skcb_flags(skb));
1933                         break;
1934                 }
1935                 __skb_unlink(skb, &csk->receive_queue);
1936
1937                 read += cxgbi_skcb_rx_pdulen(skb);
1938                 log_debug(1 << CXGBI_DBG_PDU_RX,
1939                         "csk 0x%p, skb 0x%p,%u,f 0x%lx, pdu len %u.\n",
1940                         csk, skb, skb->len, cxgbi_skcb_flags(skb),
1941                         cxgbi_skcb_rx_pdulen(skb));
1942
1943                 if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) {
1944                         err = skb_read_pdu_bhs(conn, skb);
1945                         if (err < 0) {
1946                                 pr_err("coalesced bhs, csk 0x%p, skb 0x%p,%u, "
1947                                         "f 0x%lx, plen %u.\n",
1948                                         csk, skb, skb->len,
1949                                         cxgbi_skcb_flags(skb),
1950                                         cxgbi_skcb_rx_pdulen(skb));
1951                                 goto skb_done;
1952                         }
1953                         err = skb_read_pdu_data(conn, skb, skb,
1954                                                 err + cdev->skb_rx_extra);
1955                         if (err < 0)
1956                                 pr_err("coalesced data, csk 0x%p, skb 0x%p,%u, "
1957                                         "f 0x%lx, plen %u.\n",
1958                                         csk, skb, skb->len,
1959                                         cxgbi_skcb_flags(skb),
1960                                         cxgbi_skcb_rx_pdulen(skb));
1961                 } else {
1962                         err = skb_read_pdu_bhs(conn, skb);
1963                         if (err < 0) {
1964                                 pr_err("bhs, csk 0x%p, skb 0x%p,%u, "
1965                                         "f 0x%lx, plen %u.\n",
1966                                         csk, skb, skb->len,
1967                                         cxgbi_skcb_flags(skb),
1968                                         cxgbi_skcb_rx_pdulen(skb));
1969                                 goto skb_done;
1970                         }
1971
1972                         if (cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) {
1973                                 struct sk_buff *dskb;
1974
1975                                 dskb = skb_peek(&csk->receive_queue);
1976                                 if (!dskb) {
1977                                         pr_err("csk 0x%p, skb 0x%p,%u, f 0x%lx,"
1978                                                 " plen %u, NO data.\n",
1979                                                 csk, skb, skb->len,
1980                                                 cxgbi_skcb_flags(skb),
1981                                                 cxgbi_skcb_rx_pdulen(skb));
1982                                         err = -EIO;
1983                                         goto skb_done;
1984                                 }
1985                                 __skb_unlink(dskb, &csk->receive_queue);
1986
1987                                 err = skb_read_pdu_data(conn, skb, dskb, 0);
1988                                 if (err < 0)
1989                                         pr_err("data, csk 0x%p, skb 0x%p,%u, "
1990                                                 "f 0x%lx, plen %u, dskb 0x%p,"
1991                                                 "%u.\n",
1992                                                 csk, skb, skb->len,
1993                                                 cxgbi_skcb_flags(skb),
1994                                                 cxgbi_skcb_rx_pdulen(skb),
1995                                                 dskb, dskb->len);
1996                                 __kfree_skb(dskb);
1997                         } else
1998                                 err = skb_read_pdu_data(conn, skb, skb, 0);
1999                 }
2000 skb_done:
2001                 __kfree_skb(skb);
2002
2003                 if (err < 0)
2004                         break;
2005         }
2006
2007         log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, read %u.\n", csk, read);
2008         if (read) {
2009                 csk->copied_seq += read;
2010                 csk_return_rx_credits(csk, read);
2011                 conn->rxdata_octets += read;
2012         }
2013
2014         if (err < 0) {
2015                 pr_info("csk 0x%p, 0x%p, rx failed %d, read %u.\n",
2016                         csk, conn, err, read);
2017                 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
2018         }
2019 }
2020 EXPORT_SYMBOL_GPL(cxgbi_conn_pdu_ready);
2021
2022 static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt,
2023                                 unsigned int offset, unsigned int *off,
2024                                 struct scatterlist **sgp)
2025 {
2026         int i;
2027         struct scatterlist *sg;
2028
2029         for_each_sg(sgl, sg, sgcnt, i) {
2030                 if (offset < sg->length) {
2031                         *off = offset;
2032                         *sgp = sg;
2033                         return 0;
2034                 }
2035                 offset -= sg->length;
2036         }
2037         return -EFAULT;
2038 }
2039
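/*
 * sgl_read_to_frags - walk @dlen bytes of the scatterlist starting at
 * @sgoffset and record the covered ranges as page frags, merging
 * pieces that continue contiguously within the same page. Returns the
 * number of frags used or a negative errno.
 */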
2040 static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
2041                                 unsigned int dlen, struct page_frag *frags,
2042                                 int frag_max)
2043 {
2044         unsigned int datalen = dlen;
2045         unsigned int sglen = sg->length - sgoffset;
2046         struct page *page = sg_page(sg);
2047         int i;
2048
2049         i = 0;
2050         do {
2051                 unsigned int copy;
2052
2053                 if (!sglen) {
2054                         sg = sg_next(sg);
2055                         if (!sg) {
2056                                 pr_warn("sg %d NULL, len %u/%u.\n",
2057                                         i, datalen, dlen);
2058                                 return -EINVAL;
2059                         }
2060                         sgoffset = 0;
2061                         sglen = sg->length;
2062                         page = sg_page(sg);
2063
2064                 }
2065                 copy = min(datalen, sglen);
2066                 if (i && page == frags[i - 1].page &&
2067                     sgoffset + sg->offset ==
2068                         frags[i - 1].offset + frags[i - 1].size) {
2069                         frags[i - 1].size += copy;
2070                 } else {
2071                         if (i >= frag_max) {
2072                                 pr_warn("too many pages %u, dlen %u.\n",
2073                                         frag_max, dlen);
2074                                 return -EINVAL;
2075                         }
2076
2077                         frags[i].page = page;
2078                         frags[i].offset = sg->offset + sgoffset;
2079                         frags[i].size = copy;
2080                         i++;
2081                 }
2082                 datalen -= copy;
2083                 sgoffset += copy;
2084                 sglen -= copy;
2085         } while (datalen);
2086
2087         return i;
2088 }
2089
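/*
 * cxgbi_conn_alloc_pdu - allocate the tx skb for a PDU. The skb always
 * has room for the BHS + AHS; for outgoing data PDUs extra headroom
 * may be reserved so small payloads can be copied into the skb head.
 * An ITT (ddp or non-ddp) is reserved for everything but DATA_OUT.
 */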
2090 int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
2091 {
2092         struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
2093         struct cxgbi_conn *cconn = tcp_conn->dd_data;
2094         struct cxgbi_device *cdev = cconn->chba->cdev;
2095         struct iscsi_conn *conn = task->conn;
2096         struct iscsi_tcp_task *tcp_task = task->dd_data;
2097         struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
2098         struct scsi_cmnd *sc = task->sc;
2099         int headroom = SKB_TX_ISCSI_PDU_HEADER_MAX;
2100
2101         tcp_task->dd_data = tdata;
2102         task->hdr = NULL;
2103
2104         if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) &&
2105             (opcode == ISCSI_OP_SCSI_DATA_OUT ||
2106              (opcode == ISCSI_OP_SCSI_CMD &&
2107               (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE))))
2108                 /* data could go into the skb head */
2109                 headroom += min_t(unsigned int,
2110                                 SKB_MAX_HEAD(cdev->skb_tx_rsvd),
2111                                 conn->max_xmit_dlength);
2112
2113         tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC);
2114         if (!tdata->skb) {
2115                 struct cxgbi_sock *csk = cconn->cep->csk;
2116                 struct net_device *ndev = cdev->ports[csk->port_id];
2117                 ndev->stats.tx_dropped++;
2118                 return -ENOMEM;
2119         }
2120
2121         skb_reserve(tdata->skb, cdev->skb_tx_rsvd);
2122         task->hdr = (struct iscsi_hdr *)tdata->skb->data;
2123         task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */
2124
2125         /* data_out uses scsi_cmd's itt */
2126         if (opcode != ISCSI_OP_SCSI_DATA_OUT)
2127                 task_reserve_itt(task, &task->hdr->itt);
2128
2129         log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
2130                 "task 0x%p, op 0x%x, skb 0x%p,%u+%u/%u, itt 0x%x.\n",
2131                 task, opcode, tdata->skb, cdev->skb_tx_rsvd, headroom,
2132                 conn->max_xmit_dlength, ntohl(task->hdr->itt));
2133
2134         return 0;
2135 }
2136 EXPORT_SYMBOL_GPL(cxgbi_conn_alloc_pdu);
2137
2138 static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
2139 {
2140         if (hcrc || dcrc) {
2141                 u8 submode = 0;
2142
2143                 if (hcrc)
2144                         submode |= 1;
2145                 if (dcrc)
2146                         submode |= 2;
2147                 cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode;
2148         } else
2149                 cxgbi_skcb_ulp_mode(skb) = 0;
2150 }
2151
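/*
 * cxgbi_conn_init_pdu - attach @count bytes of payload plus iSCSI pad
 * to the PDU skb: copied into the skb head when there are too many
 * fragments, otherwise referenced as page frags.
 */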
2152 int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
2153                               unsigned int count)
2154 {
2155         struct iscsi_conn *conn = task->conn;
2156         struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
2157         struct sk_buff *skb = tdata->skb;
2158         unsigned int datalen = count;
2159         int i, padlen = iscsi_padding(count);
2160         struct page *pg;
2161
2162         log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
2163                 "task 0x%p,0x%p, skb 0x%p, 0x%x,0x%x,0x%x, %u+%u.\n",
2164                 task, task->sc, skb, (*skb->data) & ISCSI_OPCODE_MASK,
2165                 ntohl(task->cmdsn), ntohl(task->hdr->itt), offset, count);
2166
2167         skb_put(skb, task->hdr_len);
2168         tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0);
2169         if (!count)
2170                 return 0;
2171
2172         if (task->sc) {
2173                 struct scsi_data_buffer *sdb = scsi_out(task->sc);
2174                 struct scatterlist *sg = NULL;
2175                 int err;
2176
2177                 tdata->offset = offset;
2178                 tdata->count = count;
2179                 err = sgl_seek_offset(
2180                                         sdb->table.sgl, sdb->table.nents,
2181                                         tdata->offset, &tdata->sgoffset, &sg);
2182                 if (err < 0) {
2183                         pr_warn("tpdu, sgl %u, bad offset %u/%u.\n",
2184                                 sdb->table.nents, tdata->offset, sdb->length);
2185                         return err;
2186                 }
2187                 err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count,
2188                                         tdata->frags, MAX_PDU_FRAGS);
2189                 if (err < 0) {
2190                         pr_warn("tpdu, sgl %u, bad offset %u + %u.\n",
2191                                 sdb->table.nents, tdata->offset, tdata->count);
2192                         return err;
2193                 }
2194                 tdata->nr_frags = err;
2195
2196                 if (tdata->nr_frags > MAX_SKB_FRAGS ||
2197                     (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) {
2198                         char *dst = skb->data + task->hdr_len;
2199                         struct page_frag *frag = tdata->frags;
2200
2201                         /* too many frags: copy data into the skb's headroom */
2202                         for (i = 0; i < tdata->nr_frags; i++, frag++) {
2203                                 char *src = kmap_atomic(frag->page);
2204
2205                                 memcpy(dst, src+frag->offset, frag->size);
2206                                 dst += frag->size;
2207                                 kunmap_atomic(src);
2208                         }
2209                         if (padlen) {
2210                                 memset(dst, 0, padlen);
2211                                 padlen = 0;
2212                         }
2213                         skb_put(skb, count + padlen);
2214                 } else {
2215                         /* data fits into the skb's page frags */
2216                         for (i = 0; i < tdata->nr_frags; i++) {
2217                                 __skb_fill_page_desc(skb, i,
2218                                                 tdata->frags[i].page,
2219                                                 tdata->frags[i].offset,
2220                                                 tdata->frags[i].size);
2221                                 skb_frag_ref(skb, i);
2222                         }
2223                         skb_shinfo(skb)->nr_frags = tdata->nr_frags;
2224                         skb->len += count;
2225                         skb->data_len += count;
2226                         skb->truesize += count;
2227                 }
2228
2229         } else {
2230                 pg = virt_to_page(task->data);
2231
2232                 get_page(pg);
2233                 skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data),
2234                                         count);
2235                 skb->len += count;
2236                 skb->data_len += count;
2237                 skb->truesize += count;
2238         }
2239
2240         if (padlen) {
2241                 i = skb_shinfo(skb)->nr_frags;
2242                 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
2243                                 virt_to_page(padding), offset_in_page(padding),
2244                                 padlen);
2245
2246                 skb->data_len += padlen;
2247                 skb->truesize += padlen;
2248                 skb->len += padlen;
2249         }
2250
2251         return 0;
2252 }
2253 EXPORT_SYMBOL_GPL(cxgbi_conn_init_pdu);
2254
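/*
 * cxgbi_conn_xmit_pdu - hand the prepared PDU skb to the offloaded
 * socket. On -EAGAIN/-ENOBUFS the skb is kept in tdata so the next
 * call retries it; any other error fails the connection.
 */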
2255 int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
2256 {
2257         struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
2258         struct cxgbi_conn *cconn = tcp_conn->dd_data;
2259         struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
2260         struct sk_buff *skb = tdata->skb;
2261         unsigned int datalen;
2262         int err;
2263
2264         if (!skb) {
2265                 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
2266                         "task 0x%p, skb NULL.\n", task);
2267                 return 0;
2268         }
2269
2270         datalen = skb->data_len;
2271         tdata->skb = NULL;
2272         err = cxgbi_sock_send_pdus(cconn->cep->csk, skb);
2273         if (err > 0) {
2274                 int pdulen = err;
2275
2276                 log_debug(1 << CXGBI_DBG_PDU_TX,
2277                         "task 0x%p,0x%p, skb 0x%p, len %u/%u, rv %d.\n",
2278                         task, task->sc, skb, skb->len, skb->data_len, err);
2279
2280                 if (task->conn->hdrdgst_en)
2281                         pdulen += ISCSI_DIGEST_SIZE;
2282
2283                 if (datalen && task->conn->datadgst_en)
2284                         pdulen += ISCSI_DIGEST_SIZE;
2285
2286                 task->conn->txdata_octets += pdulen;
2287                 return 0;
2288         }
2289
2290         if (err == -EAGAIN || err == -ENOBUFS) {
2291                 log_debug(1 << CXGBI_DBG_PDU_TX,
2292                         "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n",
2293                         task, skb, skb->len, skb->data_len, err);
2294                 /* keep the skb so it is resent when we are called again */
2295                 tdata->skb = skb;
2296                 return err;
2297         }
2298
2299         kfree_skb(skb);
2300         log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
2301                 "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
2302                 task->itt, skb, skb->len, skb->data_len, err);
2303         iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
2304         iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
2305         return err;
2306 }
2307 EXPORT_SYMBOL_GPL(cxgbi_conn_xmit_pdu);
2308
2309 void cxgbi_cleanup_task(struct iscsi_task *task)
2310 {
2311         struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
2312
2313         log_debug(1 << CXGBI_DBG_ISCSI,
2314                 "task 0x%p, skb 0x%p, itt 0x%x.\n",
2315                 task, tdata->skb, task->hdr_itt);
2316
2317         /*  the skb never reached the xmit task callout */
2318         if (tdata->skb)
2319                 __kfree_skb(tdata->skb);
2320         memset(tdata, 0, sizeof(*tdata));
2321
2322         task_release_itt(task, task->hdr_itt);
2323         iscsi_tcp_cleanup_task(task);
2324 }
2325 EXPORT_SYMBOL_GPL(cxgbi_cleanup_task);
2326
2327 void cxgbi_get_conn_stats(struct iscsi_cls_conn *cls_conn,
2328                                 struct iscsi_stats *stats)
2329 {
2330         struct iscsi_conn *conn = cls_conn->dd_data;
2331
2332         stats->txdata_octets = conn->txdata_octets;
2333         stats->rxdata_octets = conn->rxdata_octets;
2334         stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
2335         stats->dataout_pdus = conn->dataout_pdus_cnt;
2336         stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
2337         stats->datain_pdus = conn->datain_pdus_cnt;
2338         stats->r2t_pdus = conn->r2t_pdus_cnt;
2339         stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
2340         stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
2341         stats->digest_err = 0;
2342         stats->timeout_err = 0;
2343         stats->custom_length = 1;
2344         strcpy(stats->custom[0].desc, "eh_abort_cnt");
2345         stats->custom[0].value = conn->eh_abort_cnt;
2346 }
2347 EXPORT_SYMBOL_GPL(cxgbi_get_conn_stats);
2348
2349 static int cxgbi_conn_max_xmit_dlength(struct iscsi_conn *conn)
2350 {
2351         struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
2352         struct cxgbi_conn *cconn = tcp_conn->dd_data;
2353         struct cxgbi_device *cdev = cconn->chba->cdev;
2354         unsigned int headroom = SKB_MAX_HEAD(cdev->skb_tx_rsvd);
2355         unsigned int max_def = 512 * MAX_SKB_FRAGS;
2356         unsigned int max = max(max_def, headroom);
2357
2358         max = min(cconn->chba->cdev->tx_max_size, max);
2359         if (conn->max_xmit_dlength)
2360                 conn->max_xmit_dlength = min(conn->max_xmit_dlength, max);
2361         else
2362                 conn->max_xmit_dlength = max;
2363         cxgbi_align_pdu_size(conn->max_xmit_dlength);
2364
2365         return 0;
2366 }
2367
2368 static int cxgbi_conn_max_recv_dlength(struct iscsi_conn *conn)
2369 {
2370         struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
2371         struct cxgbi_conn *cconn = tcp_conn->dd_data;
2372         unsigned int max = cconn->chba->cdev->rx_max_size;
2373
2374         cxgbi_align_pdu_size(max);
2375
2376         if (conn->max_recv_dlength) {
2377                 if (conn->max_recv_dlength > max) {
2378                         pr_err("MaxRecvDataSegmentLength %u > %u.\n",
2379                                 conn->max_recv_dlength, max);
2380                         return -EINVAL;
2381                 }
2382                 conn->max_recv_dlength = min(conn->max_recv_dlength, max);
2383                 cxgbi_align_pdu_size(conn->max_recv_dlength);
2384         } else
2385                 conn->max_recv_dlength = max;
2386
2387         return 0;
2388 }
2389
2390 int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
2391                         enum iscsi_param param, char *buf, int buflen)
2392 {
2393         struct iscsi_conn *conn = cls_conn->dd_data;
2394         struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
2395         struct cxgbi_conn *cconn = tcp_conn->dd_data;
2396         struct cxgbi_sock *csk = cconn->cep->csk;
2397         int err;
2398
2399         log_debug(1 << CXGBI_DBG_ISCSI,
2400                 "cls_conn 0x%p, param %d, buf(%d) %s.\n",
2401                 cls_conn, param, buflen, buf);
2402
2403         switch (param) {
2404         case ISCSI_PARAM_HDRDGST_EN:
2405                 err = iscsi_set_param(cls_conn, param, buf, buflen);
2406                 if (!err && conn->hdrdgst_en)
2407                         err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
2408                                                         conn->hdrdgst_en,
2409                                                         conn->datadgst_en, 0);
2410                 break;
2411         case ISCSI_PARAM_DATADGST_EN:
2412                 err = iscsi_set_param(cls_conn, param, buf, buflen);
2413                 if (!err && conn->datadgst_en)
2414                         err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
2415                                                         conn->hdrdgst_en,
2416                                                         conn->datadgst_en, 0);
2417                 break;
2418         case ISCSI_PARAM_MAX_R2T:
2419                 return iscsi_tcp_set_max_r2t(conn, buf);
2420         case ISCSI_PARAM_MAX_RECV_DLENGTH:
2421                 err = iscsi_set_param(cls_conn, param, buf, buflen);
2422                 if (!err)
2423                         err = cxgbi_conn_max_recv_dlength(conn);
2424                 break;
2425         case ISCSI_PARAM_MAX_XMIT_DLENGTH:
2426                 err = iscsi_set_param(cls_conn, param, buf, buflen);
2427                 if (!err)
2428                         err = cxgbi_conn_max_xmit_dlength(conn);
2429                 break;
2430         default:
2431                 return iscsi_set_param(cls_conn, param, buf, buflen);
2432         }
2433         return err;
2434 }
2435 EXPORT_SYMBOL_GPL(cxgbi_set_conn_param);
2436
2437 static inline int csk_print_port(struct cxgbi_sock *csk, char *buf)
2438 {
2439         int len;
2440
2441         cxgbi_sock_get(csk);
2442         len = sprintf(buf, "%hu\n", ntohs(csk->daddr.sin_port));
2443         cxgbi_sock_put(csk);
2444
2445         return len;
2446 }
2447
2448 static inline int csk_print_ip(struct cxgbi_sock *csk, char *buf)
2449 {
2450         int len;
2451
2452         cxgbi_sock_get(csk);
2453         if (csk->csk_family == AF_INET)
2454                 len = sprintf(buf, "%pI4",
2455                               &csk->daddr.sin_addr.s_addr);
2456         else
2457                 len = sprintf(buf, "%pI6",
2458                               &csk->daddr6.sin6_addr);
2459
2460         cxgbi_sock_put(csk);
2461
2462         return len;
2463 }
2464
2465 int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param,
2466                        char *buf)
2467 {
2468         struct cxgbi_endpoint *cep = ep->dd_data;
2469         struct cxgbi_sock *csk;
2470         int len;
2471
2472         log_debug(1 << CXGBI_DBG_ISCSI,
2473                 "cls_conn 0x%p, param %d.\n", ep, param);
2474
2475         switch (param) {
2476         case ISCSI_PARAM_CONN_PORT:
2477         case ISCSI_PARAM_CONN_ADDRESS:
2478                 if (!cep)
2479                         return -ENOTCONN;
2480
2481                 csk = cep->csk;
2482                 if (!csk)
2483                         return -ENOTCONN;
2484
2485                 return iscsi_conn_get_addr_param((struct sockaddr_storage *)
2486                                                  &csk->daddr, param, buf);
2487         default:
2488                 return -ENOSYS;
2489         }
2490         return len;
2491 }
2492 EXPORT_SYMBOL_GPL(cxgbi_get_ep_param);
2493
2494 struct iscsi_cls_conn *
2495 cxgbi_create_conn(struct iscsi_cls_session *cls_session, u32 cid)
2496 {
2497         struct iscsi_cls_conn *cls_conn;
2498         struct iscsi_conn *conn;
2499         struct iscsi_tcp_conn *tcp_conn;
2500         struct cxgbi_conn *cconn;
2501
2502         cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*cconn), cid);
2503         if (!cls_conn)
2504                 return NULL;
2505
2506         conn = cls_conn->dd_data;
2507         tcp_conn = conn->dd_data;
2508         cconn = tcp_conn->dd_data;
2509         cconn->iconn = conn;
2510
2511         log_debug(1 << CXGBI_DBG_ISCSI,
2512                 "cid %u(0x%x), cls 0x%p,0x%p, conn 0x%p,0x%p,0x%p.\n",
2513                 cid, cid, cls_session, cls_conn, conn, tcp_conn, cconn);
2514
2515         return cls_conn;
2516 }
2517 EXPORT_SYMBOL_GPL(cxgbi_create_conn);
2518
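/*
 * cxgbi_bind_conn - bind an iscsi connection to its offloaded socket:
 * program the ddp page size for the tid, size the per-task itt index
 * bits from the session's cmds_max, wire up the csk <-> conn pointers
 * and recalculate the max xmit/recv PDU data lengths.
 */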
2519 int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
2520                                 struct iscsi_cls_conn *cls_conn,
2521                                 u64 transport_eph, int is_leading)
2522 {
2523         struct iscsi_conn *conn = cls_conn->dd_data;
2524         struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
2525         struct cxgbi_conn *cconn = tcp_conn->dd_data;
2526         struct iscsi_endpoint *ep;
2527         struct cxgbi_endpoint *cep;
2528         struct cxgbi_sock *csk;
2529         int err;
2530
2531         ep = iscsi_lookup_endpoint(transport_eph);
2532         if (!ep)
2533                 return -EINVAL;
2534
2535         /*  setup ddp pagesize */
2536         cep = ep->dd_data;
2537         csk = cep->csk;
2538         err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, page_idx, 0);
2539         if (err < 0)
2540                 return err;
2541
2542         err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
2543         if (err)
2544                 return -EINVAL;
2545
2546         /*  calculate the tag idx bits needed for this conn based on cmds_max */
2547         cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1;
2548
2549         write_lock_bh(&csk->callback_lock);
2550         csk->user_data = conn;
2551         cconn->chba = cep->chba;
2552         cconn->cep = cep;
2553         cep->cconn = cconn;
2554         write_unlock_bh(&csk->callback_lock);
2555
2556         cxgbi_conn_max_xmit_dlength(conn);
2557         cxgbi_conn_max_recv_dlength(conn);
2558
2559         log_debug(1 << CXGBI_DBG_ISCSI,
2560                 "cls 0x%p,0x%p, ep 0x%p, cconn 0x%p, csk 0x%p.\n",
2561                 cls_session, cls_conn, ep, cconn, csk);
2562         /*  init recv engine */
2563         iscsi_tcp_hdr_recv_prep(tcp_conn);
2564
2565         return 0;
2566 }
2567 EXPORT_SYMBOL_GPL(cxgbi_bind_conn);
2568
2569 struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *ep,
2570                                                 u16 cmds_max, u16 qdepth,
2571                                                 u32 initial_cmdsn)
2572 {
2573         struct cxgbi_endpoint *cep;
2574         struct cxgbi_hba *chba;
2575         struct Scsi_Host *shost;
2576         struct iscsi_cls_session *cls_session;
2577         struct iscsi_session *session;
2578
2579         if (!ep) {
2580                 pr_err("missing endpoint.\n");
2581                 return NULL;
2582         }
2583
2584         cep = ep->dd_data;
2585         chba = cep->chba;
2586         shost = chba->shost;
2587
2588         BUG_ON(chba != iscsi_host_priv(shost));
2589
2590         cls_session = iscsi_session_setup(chba->cdev->itp, shost,
2591                                         cmds_max, 0,
2592                                         sizeof(struct iscsi_tcp_task) +
2593                                         sizeof(struct cxgbi_task_data),
2594                                         initial_cmdsn, ISCSI_MAX_TARGET);
2595         if (!cls_session)
2596                 return NULL;
2597
2598         session = cls_session->dd_data;
2599         if (iscsi_tcp_r2tpool_alloc(session))
2600                 goto remove_session;
2601
2602         log_debug(1 << CXGBI_DBG_ISCSI,
2603                 "ep 0x%p, cls sess 0x%p.\n", ep, cls_session);
2604         return cls_session;
2605
2606 remove_session:
2607         iscsi_session_teardown(cls_session);
2608         return NULL;
2609 }
2610 EXPORT_SYMBOL_GPL(cxgbi_create_session);
2611
2612 void cxgbi_destroy_session(struct iscsi_cls_session *cls_session)
2613 {
2614         log_debug(1 << CXGBI_DBG_ISCSI,
2615                 "cls sess 0x%p.\n", cls_session);
2616
2617         iscsi_tcp_r2tpool_free(cls_session->dd_data);
2618         iscsi_session_teardown(cls_session);
2619 }
2620 EXPORT_SYMBOL_GPL(cxgbi_destroy_session);
2621
2622 int cxgbi_set_host_param(struct Scsi_Host *shost, enum iscsi_host_param param,
2623                         char *buf, int buflen)
2624 {
2625         struct cxgbi_hba *chba = iscsi_host_priv(shost);
2626
2627         if (!chba->ndev) {
2628                 shost_printk(KERN_ERR, shost, "Could not set host param. "
2629                                 "netdev for host not set.\n");
2630                 return -ENODEV;
2631         }
2632
2633         log_debug(1 << CXGBI_DBG_ISCSI,
2634                 "shost 0x%p, hba 0x%p,%s, param %d, buf(%d) %s.\n",
2635                 shost, chba, chba->ndev->name, param, buflen, buf);
2636
2637         switch (param) {
2638         case ISCSI_HOST_PARAM_IPADDRESS:
2639         {
2640                 __be32 addr = in_aton(buf);
2641                 log_debug(1 << CXGBI_DBG_ISCSI,
2642                         "hba %s, req. ipv4 %pI4.\n", chba->ndev->name, &addr);
2643                 cxgbi_set_iscsi_ipv4(chba, addr);
2644                 return 0;
2645         }
2646         case ISCSI_HOST_PARAM_HWADDRESS:
2647         case ISCSI_HOST_PARAM_NETDEV_NAME:
2648                 return 0;
2649         default:
2650                 return iscsi_host_set_param(shost, param, buf, buflen);
2651         }
2652 }
2653 EXPORT_SYMBOL_GPL(cxgbi_set_host_param);
2654
2655 int cxgbi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param,
2656                         char *buf)
2657 {
2658         struct cxgbi_hba *chba = iscsi_host_priv(shost);
2659         int len = 0;
2660
2661         if (!chba->ndev) {
2662                 shost_printk(KERN_ERR, shost, "Could not get host param. "
2663                                 "netdev for host not set.\n");
2664                 return -ENODEV;
2665         }
2666
2667         log_debug(1 << CXGBI_DBG_ISCSI,
2668                 "shost 0x%p, hba 0x%p,%s, param %d.\n",
2669                 shost, chba, chba->ndev->name, param);
2670
2671         switch (param) {
2672         case ISCSI_HOST_PARAM_HWADDRESS:
2673                 len = sysfs_format_mac(buf, chba->ndev->dev_addr, 6);
2674                 break;
2675         case ISCSI_HOST_PARAM_NETDEV_NAME:
2676                 len = sprintf(buf, "%s\n", chba->ndev->name);
2677                 break;
2678         case ISCSI_HOST_PARAM_IPADDRESS:
2679         {
2680                 struct cxgbi_sock *csk = find_sock_on_port(chba->cdev,
2681                                                            chba->port_id);
2682                 if (csk) {
2683                         len = sprintf(buf, "%pIS",
2684                                       (struct sockaddr *)&csk->saddr);
2685                 }
2686                 log_debug(1 << CXGBI_DBG_ISCSI,
2687                           "hba %s, addr %s.\n", chba->ndev->name, buf);
2688                 break;
2689         }
2690         default:
2691                 return iscsi_host_get_param(shost, param, buf);
2692         }
2693
2694         return len;
2695 }
2696 EXPORT_SYMBOL_GPL(cxgbi_get_host_param);
2697
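/*
 * cxgbi_ep_connect - create an offloaded connection to @dst_addr:
 * resolve the route to pick the egress port/hba, grab a source port,
 * start the active open and return the new endpoint. The connect is
 * rejected if the route does not go through the requested host.
 */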
2698 struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost,
2699                                         struct sockaddr *dst_addr,
2700                                         int non_blocking)
2701 {
2702         struct iscsi_endpoint *ep;
2703         struct cxgbi_endpoint *cep;
2704         struct cxgbi_hba *hba = NULL;
2705         struct cxgbi_sock *csk;
2706         int err = -EINVAL;
2707
2708         log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
2709                 "shost 0x%p, non_blocking %d, dst_addr 0x%p.\n",
2710                 shost, non_blocking, dst_addr);
2711
2712         if (shost) {
2713                 hba = iscsi_host_priv(shost);
2714                 if (!hba) {
2715                         pr_info("shost 0x%p, priv NULL.\n", shost);
2716                         goto err_out;
2717                 }
2718         }
2719
2720         if (dst_addr->sa_family == AF_INET) {
2721                 csk = cxgbi_check_route(dst_addr);
2722 #if IS_ENABLED(CONFIG_IPV6)
2723         } else if (dst_addr->sa_family == AF_INET6) {
2724                 csk = cxgbi_check_route6(dst_addr);
2725 #endif
2726         } else {
2727                 pr_info("address family 0x%x NOT supported.\n",
2728                         dst_addr->sa_family);
2729                 err = -EAFNOSUPPORT;
2730                 return (struct iscsi_endpoint *)ERR_PTR(err);
2731         }
2732
2733         if (IS_ERR(csk))
2734                 return (struct iscsi_endpoint *)csk;
2735         cxgbi_sock_get(csk);
2736
2737         if (!hba)
2738                 hba = csk->cdev->hbas[csk->port_id];
2739         else if (hba != csk->cdev->hbas[csk->port_id]) {
2740                 pr_info("Could not connect through requested host %u, "
2741                         "hba 0x%p != 0x%p (%u).\n",
2742                         shost->host_no, hba,
2743                         csk->cdev->hbas[csk->port_id], csk->port_id);
2744                 err = -ENOSPC;
2745                 goto release_conn;
2746         }
2747
2748         err = sock_get_port(csk);
2749         if (err)
2750                 goto release_conn;
2751
2752         cxgbi_sock_set_state(csk, CTP_CONNECTING);
2753         err = csk->cdev->csk_init_act_open(csk);
2754         if (err)
2755                 goto release_conn;
2756
2757         if (cxgbi_sock_is_closing(csk)) {
2758                 err = -ENOSPC;
2759                 pr_info("csk 0x%p is closing.\n", csk);
2760                 goto release_conn;
2761         }
2762
2763         ep = iscsi_create_endpoint(sizeof(*cep));
2764         if (!ep) {
2765                 err = -ENOMEM;
2766                 pr_info("iscsi alloc ep, OOM.\n");
2767                 goto release_conn;
2768         }
2769
2770         cep = ep->dd_data;
2771         cep->csk = csk;
2772         cep->chba = hba;
2773
2774         log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
2775                 "ep 0x%p, cep 0x%p, csk 0x%p, hba 0x%p,%s.\n",
2776                 ep, cep, csk, hba, hba->ndev->name);
2777         return ep;
2778
2779 release_conn:
2780         cxgbi_sock_put(csk);
2781         cxgbi_sock_closed(csk);
2782 err_out:
2783         return ERR_PTR(err);
2784 }
2785 EXPORT_SYMBOL_GPL(cxgbi_ep_connect);
2786
2787 int cxgbi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
2788 {
2789         struct cxgbi_endpoint *cep = ep->dd_data;
2790         struct cxgbi_sock *csk = cep->csk;
2791
2792         if (!cxgbi_sock_is_established(csk))
2793                 return 0;
2794         return 1;
2795 }
2796 EXPORT_SYMBOL_GPL(cxgbi_ep_poll);
2797
2798 void cxgbi_ep_disconnect(struct iscsi_endpoint *ep)
2799 {
2800         struct cxgbi_endpoint *cep = ep->dd_data;
2801         struct cxgbi_conn *cconn = cep->cconn;
2802         struct cxgbi_sock *csk = cep->csk;
2803
2804         log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
2805                 "ep 0x%p, cep 0x%p, cconn 0x%p, csk 0x%p,%u,0x%lx.\n",
2806                 ep, cep, cconn, csk, csk->state, csk->flags);
2807
2808         if (cconn && cconn->iconn) {
2809                 iscsi_suspend_tx(cconn->iconn);
2810                 write_lock_bh(&csk->callback_lock);
2811                 cep->csk->user_data = NULL;
2812                 cconn->cep = NULL;
2813                 write_unlock_bh(&csk->callback_lock);
2814         }
2815         iscsi_destroy_endpoint(ep);
2816
2817         if (likely(csk->state >= CTP_ESTABLISHED))
2818                 need_active_close(csk);
2819         else
2820                 cxgbi_sock_closed(csk);
2821
2822         cxgbi_sock_put(csk);
2823 }
2824 EXPORT_SYMBOL_GPL(cxgbi_ep_disconnect);
2825
2826 int cxgbi_iscsi_init(struct iscsi_transport *itp,
2827                         struct scsi_transport_template **stt)
2828 {
2829         *stt = iscsi_register_transport(itp);
2830         if (*stt == NULL) {
2831                 pr_err("unable to register %s transport 0x%p.\n",
2832                         itp->name, itp);
2833                 return -ENODEV;
2834         }
2835         log_debug(1 << CXGBI_DBG_ISCSI,
2836                 "%s, registered iscsi transport 0x%p.\n",
2837                 itp->name, stt);
2838         return 0;
2839 }
2840 EXPORT_SYMBOL_GPL(cxgbi_iscsi_init);
2841
2842 void cxgbi_iscsi_cleanup(struct iscsi_transport *itp,
2843                         struct scsi_transport_template **stt)
2844 {
2845         if (*stt) {
2846                 log_debug(1 << CXGBI_DBG_ISCSI,
2847                         "de-register transport 0x%p, %s, stt 0x%p.\n",
2848                         itp, itp->name, *stt);
2849                 *stt = NULL;
2850                 iscsi_unregister_transport(itp);
2851         }
2852 }
2853 EXPORT_SYMBOL_GPL(cxgbi_iscsi_cleanup);
2854
2855 umode_t cxgbi_attr_is_visible(int param_type, int param)
2856 {
2857         switch (param_type) {
2858         case ISCSI_HOST_PARAM:
2859                 switch (param) {
2860                 case ISCSI_HOST_PARAM_NETDEV_NAME:
2861                 case ISCSI_HOST_PARAM_HWADDRESS:
2862                 case ISCSI_HOST_PARAM_IPADDRESS:
2863                 case ISCSI_HOST_PARAM_INITIATOR_NAME:
2864                         return S_IRUGO;
2865                 default:
2866                         return 0;
2867                 }
2868         case ISCSI_PARAM:
2869                 switch (param) {
2870                 case ISCSI_PARAM_MAX_RECV_DLENGTH:
2871                 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
2872                 case ISCSI_PARAM_HDRDGST_EN:
2873                 case ISCSI_PARAM_DATADGST_EN:
2874                 case ISCSI_PARAM_CONN_ADDRESS:
2875                 case ISCSI_PARAM_CONN_PORT:
2876                 case ISCSI_PARAM_EXP_STATSN:
2877                 case ISCSI_PARAM_PERSISTENT_ADDRESS:
2878                 case ISCSI_PARAM_PERSISTENT_PORT:
2879                 case ISCSI_PARAM_PING_TMO:
2880                 case ISCSI_PARAM_RECV_TMO:
2881                 case ISCSI_PARAM_INITIAL_R2T_EN:
2882                 case ISCSI_PARAM_MAX_R2T:
2883                 case ISCSI_PARAM_IMM_DATA_EN:
2884                 case ISCSI_PARAM_FIRST_BURST:
2885                 case ISCSI_PARAM_MAX_BURST:
2886                 case ISCSI_PARAM_PDU_INORDER_EN:
2887                 case ISCSI_PARAM_DATASEQ_INORDER_EN:
2888                 case ISCSI_PARAM_ERL:
2889                 case ISCSI_PARAM_TARGET_NAME:
2890                 case ISCSI_PARAM_TPGT:
2891                 case ISCSI_PARAM_USERNAME:
2892                 case ISCSI_PARAM_PASSWORD:
2893                 case ISCSI_PARAM_USERNAME_IN:
2894                 case ISCSI_PARAM_PASSWORD_IN:
2895                 case ISCSI_PARAM_FAST_ABORT:
2896                 case ISCSI_PARAM_ABORT_TMO:
2897                 case ISCSI_PARAM_LU_RESET_TMO:
2898                 case ISCSI_PARAM_TGT_RESET_TMO:
2899                 case ISCSI_PARAM_IFACE_NAME:
2900                 case ISCSI_PARAM_INITIATOR_NAME:
2901                         return S_IRUGO;
2902                 default:
2903                         return 0;
2904                 }
2905         }
2906
2907         return 0;
2908 }
2909 EXPORT_SYMBOL_GPL(cxgbi_attr_is_visible);
2910
2911 static int __init libcxgbi_init_module(void)
2912 {
2913         sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1;
2914         sw_tag_age_bits = (__ilog2_u32(ISCSI_AGE_MASK)) + 1;
2915
2916         pr_info("tag itt 0x%x, %u bits, age 0x%x, %u bits.\n",
2917                 ISCSI_ITT_MASK, sw_tag_idx_bits,
2918                 ISCSI_AGE_MASK, sw_tag_age_bits);
2919
2920         ddp_setup_host_page_size();
2921         return 0;
2922 }
2923
2924 static void __exit libcxgbi_exit_module(void)
2925 {
2926         cxgbi_device_unregister_all(0xFF);
2927         return;
2928 }
2929
2930 module_init(libcxgbi_init_module);
2931 module_exit(libcxgbi_exit_module);