/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2015 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
10 #include <linux/module.h>
12 #include <linux/stringify.h>
13 #include <linux/kernel.h>
14 #include <linux/timer.h>
15 #include <linux/errno.h>
16 #include <linux/ioport.h>
17 #include <linux/slab.h>
18 #include <linux/vmalloc.h>
19 #include <linux/interrupt.h>
20 #include <linux/pci.h>
21 #include <linux/netdevice.h>
22 #include <linux/etherdevice.h>
23 #include <linux/skbuff.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/bitops.h>
27 #include <linux/irq.h>
28 #include <linux/delay.h>
29 #include <asm/byteorder.h>
31 #include <linux/time.h>
32 #include <linux/mii.h>
34 #include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
39 #include <net/ip6_checksum.h>
#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
#include <net/vxlan.h>
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif
46 #include <linux/workqueue.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/log2.h>
50 #include <linux/aer.h>
51 #include <linux/bitmap.h>
52 #include <linux/cpu_rmap.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_sriov.h"
57 #include "bnxt_ethtool.h"
59 #define BNXT_TX_TIMEOUT (5 * HZ)
61 static const char version[] =
62 "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
64 MODULE_LICENSE("GPL");
65 MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
66 MODULE_VERSION(DRV_MODULE_VERSION);
68 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
69 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
70 #define BNXT_RX_COPY_THRESH 256
72 #define BNXT_TX_PUSH_THRESH 92
/* indexed by enum above */
static const struct {
	char *name;
} board_info[] = {
	{ "Broadcom BCM57302 NetXtreme-C Single-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
	{ "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
	{ "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57406 NetXtreme-E Dual-port 10Gb Ethernet" },
	{ "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" },
	{ "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" },
};
static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
static const u16 bnxt_vf_req_snif[] = {
	HWRM_CFA_L2_FILTER_ALLOC,
};
static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == BCM57304_VF || idx == BCM57404_VF);
}
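
/* Completion ring doorbell helpers: each write hands the chip the current
 * consumer index along with the key/valid flags, optionally disabling the
 * interrupt for that ring.
 */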
120 #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
121 #define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
122 #define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS)
124 #define BNXT_CP_DB_REARM(db, raw_cons) \
125 writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)
127 #define BNXT_CP_DB(db, raw_cons) \
128 writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)
130 #define BNXT_CP_DB_IRQ_DIS(db) \
131 writel(DB_CP_IRQ_DIS_FLAGS, db)
static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();

	return bp->tx_ring_size -
	       ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
}
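
/* TX length hint lookup table, indexed by packet length in 512-byte units;
 * bnxt_start_xmit() uses it to set the TX_BD_FLAGS_LHINT_* bits on the
 * first BD of a packet.
 */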
142 static const u16 bnxt_lhint_arr[] = {
143 TX_BD_FLAGS_LHINT_512_AND_SMALLER,
144 TX_BD_FLAGS_LHINT_512_TO_1023,
145 TX_BD_FLAGS_LHINT_1024_TO_2047,
146 TX_BD_FLAGS_LHINT_1024_TO_2047,
147 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
148 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
149 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
150 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
151 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
152 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
153 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
154 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
155 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
156 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
157 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
158 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
159 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
160 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};
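
/* Main transmit routine.  Small packets sent while the ring is empty can be
 * pushed directly through the doorbell/push buffer; all other packets are
 * DMA-mapped and described with long TX BDs, with LSO and checksum offload
 * flags set as needed.
 */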
164 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
166 struct bnxt *bp = netdev_priv(dev);
168 struct tx_bd_ext *txbd1;
169 struct netdev_queue *txq;
172 unsigned int length, pad = 0;
173 u32 len, free_size, vlan_tag_flags, cfa_action, flags;
175 struct pci_dev *pdev = bp->pdev;
176 struct bnxt_napi *bnapi;
177 struct bnxt_tx_ring_info *txr;
178 struct bnxt_sw_tx_bd *tx_buf;
180 i = skb_get_queue_mapping(skb);
181 if (unlikely(i >= bp->tx_nr_rings)) {
182 dev_kfree_skb_any(skb);
186 bnapi = bp->bnapi[i];
187 txr = &bnapi->tx_ring;
188 txq = netdev_get_tx_queue(dev, i);
191 free_size = bnxt_tx_avail(bp, txr);
192 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
193 netif_tx_stop_queue(txq);
194 return NETDEV_TX_BUSY;
198 len = skb_headlen(skb);
199 last_frag = skb_shinfo(skb)->nr_frags;
201 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
203 txbd->tx_bd_opaque = prod;
205 tx_buf = &txr->tx_buf_ring[prod];
207 tx_buf->nr_frags = last_frag;
211 if (skb_vlan_tag_present(skb)) {
212 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
213 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
217 if (skb->vlan_proto == htons(ETH_P_8021Q))
218 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
221 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
222 struct tx_push_bd *push = txr->tx_push;
223 struct tx_bd *tx_push = &push->txbd1;
224 struct tx_bd_ext *tx_push1 = &push->txbd2;
225 void *pdata = tx_push1 + 1;
228 /* Set COAL_NOW to be ready quickly for the next push */
229 tx_push->tx_bd_len_flags_type =
230 cpu_to_le32((length << TX_BD_LEN_SHIFT) |
231 TX_BD_TYPE_LONG_TX_BD |
232 TX_BD_FLAGS_LHINT_512_AND_SMALLER |
233 TX_BD_FLAGS_COAL_NOW |
234 TX_BD_FLAGS_PACKET_END |
235 (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
237 if (skb->ip_summed == CHECKSUM_PARTIAL)
238 tx_push1->tx_bd_hsize_lflags =
239 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
241 tx_push1->tx_bd_hsize_lflags = 0;
243 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
244 tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
246 skb_copy_from_linear_data(skb, pdata, len);
248 for (j = 0; j < last_frag; j++) {
249 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
252 fptr = skb_frag_address_safe(frag);
256 memcpy(pdata, fptr, skb_frag_size(frag));
257 pdata += skb_frag_size(frag);
260 memcpy(txbd, tx_push, sizeof(*txbd));
261 prod = NEXT_TX(prod);
262 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
263 memcpy(txbd, tx_push1, sizeof(*txbd));
264 prod = NEXT_TX(prod);
266 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
269 netdev_tx_sent_queue(txq, skb->len);
271 __iowrite64_copy(txr->tx_doorbell, push,
272 (length + sizeof(*push) + 8) / 8);
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad)) {
			/* SKB already freed. */
			tx_buf->skb = NULL;
			return NETDEV_TX_OK;
		}
		length = BNXT_MIN_PKT_SIZE;
	}
290 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
292 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
293 dev_kfree_skb_any(skb);
298 dma_unmap_addr_set(tx_buf, mapping, mapping);
299 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
300 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
302 txbd->tx_bd_haddr = cpu_to_le64(mapping);
304 prod = NEXT_TX(prod);
305 txbd1 = (struct tx_bd_ext *)
306 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
308 txbd1->tx_bd_hsize_lflags = 0;
309 if (skb_is_gso(skb)) {
312 if (skb->encapsulation)
313 hdr_len = skb_inner_network_offset(skb) +
314 skb_inner_network_header_len(skb) +
315 inner_tcp_hdrlen(skb);
317 hdr_len = skb_transport_offset(skb) +
320 txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
322 (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
323 length = skb_shinfo(skb)->gso_size;
324 txbd1->tx_bd_mss = cpu_to_le32(length);
326 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
327 txbd1->tx_bd_hsize_lflags =
328 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
329 txbd1->tx_bd_mss = 0;
	length >>= 9;
	flags |= bnxt_lhint_arr[length];
334 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
336 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
337 txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
338 for (i = 0; i < last_frag; i++) {
339 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
341 prod = NEXT_TX(prod);
342 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
344 len = skb_frag_size(frag);
345 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
348 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
351 tx_buf = &txr->tx_buf_ring[prod];
352 dma_unmap_addr_set(tx_buf, mapping, mapping);
354 txbd->tx_bd_haddr = cpu_to_le64(mapping);
356 flags = len << TX_BD_LEN_SHIFT;
357 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
361 txbd->tx_bd_len_flags_type =
362 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
363 TX_BD_FLAGS_PACKET_END);
365 netdev_tx_sent_queue(txq, skb->len);
367 /* Sync BD data before updating doorbell */
370 prod = NEXT_TX(prod);
373 writel(DB_KEY_TX | prod, txr->tx_doorbell);
374 writel(DB_KEY_TX | prod, txr->tx_doorbell);
380 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
381 netif_tx_stop_queue(txq);
		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnxt_tx_avail() below, because in
		 * bnxt_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
389 if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
390 netif_tx_wake_queue(txq);
397 /* start back at beginning and unmap skb */
399 tx_buf = &txr->tx_buf_ring[prod];
401 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
402 skb_headlen(skb), PCI_DMA_TODEVICE);
403 prod = NEXT_TX(prod);
405 /* unmap remaining mapped pages */
406 for (i = 0; i < last_frag; i++) {
407 prod = NEXT_TX(prod);
408 tx_buf = &txr->tx_buf_ring[prod];
409 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
410 skb_frag_size(&skb_shinfo(skb)->frags[i]),
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
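
/* Reclaim nr_pkts completed TX packets: unmap their DMA buffers, free the
 * skbs, and re-wake the TX queue once enough descriptors are available.
 */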
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
420 struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
421 int index = bnapi->index;
422 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index);
423 u16 cons = txr->tx_cons;
424 struct pci_dev *pdev = bp->pdev;
426 unsigned int tx_bytes = 0;
428 for (i = 0; i < nr_pkts; i++) {
429 struct bnxt_sw_tx_bd *tx_buf;
433 tx_buf = &txr->tx_buf_ring[cons];
434 cons = NEXT_TX(cons);
438 if (tx_buf->is_push) {
443 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
444 skb_headlen(skb), PCI_DMA_TODEVICE);
445 last = tx_buf->nr_frags;
447 for (j = 0; j < last; j++) {
448 cons = NEXT_TX(cons);
449 tx_buf = &txr->tx_buf_ring[cons];
452 dma_unmap_addr(tx_buf, mapping),
453 skb_frag_size(&skb_shinfo(skb)->frags[j]),
458 cons = NEXT_TX(cons);
460 tx_bytes += skb->len;
461 dev_kfree_skb_any(skb);
464 netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
	/* Need to make the tx_cons update visible to bnxt_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnxt_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();
474 if (unlikely(netif_tx_queue_stopped(txq)) &&
475 (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
476 __netif_tx_lock(txq, smp_processor_id());
477 if (netif_tx_queue_stopped(txq) &&
478 bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
479 txr->dev_state != BNXT_DEV_STATE_CLOSING)
480 netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
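
/* Allocate and DMA-map one RX buffer.  The gfp argument lets this be used
 * from both ring initialization (GFP_KERNEL) and NAPI context (GFP_ATOMIC).
 */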
485 static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
489 struct pci_dev *pdev = bp->pdev;
491 data = kmalloc(bp->rx_buf_size, gfp);
495 *mapping = dma_map_single(&pdev->dev, data + BNXT_RX_DMA_OFFSET,
496 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
498 if (dma_mapping_error(&pdev->dev, *mapping)) {
505 static inline int bnxt_alloc_rx_data(struct bnxt *bp,
506 struct bnxt_rx_ring_info *rxr,
509 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
510 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
514 data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
519 dma_unmap_addr_set(rx_buf, mapping, mapping);
521 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
526 static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons,
529 u16 prod = rxr->rx_prod;
530 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
531 struct rx_bd *cons_bd, *prod_bd;
533 prod_rx_buf = &rxr->rx_buf_ring[prod];
534 cons_rx_buf = &rxr->rx_buf_ring[cons];
536 prod_rx_buf->data = data;
538 dma_unmap_addr_set(prod_rx_buf, mapping,
539 dma_unmap_addr(cons_rx_buf, mapping));
541 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
542 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}
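
/* Aggregation-ring buffers are tracked in a bitmap; pick the next free
 * software slot at or after idx, wrapping to the start if necessary.
 */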
547 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
549 u16 next, max = rxr->rx_agg_bmap_size;
551 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
553 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
557 static inline int bnxt_alloc_rx_page(struct bnxt *bp,
558 struct bnxt_rx_ring_info *rxr,
562 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
563 struct bnxt_sw_rx_agg_bd *rx_agg_buf;
564 struct pci_dev *pdev = bp->pdev;
567 u16 sw_prod = rxr->rx_sw_agg_prod;
569 page = alloc_page(gfp);
573 mapping = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
575 if (dma_mapping_error(&pdev->dev, mapping)) {
580 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
581 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
583 __set_bit(sw_prod, rxr->rx_agg_bmap);
584 rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
585 rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
587 rx_agg_buf->page = page;
588 rx_agg_buf->mapping = mapping;
589 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
590 rxbd->rx_bd_opaque = sw_prod;
594 static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
597 struct bnxt *bp = bnapi->bp;
598 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
599 struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
600 u16 prod = rxr->rx_agg_prod;
601 u16 sw_prod = rxr->rx_sw_agg_prod;
604 for (i = 0; i < agg_bufs; i++) {
606 struct rx_agg_cmp *agg;
607 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
608 struct rx_bd *prod_bd;
611 agg = (struct rx_agg_cmp *)
612 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
613 cons = agg->rx_agg_cmp_opaque;
614 __clear_bit(cons, rxr->rx_agg_bmap);
616 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
617 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
619 __set_bit(sw_prod, rxr->rx_agg_bmap);
620 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
621 cons_rx_buf = &rxr->rx_agg_ring[cons];
		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->page to NULL first.
		 */
626 page = cons_rx_buf->page;
627 cons_rx_buf->page = NULL;
628 prod_rx_buf->page = page;
630 prod_rx_buf->mapping = cons_rx_buf->mapping;
632 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
634 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
635 prod_bd->rx_bd_opaque = sw_prod;
637 prod = NEXT_RX_AGG(prod);
638 sw_prod = NEXT_RX_AGG(sw_prod);
639 cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}
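
/* Build an skb around a completed RX buffer.  The ring slot is refilled
 * first; if that allocation fails the old buffer is recycled instead.
 */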
645 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
646 struct bnxt_rx_ring_info *rxr, u16 cons,
647 u16 prod, u8 *data, dma_addr_t dma_addr,
653 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
655 bnxt_reuse_rx_data(rxr, cons, data);
659 skb = build_skb(data, 0);
660 dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
667 skb_reserve(skb, BNXT_RX_OFFSET);
672 static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
673 struct sk_buff *skb, u16 cp_cons,
676 struct pci_dev *pdev = bp->pdev;
677 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
678 struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
679 u16 prod = rxr->rx_agg_prod;
682 for (i = 0; i < agg_bufs; i++) {
684 struct rx_agg_cmp *agg;
685 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
689 agg = (struct rx_agg_cmp *)
690 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
691 cons = agg->rx_agg_cmp_opaque;
692 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
693 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
695 cons_rx_buf = &rxr->rx_agg_ring[cons];
696 skb_fill_page_desc(skb, i, cons_rx_buf->page, 0, frag_len);
697 __clear_bit(cons, rxr->rx_agg_bmap);
		/* It is possible for bnxt_alloc_rx_page() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
703 mapping = dma_unmap_addr(cons_rx_buf, mapping);
704 page = cons_rx_buf->page;
705 cons_rx_buf->page = NULL;
707 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
708 struct skb_shared_info *shinfo;
709 unsigned int nr_frags;
711 shinfo = skb_shinfo(skb);
712 nr_frags = --shinfo->nr_frags;
713 __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
717 cons_rx_buf->page = page;
			/* Update prod since possibly some pages have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
723 bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
727 dma_unmap_page(&pdev->dev, mapping, PAGE_SIZE,
730 skb->data_len += frag_len;
731 skb->len += frag_len;
732 skb->truesize += PAGE_SIZE;
734 prod = NEXT_RX_AGG(prod);
735 cp_cons = NEXT_CMP(cp_cons);
737 rxr->rx_agg_prod = prod;
741 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
742 u8 agg_bufs, u32 *raw_cons)
745 struct rx_agg_cmp *agg;
747 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
748 last = RING_CMP(*raw_cons);
749 agg = (struct rx_agg_cmp *)
750 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
751 return RX_AGG_CMP_VALID(agg, *raw_cons);
754 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
758 struct bnxt *bp = bnapi->bp;
759 struct pci_dev *pdev = bp->pdev;
762 skb = napi_alloc_skb(&bnapi->napi, len);
766 dma_sync_single_for_cpu(&pdev->dev, mapping,
767 bp->rx_copy_thresh, PCI_DMA_FROMDEVICE);
769 memcpy(skb->data - BNXT_RX_OFFSET, data, len + BNXT_RX_OFFSET);
	dma_sync_single_for_device(&pdev->dev, mapping,
				   bp->rx_copy_thresh, PCI_DMA_FROMDEVICE);

	skb_put(skb, len);
	return skb;
}
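
/* TPA start: the hardware has begun aggregating a TCP flow.  Stash the
 * current buffer in the per-aggregation-ID tpa_info and hand the ring a
 * replacement buffer from the TPA pool.
 */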
779 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
780 struct rx_tpa_start_cmp *tpa_start,
781 struct rx_tpa_start_cmp_ext *tpa_start1)
783 u8 agg_id = TPA_START_AGG_ID(tpa_start);
785 struct bnxt_tpa_info *tpa_info;
786 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
787 struct rx_bd *prod_bd;
790 cons = tpa_start->rx_tpa_start_cmp_opaque;
792 cons_rx_buf = &rxr->rx_buf_ring[cons];
793 prod_rx_buf = &rxr->rx_buf_ring[prod];
794 tpa_info = &rxr->rx_tpa[agg_id];
796 prod_rx_buf->data = tpa_info->data;
798 mapping = tpa_info->mapping;
799 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
801 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
803 prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
805 tpa_info->data = cons_rx_buf->data;
806 cons_rx_buf->data = NULL;
807 tpa_info->mapping = dma_unmap_addr(cons_rx_buf, mapping);
810 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
811 RX_TPA_START_CMP_LEN_SHIFT;
812 if (likely(TPA_START_HASH_VALID(tpa_start))) {
813 u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
815 tpa_info->hash_type = PKT_HASH_TYPE_L4;
816 tpa_info->gso_type = SKB_GSO_TCPV4;
817 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
819 tpa_info->gso_type = SKB_GSO_TCPV6;
821 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
823 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
824 tpa_info->gso_type = 0;
825 if (netif_msg_rx_err(bp))
826 netdev_warn(bp->dev, "TPA packet without valid hash\n");
828 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
829 tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
831 rxr->rx_prod = NEXT_RX(prod);
832 cons = NEXT_RX(cons);
833 cons_rx_buf = &rxr->rx_buf_ring[cons];
835 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
836 rxr->rx_prod = NEXT_RX(rxr->rx_prod);
837 cons_rx_buf->data = NULL;
840 static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
841 u16 cp_cons, u32 agg_bufs)
844 bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
847 #define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
848 #define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
850 static inline struct sk_buff *bnxt_gro_skb(struct bnxt_tpa_info *tpa_info,
851 struct rx_tpa_end_cmp *tpa_end,
852 struct rx_tpa_end_cmp_ext *tpa_end1,
857 int payload_off, tcp_opt_len = 0;
861 segs = TPA_END_TPA_SEGS(tpa_end);
865 NAPI_GRO_CB(skb)->count = segs;
866 skb_shinfo(skb)->gso_size =
867 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
868 skb_shinfo(skb)->gso_type = tpa_info->gso_type;
869 payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
870 RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
871 RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
872 if (TPA_END_GRO_TS(tpa_end))
875 if (tpa_info->gso_type == SKB_GSO_TCPV4) {
878 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
880 skb_set_network_header(skb, nw_off);
882 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
883 len = skb->len - skb_transport_offset(skb);
885 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
886 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
889 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
891 skb_set_network_header(skb, nw_off);
893 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
894 len = skb->len - skb_transport_offset(skb);
896 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
898 dev_kfree_skb_any(skb);
901 tcp_gro_complete(skb);
903 if (nw_off) { /* tunnel */
904 struct udphdr *uh = NULL;
906 if (skb->protocol == htons(ETH_P_IP)) {
907 struct iphdr *iph = (struct iphdr *)skb->data;
909 if (iph->protocol == IPPROTO_UDP)
910 uh = (struct udphdr *)(iph + 1);
912 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
914 if (iph->nexthdr == IPPROTO_UDP)
915 uh = (struct udphdr *)(iph + 1);
919 skb_shinfo(skb)->gso_type |=
920 SKB_GSO_UDP_TUNNEL_CSUM;
922 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
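
/* TPA end: the aggregated packet is complete.  Rebuild an skb from the TPA
 * buffer plus any aggregation pages, restore VLAN/RSS metadata, and run GRO
 * completion when the hardware performed GRO-style aggregation.
 */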
929 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
930 struct bnxt_napi *bnapi,
932 struct rx_tpa_end_cmp *tpa_end,
933 struct rx_tpa_end_cmp_ext *tpa_end1,
936 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
937 struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
938 u8 agg_id = TPA_END_AGG_ID(tpa_end);
940 u16 cp_cons = RING_CMP(*raw_cons);
942 struct bnxt_tpa_info *tpa_info;
946 tpa_info = &rxr->rx_tpa[agg_id];
947 data = tpa_info->data;
950 mapping = tpa_info->mapping;
952 agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
953 RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;
956 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
957 return ERR_PTR(-EBUSY);
960 cp_cons = NEXT_CMP(cp_cons);
963 if (unlikely(agg_bufs > MAX_SKB_FRAGS)) {
964 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
965 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
966 agg_bufs, (int)MAX_SKB_FRAGS);
970 if (len <= bp->rx_copy_thresh) {
971 skb = bnxt_copy_skb(bnapi, data, len, mapping);
973 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
978 dma_addr_t new_mapping;
980 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
982 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
986 tpa_info->data = new_data;
987 tpa_info->mapping = new_mapping;
989 skb = build_skb(data, 0);
990 dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size,
995 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
998 skb_reserve(skb, BNXT_RX_OFFSET);
1003 skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
1005 /* Page reuse already handled by bnxt_rx_pages(). */
1009 skb->protocol = eth_type_trans(skb, bp->dev);
1011 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1012 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1014 if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
1015 netdev_features_t features = skb->dev->features;
1016 u16 vlan_proto = tpa_info->metadata >>
1017 RX_CMP_FLAGS2_METADATA_TPID_SFT;
1019 if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
1020 vlan_proto == ETH_P_8021Q) ||
1021 ((features & NETIF_F_HW_VLAN_STAG_RX) &&
1022 vlan_proto == ETH_P_8021AD)) {
1023 __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
1024 tpa_info->metadata &
1025 RX_CMP_FLAGS2_METADATA_VID_MASK);
1029 skb_checksum_none_assert(skb);
1030 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1031 skb->ip_summed = CHECKSUM_UNNECESSARY;
1033 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1036 if (TPA_END_GRO(tpa_end))
1037 skb = bnxt_gro_skb(tpa_info, tpa_end, tpa_end1, skb);
/* returns the following:
 * 1 - 1 packet successfully received
 * 0 - successful TPA_START, packet not completed yet
 * -EBUSY - completion ring does not have all the agg buffers yet
 * -ENOMEM - packet aborted due to out of memory
 * -EIO - packet aborted due to hw error indicated in BD
 */
static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
		       bool *agg_event)
{
1052 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1053 struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
1054 struct net_device *dev = bp->dev;
1055 struct rx_cmp *rxcmp;
1056 struct rx_cmp_ext *rxcmp1;
1057 u32 tmp_raw_cons = *raw_cons;
1058 u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1059 struct bnxt_sw_rx_bd *rx_buf;
1061 u8 *data, agg_bufs, cmp_type;
1062 dma_addr_t dma_addr;
1063 struct sk_buff *skb;
1066 rxcmp = (struct rx_cmp *)
1067 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1069 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1070 cp_cons = RING_CMP(tmp_raw_cons);
1071 rxcmp1 = (struct rx_cmp_ext *)
1072 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1074 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1077 cmp_type = RX_CMP_TYPE(rxcmp);
1079 prod = rxr->rx_prod;
1081 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1082 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1083 (struct rx_tpa_start_cmp_ext *)rxcmp1);
1085 goto next_rx_no_prod;
1087 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1088 skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
1089 (struct rx_tpa_end_cmp *)rxcmp,
1090 (struct rx_tpa_end_cmp_ext *)rxcmp1,
1093 if (unlikely(IS_ERR(skb)))
1098 skb_record_rx_queue(skb, bnapi->index);
1099 skb_mark_napi_id(skb, &bnapi->napi);
1100 if (bnxt_busy_polling(bnapi))
1101 netif_receive_skb(skb);
1103 napi_gro_receive(&bnapi->napi, skb);
1106 goto next_rx_no_prod;
1109 cons = rxcmp->rx_cmp_opaque;
1110 rx_buf = &rxr->rx_buf_ring[cons];
1111 data = rx_buf->data;
1114 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
1115 RX_CMP_AGG_BUFS_SHIFT;
1118 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1121 cp_cons = NEXT_CMP(cp_cons);
1125 rx_buf->data = NULL;
1126 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1127 bnxt_reuse_rx_data(rxr, cons, data);
1129 bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
1135 len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
1136 dma_addr = dma_unmap_addr(rx_buf, mapping);
1138 if (len <= bp->rx_copy_thresh) {
1139 skb = bnxt_copy_skb(bnapi, data, len, dma_addr);
1140 bnxt_reuse_rx_data(rxr, cons, data);
1146 skb = bnxt_rx_skb(bp, rxr, cons, prod, data, dma_addr, len);
1154 skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
1161 if (RX_CMP_HASH_VALID(rxcmp)) {
1162 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1163 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1165 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1166 if (hash_type != 1 && hash_type != 3)
1167 type = PKT_HASH_TYPE_L3;
1168 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1171 skb->protocol = eth_type_trans(skb, dev);
1173 if (rxcmp1->rx_cmp_flags2 &
1174 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) {
1175 netdev_features_t features = skb->dev->features;
1176 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1177 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
1179 if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
1180 vlan_proto == ETH_P_8021Q) ||
1181 ((features & NETIF_F_HW_VLAN_STAG_RX) &&
1182 vlan_proto == ETH_P_8021AD))
1183 __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
1185 RX_CMP_FLAGS2_METADATA_VID_MASK);
1188 skb_checksum_none_assert(skb);
1189 if (RX_CMP_L4_CS_OK(rxcmp1)) {
1190 if (dev->features & NETIF_F_RXCSUM) {
1191 skb->ip_summed = CHECKSUM_UNNECESSARY;
1192 skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1195 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1196 if (dev->features & NETIF_F_RXCSUM)
1197 cpr->rx_l4_csum_errors++;
1201 skb_record_rx_queue(skb, bnapi->index);
1202 skb_mark_napi_id(skb, &bnapi->napi);
1203 if (bnxt_busy_polling(bnapi))
1204 netif_receive_skb(skb);
1206 napi_gro_receive(&bnapi->napi, skb);
1210 rxr->rx_prod = NEXT_RX(prod);
1213 *raw_cons = tmp_raw_cons;
1218 static int bnxt_async_event_process(struct bnxt *bp,
1219 struct hwrm_async_event_cmpl *cmpl)
1221 u16 event_id = le16_to_cpu(cmpl->event_id);
1223 /* TODO CHIMP_FW: Define event id's for link change, error etc */
1225 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
1226 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
1227 schedule_work(&bp->sp_task);
1230 netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
1237 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
1239 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
1240 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
1241 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
1242 (struct hwrm_fwd_req_cmpl *)txcmp;
1244 switch (cmpl_type) {
1245 case CMPL_BASE_TYPE_HWRM_DONE:
1246 seq_id = le16_to_cpu(h_cmpl->sequence_id);
1247 if (seq_id == bp->hwrm_intr_seq_id)
1248 bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
1250 netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
1253 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
1254 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
1256 if ((vf_id < bp->pf.first_vf_id) ||
1257 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
1258 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
1263 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
1264 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
1265 schedule_work(&bp->sp_task);
1268 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
1269 bnxt_async_event_process(bp,
1270 (struct hwrm_async_event_cmpl *)txcmp);
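
/* MSI-X handler: each completion ring owns a vector, so just prefetch the
 * next completion descriptor and schedule NAPI.
 */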
1279 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
1281 struct bnxt_napi *bnapi = dev_instance;
1282 struct bnxt *bp = bnapi->bp;
1283 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1284 u32 cons = RING_CMP(cpr->cp_raw_cons);
1286 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
	napi_schedule(&bnapi->napi);
	return IRQ_HANDLED;
}
1291 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1293 u32 raw_cons = cpr->cp_raw_cons;
1294 u16 cons = RING_CMP(raw_cons);
1295 struct tx_cmp *txcmp;
1297 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1299 return TX_CMP_VALID(txcmp, raw_cons);
1302 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
1304 struct bnxt_napi *bnapi = dev_instance;
1305 struct bnxt *bp = bnapi->bp;
1306 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1307 u32 cons = RING_CMP(cpr->cp_raw_cons);
1310 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1312 if (!bnxt_has_work(bp, cpr)) {
1313 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
1314 /* return if erroneous interrupt */
1315 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
1319 /* disable ring IRQ */
1320 BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);
1322 /* Return here if interrupt is shared and is disabled. */
1323 if (unlikely(atomic_read(&bp->intr_sem) != 0))
	napi_schedule(&bnapi->napi);
	return IRQ_HANDLED;
}
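
/* Core completion processing shared by NAPI poll and busy poll: walk the
 * completion ring, handling TX completions, RX packets and HWRM events,
 * until the budget is consumed or no more valid entries remain.
 */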
1330 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
1332 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1333 u32 raw_cons = cpr->cp_raw_cons;
1337 bool rx_event = false;
1338 bool agg_event = false;
1339 struct tx_cmp *txcmp;
1344 cons = RING_CMP(raw_cons);
1345 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1347 if (!TX_CMP_VALID(txcmp, raw_cons))
1350 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
1352 /* return full budget so NAPI will complete. */
1353 if (unlikely(tx_pkts > bp->tx_wake_thresh))
1355 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
1356 rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
1357 if (likely(rc >= 0))
1359 else if (rc == -EBUSY) /* partial completion */
1362 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
1363 CMPL_BASE_TYPE_HWRM_DONE) ||
1364 (TX_CMP_TYPE(txcmp) ==
1365 CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
1366 (TX_CMP_TYPE(txcmp) ==
1367 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
1368 bnxt_hwrm_handler(bp, txcmp);
1370 raw_cons = NEXT_RAW_CMP(raw_cons);
1372 if (rx_pkts == budget)
1376 cpr->cp_raw_cons = raw_cons;
	/* ACK completion ring before freeing tx ring and producing new
	 * buffers in rx/agg rings to prevent overflowing the completion
	 * ring.
	 */
1381 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
1384 bnxt_tx_int(bp, bnapi, tx_pkts);
1387 struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
1389 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
1390 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
1392 writel(DB_KEY_RX | rxr->rx_agg_prod,
1393 rxr->rx_agg_doorbell);
1394 writel(DB_KEY_RX | rxr->rx_agg_prod,
			       rxr->rx_agg_doorbell);
		}
	}

	return rx_pkts;
}
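
/* NAPI poll callback: process completions up to the budget, then complete
 * NAPI and re-arm the completion ring interrupt when no work remains.
 */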
1401 static int bnxt_poll(struct napi_struct *napi, int budget)
1403 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
1404 struct bnxt *bp = bnapi->bp;
1405 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1408 if (!bnxt_lock_napi(bnapi))
1412 work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
1414 if (work_done >= budget)
1417 if (!bnxt_has_work(bp, cpr)) {
1418 napi_complete(napi);
1419 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
1424 bnxt_unlock_napi(bnapi);
1428 #ifdef CONFIG_NET_RX_BUSY_POLL
1429 static int bnxt_busy_poll(struct napi_struct *napi)
1431 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
1432 struct bnxt *bp = bnapi->bp;
1433 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1434 int rx_work, budget = 4;
1436 if (atomic_read(&bp->intr_sem) != 0)
1437 return LL_FLUSH_FAILED;
1439 if (!bnxt_lock_poll(bnapi))
1440 return LL_FLUSH_BUSY;
1442 rx_work = bnxt_poll_work(bp, bnapi, budget);
1444 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
	bnxt_unlock_poll(bnapi);

	return rx_work;
}
#endif
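
/* Release every skb still held by the TX rings (e.g. on close or reset),
 * unmapping the associated DMA buffers.
 */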
1451 static void bnxt_free_tx_skbs(struct bnxt *bp)
1454 struct pci_dev *pdev = bp->pdev;
1459 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
1460 for (i = 0; i < bp->tx_nr_rings; i++) {
1461 struct bnxt_napi *bnapi = bp->bnapi[i];
1462 struct bnxt_tx_ring_info *txr;
1468 txr = &bnapi->tx_ring;
1469 for (j = 0; j < max_idx;) {
1470 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
1471 struct sk_buff *skb = tx_buf->skb;
1481 if (tx_buf->is_push) {
1487 dma_unmap_single(&pdev->dev,
1488 dma_unmap_addr(tx_buf, mapping),
1492 last = tx_buf->nr_frags;
1494 for (k = 0; k < last; k++, j = NEXT_TX(j)) {
1495 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
1497 tx_buf = &txr->tx_buf_ring[j];
1500 dma_unmap_addr(tx_buf, mapping),
1501 skb_frag_size(frag), PCI_DMA_TODEVICE);
1505 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
1509 static void bnxt_free_rx_skbs(struct bnxt *bp)
1511 int i, max_idx, max_agg_idx;
1512 struct pci_dev *pdev = bp->pdev;
1517 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
1518 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
1519 for (i = 0; i < bp->rx_nr_rings; i++) {
1520 struct bnxt_napi *bnapi = bp->bnapi[i];
1521 struct bnxt_rx_ring_info *rxr;
1527 rxr = &bnapi->rx_ring;
1530 for (j = 0; j < MAX_TPA; j++) {
1531 struct bnxt_tpa_info *tpa_info =
1533 u8 *data = tpa_info->data;
1540 dma_unmap_addr(tpa_info, mapping),
1541 bp->rx_buf_use_size,
1542 PCI_DMA_FROMDEVICE);
1544 tpa_info->data = NULL;
1550 for (j = 0; j < max_idx; j++) {
1551 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
1552 u8 *data = rx_buf->data;
1557 dma_unmap_single(&pdev->dev,
1558 dma_unmap_addr(rx_buf, mapping),
1559 bp->rx_buf_use_size,
1560 PCI_DMA_FROMDEVICE);
1562 rx_buf->data = NULL;
1567 for (j = 0; j < max_agg_idx; j++) {
1568 struct bnxt_sw_rx_agg_bd *rx_agg_buf =
1569 &rxr->rx_agg_ring[j];
1570 struct page *page = rx_agg_buf->page;
1575 dma_unmap_page(&pdev->dev,
1576 dma_unmap_addr(rx_agg_buf, mapping),
1577 PAGE_SIZE, PCI_DMA_FROMDEVICE);
1579 rx_agg_buf->page = NULL;
1580 __clear_bit(j, rxr->rx_agg_bmap);
1587 static void bnxt_free_skbs(struct bnxt *bp)
1589 bnxt_free_tx_skbs(bp);
	bnxt_free_rx_skbs(bp);
}
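
/* Free the DMA-coherent pages backing one hardware ring, the optional page
 * table that links them, and the vmalloc'ed software ring state, if any.
 */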
1593 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
1595 struct pci_dev *pdev = bp->pdev;
1598 for (i = 0; i < ring->nr_pages; i++) {
1599 if (!ring->pg_arr[i])
1602 dma_free_coherent(&pdev->dev, ring->page_size,
1603 ring->pg_arr[i], ring->dma_arr[i]);
1605 ring->pg_arr[i] = NULL;
1608 dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
1609 ring->pg_tbl, ring->pg_tbl_map);
1610 ring->pg_tbl = NULL;
1612 if (ring->vmem_size && *ring->vmem) {
1618 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
1621 struct pci_dev *pdev = bp->pdev;
1623 if (ring->nr_pages > 1) {
1624 ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
1632 for (i = 0; i < ring->nr_pages; i++) {
1633 ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
1637 if (!ring->pg_arr[i])
1640 if (ring->nr_pages > 1)
1641 ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
1644 if (ring->vmem_size) {
1645 *ring->vmem = vzalloc(ring->vmem_size);
1652 static void bnxt_free_rx_rings(struct bnxt *bp)
1659 for (i = 0; i < bp->rx_nr_rings; i++) {
1660 struct bnxt_napi *bnapi = bp->bnapi[i];
1661 struct bnxt_rx_ring_info *rxr;
1662 struct bnxt_ring_struct *ring;
1667 rxr = &bnapi->rx_ring;
1672 kfree(rxr->rx_agg_bmap);
1673 rxr->rx_agg_bmap = NULL;
1675 ring = &rxr->rx_ring_struct;
1676 bnxt_free_ring(bp, ring);
1678 ring = &rxr->rx_agg_ring_struct;
1679 bnxt_free_ring(bp, ring);
1683 static int bnxt_alloc_rx_rings(struct bnxt *bp)
1685 int i, rc, agg_rings = 0, tpa_rings = 0;
1687 if (bp->flags & BNXT_FLAG_AGG_RINGS)
1690 if (bp->flags & BNXT_FLAG_TPA)
1693 for (i = 0; i < bp->rx_nr_rings; i++) {
1694 struct bnxt_napi *bnapi = bp->bnapi[i];
1695 struct bnxt_rx_ring_info *rxr;
1696 struct bnxt_ring_struct *ring;
1701 rxr = &bnapi->rx_ring;
1702 ring = &rxr->rx_ring_struct;
1704 rc = bnxt_alloc_ring(bp, ring);
1711 ring = &rxr->rx_agg_ring_struct;
1712 rc = bnxt_alloc_ring(bp, ring);
1716 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
1717 mem_size = rxr->rx_agg_bmap_size / 8;
1718 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
1719 if (!rxr->rx_agg_bmap)
1723 rxr->rx_tpa = kcalloc(MAX_TPA,
1724 sizeof(struct bnxt_tpa_info),
1734 static void bnxt_free_tx_rings(struct bnxt *bp)
1737 struct pci_dev *pdev = bp->pdev;
1742 for (i = 0; i < bp->tx_nr_rings; i++) {
1743 struct bnxt_napi *bnapi = bp->bnapi[i];
1744 struct bnxt_tx_ring_info *txr;
1745 struct bnxt_ring_struct *ring;
1750 txr = &bnapi->tx_ring;
1753 dma_free_coherent(&pdev->dev, bp->tx_push_size,
1754 txr->tx_push, txr->tx_push_mapping);
1755 txr->tx_push = NULL;
1758 ring = &txr->tx_ring_struct;
1760 bnxt_free_ring(bp, ring);
1764 static int bnxt_alloc_tx_rings(struct bnxt *bp)
1767 struct pci_dev *pdev = bp->pdev;
1769 bp->tx_push_size = 0;
1770 if (bp->tx_push_thresh) {
1773 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
1774 bp->tx_push_thresh);
1776 if (push_size > 128) {
1778 bp->tx_push_thresh = 0;
1781 bp->tx_push_size = push_size;
1784 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
1785 struct bnxt_napi *bnapi = bp->bnapi[i];
1786 struct bnxt_tx_ring_info *txr;
1787 struct bnxt_ring_struct *ring;
1792 txr = &bnapi->tx_ring;
1793 ring = &txr->tx_ring_struct;
1795 rc = bnxt_alloc_ring(bp, ring);
1799 if (bp->tx_push_size) {
			/* One pre-allocated DMA buffer to backup
			 * TX push operation
			 */
			txr->tx_push = dma_alloc_coherent(&pdev->dev,
1808 &txr->tx_push_mapping,
1814 txbd = &txr->tx_push->txbd1;
1816 mapping = txr->tx_push_mapping +
1817 sizeof(struct tx_push_bd);
1818 txbd->tx_bd_haddr = cpu_to_le64(mapping);
1820 memset(txbd + 1, 0, sizeof(struct tx_bd_ext));
1822 ring->queue_id = bp->q_info[j].queue_id;
1823 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
1829 static void bnxt_free_cp_rings(struct bnxt *bp)
1836 for (i = 0; i < bp->cp_nr_rings; i++) {
1837 struct bnxt_napi *bnapi = bp->bnapi[i];
1838 struct bnxt_cp_ring_info *cpr;
1839 struct bnxt_ring_struct *ring;
1844 cpr = &bnapi->cp_ring;
1845 ring = &cpr->cp_ring_struct;
1847 bnxt_free_ring(bp, ring);
1851 static int bnxt_alloc_cp_rings(struct bnxt *bp)
1855 for (i = 0; i < bp->cp_nr_rings; i++) {
1856 struct bnxt_napi *bnapi = bp->bnapi[i];
1857 struct bnxt_cp_ring_info *cpr;
1858 struct bnxt_ring_struct *ring;
1863 cpr = &bnapi->cp_ring;
1864 ring = &cpr->cp_ring_struct;
1866 rc = bnxt_alloc_ring(bp, ring);
1873 static void bnxt_init_ring_struct(struct bnxt *bp)
1877 for (i = 0; i < bp->cp_nr_rings; i++) {
1878 struct bnxt_napi *bnapi = bp->bnapi[i];
1879 struct bnxt_cp_ring_info *cpr;
1880 struct bnxt_rx_ring_info *rxr;
1881 struct bnxt_tx_ring_info *txr;
1882 struct bnxt_ring_struct *ring;
1887 cpr = &bnapi->cp_ring;
1888 ring = &cpr->cp_ring_struct;
1889 ring->nr_pages = bp->cp_nr_pages;
1890 ring->page_size = HW_CMPD_RING_SIZE;
1891 ring->pg_arr = (void **)cpr->cp_desc_ring;
1892 ring->dma_arr = cpr->cp_desc_mapping;
1893 ring->vmem_size = 0;
1895 rxr = &bnapi->rx_ring;
1896 ring = &rxr->rx_ring_struct;
1897 ring->nr_pages = bp->rx_nr_pages;
1898 ring->page_size = HW_RXBD_RING_SIZE;
1899 ring->pg_arr = (void **)rxr->rx_desc_ring;
1900 ring->dma_arr = rxr->rx_desc_mapping;
1901 ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
1902 ring->vmem = (void **)&rxr->rx_buf_ring;
1904 ring = &rxr->rx_agg_ring_struct;
1905 ring->nr_pages = bp->rx_agg_nr_pages;
1906 ring->page_size = HW_RXBD_RING_SIZE;
1907 ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
1908 ring->dma_arr = rxr->rx_agg_desc_mapping;
1909 ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
1910 ring->vmem = (void **)&rxr->rx_agg_ring;
1912 txr = &bnapi->tx_ring;
1913 ring = &txr->tx_ring_struct;
1914 ring->nr_pages = bp->tx_nr_pages;
1915 ring->page_size = HW_RXBD_RING_SIZE;
1916 ring->pg_arr = (void **)txr->tx_desc_ring;
1917 ring->dma_arr = txr->tx_desc_mapping;
1918 ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
1919 ring->vmem = (void **)&txr->tx_buf_ring;
1923 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
1927 struct rx_bd **rx_buf_ring;
1929 rx_buf_ring = (struct rx_bd **)ring->pg_arr;
1930 for (i = 0, prod = 0; i < ring->nr_pages; i++) {
1934 rxbd = rx_buf_ring[i];
1938 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
1939 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
1940 rxbd->rx_bd_opaque = prod;
1945 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
1947 struct net_device *dev = bp->dev;
1948 struct bnxt_napi *bnapi = bp->bnapi[ring_nr];
1949 struct bnxt_rx_ring_info *rxr;
1950 struct bnxt_ring_struct *ring;
1957 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
1958 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
1960 if (NET_IP_ALIGN == 2)
1961 type |= RX_BD_FLAGS_SOP;
1963 rxr = &bnapi->rx_ring;
1964 ring = &rxr->rx_ring_struct;
1965 bnxt_init_rxbd_pages(ring, type);
1967 prod = rxr->rx_prod;
1968 for (i = 0; i < bp->rx_ring_size; i++) {
1969 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
1970 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
1971 ring_nr, i, bp->rx_ring_size);
1974 prod = NEXT_RX(prod);
1976 rxr->rx_prod = prod;
1977 ring->fw_ring_id = INVALID_HW_RING_ID;
1979 ring = &rxr->rx_agg_ring_struct;
1980 ring->fw_ring_id = INVALID_HW_RING_ID;
1982 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
1985 type = ((u32)PAGE_SIZE << RX_BD_LEN_SHIFT) |
1986 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
1988 bnxt_init_rxbd_pages(ring, type);
1990 prod = rxr->rx_agg_prod;
1991 for (i = 0; i < bp->rx_agg_ring_size; i++) {
1992 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
1993 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
1994 ring_nr, i, bp->rx_ring_size);
1997 prod = NEXT_RX_AGG(prod);
1999 rxr->rx_agg_prod = prod;
2001 if (bp->flags & BNXT_FLAG_TPA) {
2006 for (i = 0; i < MAX_TPA; i++) {
2007 data = __bnxt_alloc_rx_data(bp, &mapping,
2012 rxr->rx_tpa[i].data = data;
2013 rxr->rx_tpa[i].mapping = mapping;
2016 netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
2024 static int bnxt_init_rx_rings(struct bnxt *bp)
2028 for (i = 0; i < bp->rx_nr_rings; i++) {
2029 rc = bnxt_init_one_rx_ring(bp, i);
2037 static int bnxt_init_tx_rings(struct bnxt *bp)
2041 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
2044 for (i = 0; i < bp->tx_nr_rings; i++) {
2045 struct bnxt_napi *bnapi = bp->bnapi[i];
2046 struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
2047 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
2049 ring->fw_ring_id = INVALID_HW_RING_ID;
2055 static void bnxt_free_ring_grps(struct bnxt *bp)
2057 kfree(bp->grp_info);
2058 bp->grp_info = NULL;
2061 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
2066 bp->grp_info = kcalloc(bp->cp_nr_rings,
2067 sizeof(struct bnxt_ring_grp_info),
2072 for (i = 0; i < bp->cp_nr_rings; i++) {
2074 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
2075 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
2076 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
2077 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
2078 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
2083 static void bnxt_free_vnics(struct bnxt *bp)
2085 kfree(bp->vnic_info);
2086 bp->vnic_info = NULL;
2090 static int bnxt_alloc_vnics(struct bnxt *bp)
2094 #ifdef CONFIG_RFS_ACCEL
2095 if (bp->flags & BNXT_FLAG_RFS)
2096 num_vnics += bp->rx_nr_rings;
2099 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
2104 bp->nr_vnics = num_vnics;
2108 static void bnxt_init_vnics(struct bnxt *bp)
2112 for (i = 0; i < bp->nr_vnics; i++) {
2113 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2115 vnic->fw_vnic_id = INVALID_HW_RING_ID;
2116 vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
2117 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
2119 if (bp->vnic_info[i].rss_hash_key) {
2121 prandom_bytes(vnic->rss_hash_key,
2124 memcpy(vnic->rss_hash_key,
2125 bp->vnic_info[0].rss_hash_key,
2131 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
2135 pages = ring_size / desc_per_pg;
2142 while (pages & (pages - 1))
2148 static void bnxt_set_tpa_flags(struct bnxt *bp)
2150 bp->flags &= ~BNXT_FLAG_TPA;
2151 if (bp->dev->features & NETIF_F_LRO)
2152 bp->flags |= BNXT_FLAG_LRO;
2153 if ((bp->dev->features & NETIF_F_GRO) && (bp->pdev->revision > 0))
2154 bp->flags |= BNXT_FLAG_GRO;
/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
 * be set on entry.
 */
void bnxt_set_ring_params(struct bnxt *bp)
{
2162 u32 ring_size, rx_size, rx_space;
2163 u32 agg_factor = 0, agg_ring_size = 0;
2165 /* 8 for CRC and VLAN */
2166 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
2168 rx_space = rx_size + NET_SKB_PAD +
2169 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2171 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
2172 ring_size = bp->rx_ring_size;
2173 bp->rx_agg_ring_size = 0;
2174 bp->rx_agg_nr_pages = 0;
2176 if (bp->flags & BNXT_FLAG_TPA)
2179 bp->flags &= ~BNXT_FLAG_JUMBO;
2180 if (rx_space > PAGE_SIZE) {
2183 bp->flags |= BNXT_FLAG_JUMBO;
2184 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
2185 if (jumbo_factor > agg_factor)
2186 agg_factor = jumbo_factor;
2188 agg_ring_size = ring_size * agg_factor;
2190 if (agg_ring_size) {
2191 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
2193 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
2194 u32 tmp = agg_ring_size;
2196 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
2197 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
2198 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
2199 tmp, agg_ring_size);
2201 bp->rx_agg_ring_size = agg_ring_size;
2202 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
2203 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
2204 rx_space = rx_size + NET_SKB_PAD +
2205 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2208 bp->rx_buf_use_size = rx_size;
2209 bp->rx_buf_size = rx_space;
2211 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
2212 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
2214 ring_size = bp->tx_ring_size;
2215 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
2216 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
2218 ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
2219 bp->cp_ring_size = ring_size;
2221 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
2222 if (bp->cp_nr_pages > MAX_CP_PAGES) {
2223 bp->cp_nr_pages = MAX_CP_PAGES;
2224 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
2225 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
2226 ring_size, bp->cp_ring_size);
2228 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
2229 bp->cp_ring_mask = bp->cp_bit - 1;
2232 static void bnxt_free_vnic_attributes(struct bnxt *bp)
2235 struct bnxt_vnic_info *vnic;
2236 struct pci_dev *pdev = bp->pdev;
2241 for (i = 0; i < bp->nr_vnics; i++) {
2242 vnic = &bp->vnic_info[i];
2244 kfree(vnic->fw_grp_ids);
2245 vnic->fw_grp_ids = NULL;
2247 kfree(vnic->uc_list);
2248 vnic->uc_list = NULL;
2250 if (vnic->mc_list) {
2251 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
2252 vnic->mc_list, vnic->mc_list_mapping);
2253 vnic->mc_list = NULL;
2256 if (vnic->rss_table) {
2257 dma_free_coherent(&pdev->dev, PAGE_SIZE,
2259 vnic->rss_table_dma_addr);
2260 vnic->rss_table = NULL;
2263 vnic->rss_hash_key = NULL;
2268 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
2270 int i, rc = 0, size;
2271 struct bnxt_vnic_info *vnic;
2272 struct pci_dev *pdev = bp->pdev;
2275 for (i = 0; i < bp->nr_vnics; i++) {
2276 vnic = &bp->vnic_info[i];
2278 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
2279 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
2282 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
2283 if (!vnic->uc_list) {
2290 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
2291 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
2293 dma_alloc_coherent(&pdev->dev,
2295 &vnic->mc_list_mapping,
2297 if (!vnic->mc_list) {
2303 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
2304 max_rings = bp->rx_nr_rings;
2308 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
2309 if (!vnic->fw_grp_ids) {
2314 /* Allocate rss table and hash key */
2315 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
2316 &vnic->rss_table_dma_addr,
2318 if (!vnic->rss_table) {
2323 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
2325 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
2326 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
2334 static void bnxt_free_hwrm_resources(struct bnxt *bp)
2336 struct pci_dev *pdev = bp->pdev;
2338 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
2339 bp->hwrm_cmd_resp_dma_addr);
2341 bp->hwrm_cmd_resp_addr = NULL;
2342 if (bp->hwrm_dbg_resp_addr) {
2343 dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
2344 bp->hwrm_dbg_resp_addr,
2345 bp->hwrm_dbg_resp_dma_addr);
2347 bp->hwrm_dbg_resp_addr = NULL;
2351 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2353 struct pci_dev *pdev = bp->pdev;
2355 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
2356 &bp->hwrm_cmd_resp_dma_addr,
2358 if (!bp->hwrm_cmd_resp_addr)
2360 bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
2361 HWRM_DBG_REG_BUF_SIZE,
2362 &bp->hwrm_dbg_resp_dma_addr,
2364 if (!bp->hwrm_dbg_resp_addr)
2365 netdev_warn(bp->dev, "fail to alloc debug register dma mem\n");
2370 static void bnxt_free_stats(struct bnxt *bp)
2373 struct pci_dev *pdev = bp->pdev;
2378 size = sizeof(struct ctx_hw_stats);
2380 for (i = 0; i < bp->cp_nr_rings; i++) {
2381 struct bnxt_napi *bnapi = bp->bnapi[i];
2382 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2384 if (cpr->hw_stats) {
2385 dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
2387 cpr->hw_stats = NULL;
2392 static int bnxt_alloc_stats(struct bnxt *bp)
2395 struct pci_dev *pdev = bp->pdev;
2397 size = sizeof(struct ctx_hw_stats);
2399 for (i = 0; i < bp->cp_nr_rings; i++) {
2400 struct bnxt_napi *bnapi = bp->bnapi[i];
2401 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2403 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
2409 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
2414 static void bnxt_clear_ring_indices(struct bnxt *bp)
2421 for (i = 0; i < bp->cp_nr_rings; i++) {
2422 struct bnxt_napi *bnapi = bp->bnapi[i];
2423 struct bnxt_cp_ring_info *cpr;
2424 struct bnxt_rx_ring_info *rxr;
2425 struct bnxt_tx_ring_info *txr;
2430 cpr = &bnapi->cp_ring;
2431 cpr->cp_raw_cons = 0;
2433 txr = &bnapi->tx_ring;
2437 rxr = &bnapi->rx_ring;
2439 rxr->rx_agg_prod = 0;
2440 rxr->rx_sw_agg_prod = 0;
2444 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
2446 #ifdef CONFIG_RFS_ACCEL
	/* Under rtnl_lock and all our NAPIs have been disabled.  It's
	 * safe to delete the hash table.
	 */
2452 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
2453 struct hlist_head *head;
2454 struct hlist_node *tmp;
2455 struct bnxt_ntuple_filter *fltr;
2457 head = &bp->ntp_fltr_hash_tbl[i];
2458 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
2459 hlist_del(&fltr->hash);
2464 kfree(bp->ntp_fltr_bmap);
2465 bp->ntp_fltr_bmap = NULL;
2467 bp->ntp_fltr_count = 0;
2471 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
2473 #ifdef CONFIG_RFS_ACCEL
2476 if (!(bp->flags & BNXT_FLAG_RFS))
2479 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
2480 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
2482 bp->ntp_fltr_count = 0;
2483 bp->ntp_fltr_bmap = kzalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
2486 if (!bp->ntp_fltr_bmap)
2495 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
2497 bnxt_free_vnic_attributes(bp);
2498 bnxt_free_tx_rings(bp);
2499 bnxt_free_rx_rings(bp);
2500 bnxt_free_cp_rings(bp);
2501 bnxt_free_ntp_fltrs(bp, irq_re_init);
2503 bnxt_free_stats(bp);
2504 bnxt_free_ring_grps(bp);
2505 bnxt_free_vnics(bp);
2509 bnxt_clear_ring_indices(bp);
2513 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
2515 int i, rc, size, arr_size;
		/* Allocate bnapi mem pointer array and mem block for
		 * all queues
		 */
		arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
				bp->cp_nr_rings);
2524 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
2525 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
2531 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
2532 bp->bnapi[i] = bnapi;
2533 bp->bnapi[i]->index = i;
2534 bp->bnapi[i]->bp = bp;
2537 rc = bnxt_alloc_stats(bp);
2541 rc = bnxt_alloc_ntp_fltrs(bp);
2545 rc = bnxt_alloc_vnics(bp);
2550 bnxt_init_ring_struct(bp);
2552 rc = bnxt_alloc_rx_rings(bp);
2556 rc = bnxt_alloc_tx_rings(bp);
2560 rc = bnxt_alloc_cp_rings(bp);
2564 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
2565 BNXT_VNIC_UCAST_FLAG;
2566 rc = bnxt_alloc_vnic_attributes(bp);
2572 bnxt_free_mem(bp, true);
2576 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
2577 u16 cmpl_ring, u16 target_id)
2579 struct hwrm_cmd_req_hdr *req = request;
2581 req->cmpl_ring_req_type =
2582 cpu_to_le32(req_type | (cmpl_ring << HWRM_CMPL_RING_SFT));
2583 req->target_id_seq_id = cpu_to_le32(target_id << HWRM_TARGET_FID_SFT);
	req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
}
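
/* Send one HWRM command to the firmware: copy the request into BAR0, ring
 * the channel doorbell, then wait for the response length and valid bit in
 * the DMA response buffer (or for the completion interrupt when a
 * completion ring was specified).
 */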
2587 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
2589 int i, intr_process, rc;
2590 struct hwrm_cmd_req_hdr *req = msg;
2592 __le32 *resp_len, *valid;
2593 u16 cp_ring_id, len = 0;
2594 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
2596 req->target_id_seq_id |= cpu_to_le32(bp->hwrm_cmd_seq++);
2597 memset(resp, 0, PAGE_SIZE);
2598 cp_ring_id = (le32_to_cpu(req->cmpl_ring_req_type) &
2599 HWRM_CMPL_RING_MASK) >>
2601 intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
2603 /* Write request msg to hwrm channel */
2604 __iowrite32_copy(bp->bar0, data, msg_len / 4);
2606 /* currently supports only one outstanding message */
2608 bp->hwrm_intr_seq_id = le32_to_cpu(req->target_id_seq_id) &
2611 /* Ring channel doorbell */
2612 writel(1, bp->bar0 + 0x100);
2616 /* Wait until hwrm response cmpl interrupt is processed */
2617 while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
2619 usleep_range(600, 800);
2622 if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
2623 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
2624 req->cmpl_ring_req_type);
2628 /* Check if response len is updated */
2629 resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
2630 for (i = 0; i < timeout; i++) {
2631 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
2635 usleep_range(600, 800);
2639 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
2640 timeout, req->cmpl_ring_req_type,
2641 req->target_id_seq_id, *resp_len);
2645 /* Last word of resp contains valid bit */
2646 valid = bp->hwrm_cmd_resp_addr + len - 4;
2647 for (i = 0; i < timeout; i++) {
2648 if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
2650 usleep_range(600, 800);
2654 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
2655 timeout, req->cmpl_ring_req_type,
2656 req->target_id_seq_id, len, *valid);
2661 rc = le16_to_cpu(resp->error_code);
2663 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
2664 le16_to_cpu(resp->req_type),
2665 le16_to_cpu(resp->seq_id), rc);
2671 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
2675 mutex_lock(&bp->hwrm_cmd_lock);
2676 rc = _hwrm_send_message(bp, msg, msg_len, timeout);
2677 mutex_unlock(&bp->hwrm_cmd_lock);
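/* Register the driver with the firmware: OS type, driver version and a
 * request to forward async events.  A bitmap built from
 * bnxt_vf_req_snif[] is also programmed into vf_req_fwd (presumably
 * only on the PF) so that those VF requests are forwarded to this
 * driver for handling.
 */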
2681 static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
2683 struct hwrm_func_drv_rgtr_input req = {0};
2686 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
2689 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
2690 FUNC_DRV_RGTR_REQ_ENABLES_VER |
2691 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
2693 /* TODO: current async event fwd bits are not defined and the firmware
2694 * only checks if it is non-zero to enable async event forwarding
2696 req.async_event_fwd[0] |= cpu_to_le32(1);
2697 req.os_type = cpu_to_le16(1);
2698 req.ver_maj = DRV_VER_MAJ;
2699 req.ver_min = DRV_VER_MIN;
2700 req.ver_upd = DRV_VER_UPD;
2703 DECLARE_BITMAP(vf_req_snif_bmap, 256);
2704 u32 *data = (u32 *)vf_req_snif_bmap;
2706 memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap));
2707 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++)
2708 __set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap);
2710 for (i = 0; i < 8; i++)
2711 req.vf_req_fwd[i] = cpu_to_le32(data[i]);
2714 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
2717 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2720 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
2722 struct hwrm_func_drv_unrgtr_input req = {0};
2724 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
2725 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2728 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
2731 struct hwrm_tunnel_dst_port_free_input req = {0};
2733 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
2734 req.tunnel_type = tunnel_type;
2736 switch (tunnel_type) {
2737 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
2738 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
2740 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
2741 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
2747 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2749 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
2754 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
2758 struct hwrm_tunnel_dst_port_alloc_input req = {0};
2759 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2761 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
2763 req.tunnel_type = tunnel_type;
2764 req.tunnel_dst_port_val = port;
2766 mutex_lock(&bp->hwrm_cmd_lock);
2767 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2769 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
2774 if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN)
2775 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2777 else if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE)
2778 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
2780 mutex_unlock(&bp->hwrm_cmd_lock);
2784 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
2786 struct hwrm_cfa_l2_set_rx_mask_input req = {0};
2787 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
2789 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
2790 req.dflt_vnic_id = cpu_to_le32(vnic->fw_vnic_id);
2792 req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
2793 req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
2794 req.mask = cpu_to_le32(vnic->rx_mask);
2795 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2798 #ifdef CONFIG_RFS_ACCEL
2799 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
2800 struct bnxt_ntuple_filter *fltr)
2802 struct hwrm_cfa_ntuple_filter_free_input req = {0};
2804 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
2805 req.ntuple_filter_id = fltr->filter_id;
2806 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2809 #define BNXT_NTP_FLTR_FLAGS \
2810 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
2811 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
2812 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
2813 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
2814 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
2815 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
2816 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
2817 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
2818 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
2819 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
2820 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
2821 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
2822 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
2823 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID)
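/* Program one accelerated RFS (ntuple) filter.  The IPv4 4-tuple and
 * source MAC taken from the filter's flow_keys are matched exactly
 * (all-ones masks) and steered to the vnic of the target rx queue
 * (rxq + 1, since vnic 0 is the default vnic).
 */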
2825 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
2826 struct bnxt_ntuple_filter *fltr)
2829 struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
2830 struct hwrm_cfa_ntuple_filter_alloc_output *resp =
2831 bp->hwrm_cmd_resp_addr;
2832 struct flow_keys *keys = &fltr->fkeys;
2833 struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
2835 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
2836 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[0];
2838 req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
2840 req.ethertype = htons(ETH_P_IP);
2841 memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
2842 req.ipaddr_type = 4;
2843 req.ip_protocol = keys->basic.ip_proto;
2845 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
2846 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
2847 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
2848 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
2850 req.src_port = keys->ports.src;
2851 req.src_port_mask = cpu_to_be16(0xffff);
2852 req.dst_port = keys->ports.dst;
2853 req.dst_port_mask = cpu_to_be16(0xffff);
2855 req.dst_vnic_id = cpu_to_le16(vnic->fw_vnic_id);
2856 mutex_lock(&bp->hwrm_cmd_lock);
2857 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2859 fltr->filter_id = resp->ntuple_filter_id;
2860 mutex_unlock(&bp->hwrm_cmd_lock);
2865 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
2869 struct hwrm_cfa_l2_filter_alloc_input req = {0};
2870 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2872 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
2873 req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX |
2874 CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
2875 req.dst_vnic_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
2877 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
2878 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID |
2879 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
2880 memcpy(req.l2_addr, mac_addr, ETH_ALEN);
2881 req.l2_addr_mask[0] = 0xff;
2882 req.l2_addr_mask[1] = 0xff;
2883 req.l2_addr_mask[2] = 0xff;
2884 req.l2_addr_mask[3] = 0xff;
2885 req.l2_addr_mask[4] = 0xff;
2886 req.l2_addr_mask[5] = 0xff;
2888 mutex_lock(&bp->hwrm_cmd_lock);
2889 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2891 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
2893 mutex_unlock(&bp->hwrm_cmd_lock);
2897 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
2899 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
2902 /* Any associated ntuple filters will also be cleared by firmware. */
2903 mutex_lock(&bp->hwrm_cmd_lock);
2904 for (i = 0; i < num_of_vnics; i++) {
2905 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2907 for (j = 0; j < vnic->uc_filter_count; j++) {
2908 struct hwrm_cfa_l2_filter_free_input req = {0};
2910 bnxt_hwrm_cmd_hdr_init(bp, &req,
2911 HWRM_CFA_L2_FILTER_FREE, -1, -1);
2913 req.l2_filter_id = vnic->fw_l2_filter_id[j];
2915 rc = _hwrm_send_message(bp, &req, sizeof(req),
2918 vnic->uc_filter_count = 0;
2920 mutex_unlock(&bp->hwrm_cmd_lock);
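/* Configure hardware TPA (LRO, plus GRO mode when BNXT_FLAG_GRO is set)
 * on a vnic.  When enabling, the maximum number of aggregation segments
 * is derived from the MTU and MAX_SKB_FRAGS and handed to the firmware
 * in log2 units (see the nsegs/ilog2 calculation below).
 */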
2925 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
2927 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
2928 struct hwrm_vnic_tpa_cfg_input req = {0};
2930 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
2933 u16 mss = bp->dev->mtu - 40;
2934 u32 nsegs, n, segs = 0, flags;
2936 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
2937 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
2938 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
2939 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
2940 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
2941 if (tpa_flags & BNXT_FLAG_GRO)
2942 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
2944 req.flags = cpu_to_le32(flags);
2947 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
2948 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS);
2950 /* The number of segs is in log2 units, and the first packet is
2951 * not included as part of these units.
2953 if (mss <= PAGE_SIZE) {
2954 n = PAGE_SIZE / mss;
2955 nsegs = (MAX_SKB_FRAGS - 1) * n;
2957 n = mss / PAGE_SIZE;
2958 if (mss & (PAGE_SIZE - 1))
2960 nsegs = (MAX_SKB_FRAGS - n) / n;
2963 segs = ilog2(nsegs);
2964 req.max_agg_segs = cpu_to_le16(segs);
2965 req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
2967 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
2969 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
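/* Program the RSS hash types, hash key and indirection table for a
 * vnic.  The indirection table is filled round-robin with the firmware
 * ring group ids of the rx rings backing this vnic.
 */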
2972 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
2974 u32 i, j, max_rings;
2975 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
2976 struct hwrm_vnic_rss_cfg_input req = {0};
2978 if (vnic->fw_rss_cos_lb_ctx == INVALID_HW_RING_ID)
2981 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
2983 vnic->hash_type = BNXT_RSS_HASH_TYPE_FLAG_IPV4 |
2984 BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4 |
2985 BNXT_RSS_HASH_TYPE_FLAG_IPV6 |
2986 BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6;
2988 req.hash_type = cpu_to_le32(vnic->hash_type);
2990 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
2991 max_rings = bp->rx_nr_rings;
2995 /* Fill the RSS indirection table with ring group ids */
2996 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
2999 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
3002 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
3003 req.hash_key_tbl_addr =
3004 cpu_to_le64(vnic->rss_hash_key_dma_addr);
3006 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
3007 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3010 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
3012 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3013 struct hwrm_vnic_plcmodes_cfg_input req = {0};
3015 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
3016 req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
3017 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
3018 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
3020 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
3021 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
3022 /* thresholds not implemented in firmware yet */
3023 req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
3024 req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
3025 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
3026 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3029 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id)
3031 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
3033 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
3034 req.rss_cos_lb_ctx_id =
3035 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx);
3037 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3038 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
3041 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
3045 for (i = 0; i < bp->nr_vnics; i++) {
3046 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3048 if (vnic->fw_rss_cos_lb_ctx != INVALID_HW_RING_ID)
3049 bnxt_hwrm_vnic_ctx_free_one(bp, i);
3051 bp->rsscos_nr_ctxs = 0;
3054 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id)
3057 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
3058 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
3059 bp->hwrm_cmd_resp_addr;
3061 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
3064 mutex_lock(&bp->hwrm_cmd_lock);
3065 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3067 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx =
3068 le16_to_cpu(resp->rss_cos_lb_ctx_id);
3069 mutex_unlock(&bp->hwrm_cmd_lock);
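/* Configure a vnic: attach its RSS context and a default ring group,
 * set the MRU from the device MTU, and request VLAN stripping when
 * BNXT_FLAG_STRIP_VLAN is set.  For RFS vnics the default ring group
 * is the one for the rx ring the vnic was created for (vnic_id - 1).
 */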
3074 static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
3077 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3078 struct hwrm_vnic_cfg_input req = {0};
3080 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
3081 /* Only RSS is supported for now.  TBD: COS & LB */
3082 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP |
3083 VNIC_CFG_REQ_ENABLES_RSS_RULE);
3084 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
3085 req.cos_rule = cpu_to_le16(0xffff);
3086 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3088 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
3089 grp_idx = vnic_id - 1;
3091 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
3092 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
3094 req.lb_rule = cpu_to_le16(0xffff);
3095 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
3098 if (bp->flags & BNXT_FLAG_STRIP_VLAN)
3099 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
3101 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3104 static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
3108 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
3109 struct hwrm_vnic_free_input req = {0};
3111 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
3113 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
3115 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3118 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
3123 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
3127 for (i = 0; i < bp->nr_vnics; i++)
3128 bnxt_hwrm_vnic_free_one(bp, i);
3131 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, u16 start_grp_id,
3135 struct hwrm_vnic_alloc_input req = {0};
3136 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3138 /* map ring groups to this vnic */
3139 for (i = start_grp_id, j = 0; i < end_grp_id; i++, j++) {
3140 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) {
3141 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
3142 j, (end_grp_id - start_grp_id));
3145 bp->vnic_info[vnic_id].fw_grp_ids[j] =
3146 bp->grp_info[i].fw_grp_id;
3149 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
3151 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
3153 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
3155 mutex_lock(&bp->hwrm_cmd_lock);
3156 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3158 bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
3159 mutex_unlock(&bp->hwrm_cmd_lock);
3163 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
3168 mutex_lock(&bp->hwrm_cmd_lock);
3169 for (i = 0; i < bp->rx_nr_rings; i++) {
3170 struct hwrm_ring_grp_alloc_input req = {0};
3171 struct hwrm_ring_grp_alloc_output *resp =
3172 bp->hwrm_cmd_resp_addr;
3174 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
3176 req.cr = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
3177 req.rr = cpu_to_le16(bp->grp_info[i].rx_fw_ring_id);
3178 req.ar = cpu_to_le16(bp->grp_info[i].agg_fw_ring_id);
3179 req.sc = cpu_to_le16(bp->grp_info[i].fw_stats_ctx);
3181 rc = _hwrm_send_message(bp, &req, sizeof(req),
3186 bp->grp_info[i].fw_grp_id = le32_to_cpu(resp->ring_group_id);
3188 mutex_unlock(&bp->hwrm_cmd_lock);
3192 static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
3196 struct hwrm_ring_grp_free_input req = {0};
3201 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
3203 mutex_lock(&bp->hwrm_cmd_lock);
3204 for (i = 0; i < bp->cp_nr_rings; i++) {
3205 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
3208 cpu_to_le32(bp->grp_info[i].fw_grp_id);
3210 rc = _hwrm_send_message(bp, &req, sizeof(req),
3214 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3216 mutex_unlock(&bp->hwrm_cmd_lock);
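/* Common helper to allocate one hardware ring (tx, rx, rx aggregation
 * or completion) via HWRM_RING_ALLOC.  The ring's page table (or its
 * single data page), length and logical doorbell/MSI-X index are passed
 * to the firmware, and the returned fw_ring_id is saved in the ring
 * structure.
 */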
3220 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
3221 struct bnxt_ring_struct *ring,
3222 u32 ring_type, u32 map_index,
3225 int rc = 0, err = 0;
3226 struct hwrm_ring_alloc_input req = {0};
3227 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3230 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
3233 if (ring->nr_pages > 1) {
3234 req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
3235 /* Page size is in log2 units */
3236 req.page_size = BNXT_PAGE_SHIFT;
3237 req.page_tbl_depth = 1;
3239 req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]);
3242 /* Association of ring index with doorbell index and MSIX number */
3243 req.logical_id = cpu_to_le16(map_index);
3245 switch (ring_type) {
3246 case HWRM_RING_ALLOC_TX:
3247 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
3248 /* Association of transmit ring with completion ring */
3250 cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
3251 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
3252 req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
3253 req.queue_id = cpu_to_le16(ring->queue_id);
3255 case HWRM_RING_ALLOC_RX:
3256 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
3257 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
3259 case HWRM_RING_ALLOC_AGG:
3260 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
3261 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
3263 case HWRM_RING_ALLOC_CMPL:
3264 req.ring_type = RING_ALLOC_REQ_RING_TYPE_CMPL;
3265 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
3266 if (bp->flags & BNXT_FLAG_USING_MSIX)
3267 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
3270 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
3275 mutex_lock(&bp->hwrm_cmd_lock);
3276 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3277 err = le16_to_cpu(resp->error_code);
3278 ring_id = le16_to_cpu(resp->ring_id);
3279 mutex_unlock(&bp->hwrm_cmd_lock);
3282 switch (ring_type) {
3283 case RING_FREE_REQ_RING_TYPE_CMPL:
3284 netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
3288 case RING_FREE_REQ_RING_TYPE_RX:
3289 netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
3293 case RING_FREE_REQ_RING_TYPE_TX:
3294 netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
3299 netdev_err(bp->dev, "Invalid ring\n");
3303 ring->fw_ring_id = ring_id;
3307 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
3311 for (i = 0; i < bp->cp_nr_rings; i++) {
3312 struct bnxt_napi *bnapi = bp->bnapi[i];
3313 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3314 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3316 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
3317 INVALID_STATS_CTX_ID);
3320 cpr->cp_doorbell = bp->bar1 + i * 0x80;
3321 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
3322 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
3325 for (i = 0; i < bp->tx_nr_rings; i++) {
3326 struct bnxt_napi *bnapi = bp->bnapi[i];
3327 struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
3328 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3329 u16 fw_stats_ctx = bp->grp_info[i].fw_stats_ctx;
3331 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX, i,
3335 txr->tx_doorbell = bp->bar1 + i * 0x80;
3338 for (i = 0; i < bp->rx_nr_rings; i++) {
3339 struct bnxt_napi *bnapi = bp->bnapi[i];
3340 struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
3341 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
3343 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX, i,
3344 INVALID_STATS_CTX_ID);
3347 rxr->rx_doorbell = bp->bar1 + i * 0x80;
3348 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
3349 bp->grp_info[i].rx_fw_ring_id = ring->fw_ring_id;
3352 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
3353 for (i = 0; i < bp->rx_nr_rings; i++) {
3354 struct bnxt_napi *bnapi = bp->bnapi[i];
3355 struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
3356 struct bnxt_ring_struct *ring =
3357 &rxr->rx_agg_ring_struct;
3359 rc = hwrm_ring_alloc_send_msg(bp, ring,
3360 HWRM_RING_ALLOC_AGG,
3361 bp->rx_nr_rings + i,
3362 INVALID_STATS_CTX_ID);
3366 rxr->rx_agg_doorbell =
3367 bp->bar1 + (bp->rx_nr_rings + i) * 0x80;
3368 writel(DB_KEY_RX | rxr->rx_agg_prod,
3369 rxr->rx_agg_doorbell);
3370 bp->grp_info[i].agg_fw_ring_id = ring->fw_ring_id;
3377 static int hwrm_ring_free_send_msg(struct bnxt *bp,
3378 struct bnxt_ring_struct *ring,
3379 u32 ring_type, int cmpl_ring_id)
3382 struct hwrm_ring_free_input req = {0};
3383 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
3386 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, -1, -1);
3387 req.ring_type = ring_type;
3388 req.ring_id = cpu_to_le16(ring->fw_ring_id);
3390 mutex_lock(&bp->hwrm_cmd_lock);
3391 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3392 error_code = le16_to_cpu(resp->error_code);
3393 mutex_unlock(&bp->hwrm_cmd_lock);
3395 if (rc || error_code) {
3396 switch (ring_type) {
3397 case RING_FREE_REQ_RING_TYPE_CMPL:
3398 netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
3401 case RING_FREE_REQ_RING_TYPE_RX:
3402 netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n",
3405 case RING_FREE_REQ_RING_TYPE_TX:
3406 netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n",
3410 netdev_err(bp->dev, "Invalid ring\n");
3417 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
3424 for (i = 0; i < bp->tx_nr_rings; i++) {
3425 struct bnxt_napi *bnapi = bp->bnapi[i];
3426 struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
3427 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3428 u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
3430 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
3431 hwrm_ring_free_send_msg(bp, ring,
3432 RING_FREE_REQ_RING_TYPE_TX,
3433 close_path ? cmpl_ring_id :
3434 INVALID_HW_RING_ID);
3435 ring->fw_ring_id = INVALID_HW_RING_ID;
3439 for (i = 0; i < bp->rx_nr_rings; i++) {
3440 struct bnxt_napi *bnapi = bp->bnapi[i];
3441 struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
3442 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
3443 u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
3445 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
3446 hwrm_ring_free_send_msg(bp, ring,
3447 RING_FREE_REQ_RING_TYPE_RX,
3448 close_path ? cmpl_ring_id :
3449 INVALID_HW_RING_ID);
3450 ring->fw_ring_id = INVALID_HW_RING_ID;
3451 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3455 for (i = 0; i < bp->rx_nr_rings; i++) {
3456 struct bnxt_napi *bnapi = bp->bnapi[i];
3457 struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
3458 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
3459 u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
3461 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
3462 hwrm_ring_free_send_msg(bp, ring,
3463 RING_FREE_REQ_RING_TYPE_RX,
3464 close_path ? cmpl_ring_id :
3465 INVALID_HW_RING_ID);
3466 ring->fw_ring_id = INVALID_HW_RING_ID;
3467 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3471 for (i = 0; i < bp->cp_nr_rings; i++) {
3472 struct bnxt_napi *bnapi = bp->bnapi[i];
3473 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3474 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3476 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
3477 hwrm_ring_free_send_msg(bp, ring,
3478 RING_FREE_REQ_RING_TYPE_CMPL,
3479 INVALID_HW_RING_ID);
3480 ring->fw_ring_id = INVALID_HW_RING_ID;
3481 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
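/* Apply the interrupt coalescing settings to every completion ring.
 * The driver's coal_ticks/coal_bufs values are converted into the
 * firmware's DMA aggregation buffer counts and timers, clamped to the
 * ranges the firmware accepts, and RING_IDLE is enabled for very low
 * timer values to reduce latency.
 */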
3486 int bnxt_hwrm_set_coal(struct bnxt *bp)
3489 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
3490 u16 max_buf, max_buf_irq;
3491 u16 buf_tmr, buf_tmr_irq;
3494 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
3497 /* Each rx completion (2 records) should be DMAed immediately */
3498 max_buf = min_t(u16, bp->coal_bufs / 4, 2);
3499 /* max_buf must not be zero */
3500 max_buf = clamp_t(u16, max_buf, 1, 63);
3501 max_buf_irq = clamp_t(u16, bp->coal_bufs_irq, 1, 63);
3502 buf_tmr = max_t(u16, bp->coal_ticks / 4, 1);
3503 buf_tmr_irq = max_t(u16, bp->coal_ticks_irq, 1);
3505 flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
3507 /* RING_IDLE generates more IRQs for lower latency. Enable it only
3508 * if coal_ticks is less than 25 us.
3510 if (BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks) < 25)
3511 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
3513 req.flags = cpu_to_le16(flags);
3514 req.num_cmpl_dma_aggr = cpu_to_le16(max_buf);
3515 req.num_cmpl_dma_aggr_during_int = cpu_to_le16(max_buf_irq);
3516 req.cmpl_aggr_dma_tmr = cpu_to_le16(buf_tmr);
3517 req.cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmr_irq);
3518 req.int_lat_tmr_min = cpu_to_le16(buf_tmr);
3519 req.int_lat_tmr_max = cpu_to_le16(bp->coal_ticks);
3520 req.num_cmpl_aggr_int = cpu_to_le16(bp->coal_bufs);
3522 mutex_lock(&bp->hwrm_cmd_lock);
3523 for (i = 0; i < bp->cp_nr_rings; i++) {
3524 req.ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
3526 rc = _hwrm_send_message(bp, &req, sizeof(req),
3531 mutex_unlock(&bp->hwrm_cmd_lock);
3535 static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
3538 struct hwrm_stat_ctx_free_input req = {0};
3543 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
3545 mutex_lock(&bp->hwrm_cmd_lock);
3546 for (i = 0; i < bp->cp_nr_rings; i++) {
3547 struct bnxt_napi *bnapi = bp->bnapi[i];
3548 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3550 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
3551 req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
3553 rc = _hwrm_send_message(bp, &req, sizeof(req),
3558 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
3561 mutex_unlock(&bp->hwrm_cmd_lock);
3565 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
3568 struct hwrm_stat_ctx_alloc_input req = {0};
3569 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3571 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
3573 req.update_period_ms = cpu_to_le32(1000);
3575 mutex_lock(&bp->hwrm_cmd_lock);
3576 for (i = 0; i < bp->cp_nr_rings; i++) {
3577 struct bnxt_napi *bnapi = bp->bnapi[i];
3578 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3580 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
3582 rc = _hwrm_send_message(bp, &req, sizeof(req),
3587 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
3589 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
3591 mutex_unlock(&bp->hwrm_cmd_lock);
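/* Query this function's resource limits (rings, ring groups, vnics,
 * RSS and stats contexts, VFs, flow resources) and its permanent MAC
 * address.  The results are cached in bp->pf or bp->vf and are used to
 * bound the ring/vnic configuration elsewhere in the driver.
 */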
3595 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
3598 struct hwrm_func_qcaps_input req = {0};
3599 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3601 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
3602 req.fid = cpu_to_le16(0xffff);
3604 mutex_lock(&bp->hwrm_cmd_lock);
3605 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3607 goto hwrm_func_qcaps_exit;
3610 struct bnxt_pf_info *pf = &bp->pf;
3612 pf->fw_fid = le16_to_cpu(resp->fid);
3613 pf->port_id = le16_to_cpu(resp->port_id);
3614 memcpy(pf->mac_addr, resp->perm_mac_address, ETH_ALEN);
3615 memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
3616 pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
3617 pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
3618 pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
3619 pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
3620 pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
3621 if (!pf->max_hw_ring_grps)
3622 pf->max_hw_ring_grps = pf->max_tx_rings;
3623 pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
3624 pf->max_vnics = le16_to_cpu(resp->max_vnics);
3625 pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
3626 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
3627 pf->max_vfs = le16_to_cpu(resp->max_vfs);
3628 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
3629 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
3630 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
3631 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
3632 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
3633 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
3635 #ifdef CONFIG_BNXT_SRIOV
3636 struct bnxt_vf_info *vf = &bp->vf;
3638 vf->fw_fid = le16_to_cpu(resp->fid);
3639 memcpy(vf->mac_addr, resp->perm_mac_address, ETH_ALEN);
3640 if (is_valid_ether_addr(vf->mac_addr))
3641 /* overwrite netdev dev_addr with admin VF MAC */
3642 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
3644 random_ether_addr(bp->dev->dev_addr);
3646 vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
3647 vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
3648 vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
3649 vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
3650 vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
3651 if (!vf->max_hw_ring_grps)
3652 vf->max_hw_ring_grps = vf->max_tx_rings;
3653 vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
3654 vf->max_vnics = le16_to_cpu(resp->max_vnics);
3655 vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
3659 bp->tx_push_thresh = 0;
3661 cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
3662 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
3664 hwrm_func_qcaps_exit:
3665 mutex_unlock(&bp->hwrm_cmd_lock);
3669 static int bnxt_hwrm_func_reset(struct bnxt *bp)
3671 struct hwrm_func_reset_input req = {0};
3673 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
3676 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
3679 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
3682 struct hwrm_queue_qportcfg_input req = {0};
3683 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
3686 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
3688 mutex_lock(&bp->hwrm_cmd_lock);
3689 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3693 if (!resp->max_configurable_queues) {
3697 bp->max_tc = resp->max_configurable_queues;
3698 if (bp->max_tc > BNXT_MAX_QUEUE)
3699 bp->max_tc = BNXT_MAX_QUEUE;
3701 qptr = &resp->queue_id0;
3702 for (i = 0; i < bp->max_tc; i++) {
3703 bp->q_info[i].queue_id = *qptr++;
3704 bp->q_info[i].queue_profile = *qptr++;
3708 mutex_unlock(&bp->hwrm_cmd_lock);
3712 static int bnxt_hwrm_ver_get(struct bnxt *bp)
3715 struct hwrm_ver_get_input req = {0};
3716 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
3718 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
3719 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
3720 req.hwrm_intf_min = HWRM_VERSION_MINOR;
3721 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
3722 mutex_lock(&bp->hwrm_cmd_lock);
3723 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3725 goto hwrm_ver_get_exit;
3727 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
3729 if (req.hwrm_intf_maj != resp->hwrm_intf_maj ||
3730 req.hwrm_intf_min != resp->hwrm_intf_min ||
3731 req.hwrm_intf_upd != resp->hwrm_intf_upd) {
3732 netdev_warn(bp->dev, "HWRM interface %d.%d.%d does not match driver interface %d.%d.%d.\n",
3733 resp->hwrm_intf_maj, resp->hwrm_intf_min,
3734 resp->hwrm_intf_upd, req.hwrm_intf_maj,
3735 req.hwrm_intf_min, req.hwrm_intf_upd);
3736 netdev_warn(bp->dev, "Please update driver or firmware with matching interface versions.\n");
3738 snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "bc %d.%d.%d rm %d.%d.%d",
3739 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
3740 resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
3743 mutex_unlock(&bp->hwrm_cmd_lock);
3747 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
3749 if (bp->vxlan_port_cnt) {
3750 bnxt_hwrm_tunnel_dst_port_free(
3751 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
3753 bp->vxlan_port_cnt = 0;
3754 if (bp->nge_port_cnt) {
3755 bnxt_hwrm_tunnel_dst_port_free(
3756 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
3758 bp->nge_port_cnt = 0;
3761 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
3767 tpa_flags = bp->flags & BNXT_FLAG_TPA;
3768 for (i = 0; i < bp->nr_vnics; i++) {
3769 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
3771 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
3779 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
3783 for (i = 0; i < bp->nr_vnics; i++)
3784 bnxt_hwrm_vnic_set_rss(bp, i, false);
3787 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
3790 if (bp->vnic_info) {
3791 bnxt_hwrm_clear_vnic_filter(bp);
3792 /* clear all RSS settings before freeing the vnic ctx */
3793 bnxt_hwrm_clear_vnic_rss(bp);
3794 bnxt_hwrm_vnic_ctx_free(bp);
3795 /* before freeing the vnic, undo the vnic tpa settings */
3796 if (bp->flags & BNXT_FLAG_TPA)
3797 bnxt_set_tpa(bp, false);
3798 bnxt_hwrm_vnic_free(bp);
3800 bnxt_hwrm_ring_free(bp, close_path);
3801 bnxt_hwrm_ring_grp_free(bp);
3803 bnxt_hwrm_stat_ctx_free(bp);
3804 bnxt_hwrm_free_tunnel_ports(bp);
3808 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
3812 /* allocate context for vnic */
3813 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id);
3815 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
3817 goto vnic_setup_err;
3819 bp->rsscos_nr_ctxs++;
3821 /* configure default vnic, ring grp */
3822 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
3824 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
3826 goto vnic_setup_err;
3829 /* Enable RSS hashing on vnic */
3830 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
3832 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
3834 goto vnic_setup_err;
3837 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
3838 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
3840 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
3849 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
3851 #ifdef CONFIG_RFS_ACCEL
3854 for (i = 0; i < bp->rx_nr_rings; i++) {
3855 u16 vnic_id = i + 1;
3858 if (vnic_id >= bp->nr_vnics)
3861 bp->vnic_info[vnic_id].flags |= BNXT_VNIC_RFS_FLAG;
3862 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, ring_id + 1);
3864 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
3868 rc = bnxt_setup_vnic(bp, vnic_id);
3878 static int bnxt_cfg_rx_mode(struct bnxt *);
3880 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
3885 rc = bnxt_hwrm_stat_ctx_alloc(bp);
3887 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
3893 rc = bnxt_hwrm_ring_alloc(bp);
3895 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
3899 rc = bnxt_hwrm_ring_grp_alloc(bp);
3901 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
3905 /* default vnic 0 */
3906 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, bp->rx_nr_rings);
3908 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
3912 rc = bnxt_setup_vnic(bp, 0);
3916 if (bp->flags & BNXT_FLAG_RFS) {
3917 rc = bnxt_alloc_rfs_vnics(bp);
3922 if (bp->flags & BNXT_FLAG_TPA) {
3923 rc = bnxt_set_tpa(bp, true);
3929 bnxt_update_vf_mac(bp);
3931 /* Filter for default vnic 0 */
3932 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
3934 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
3937 bp->vnic_info[0].uc_filter_count = 1;
3939 bp->vnic_info[0].rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_UNICAST |
3940 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
3942 if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp))
3943 bp->vnic_info[0].rx_mask |=
3944 CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
3946 rc = bnxt_cfg_rx_mode(bp);
3950 rc = bnxt_hwrm_set_coal(bp);
3952 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
3958 bnxt_hwrm_resource_free(bp, 0, true);
3963 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
3965 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
3969 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
3971 bnxt_init_rx_rings(bp);
3972 bnxt_init_tx_rings(bp);
3973 bnxt_init_ring_grps(bp, irq_re_init);
3974 bnxt_init_vnics(bp);
3976 return bnxt_init_chip(bp, irq_re_init);
3979 static void bnxt_disable_int(struct bnxt *bp)
3986 for (i = 0; i < bp->cp_nr_rings; i++) {
3987 struct bnxt_napi *bnapi = bp->bnapi[i];
3988 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3990 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
3994 static void bnxt_enable_int(struct bnxt *bp)
3998 atomic_set(&bp->intr_sem, 0);
3999 for (i = 0; i < bp->cp_nr_rings; i++) {
4000 struct bnxt_napi *bnapi = bp->bnapi[i];
4001 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4003 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
4007 static int bnxt_set_real_num_queues(struct bnxt *bp)
4010 struct net_device *dev = bp->dev;
4012 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings);
4016 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
4020 #ifdef CONFIG_RFS_ACCEL
4021 if (bp->flags & BNXT_FLAG_RFS)
4022 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
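/* Allocate one MSI-X vector per completion ring if possible.  When
 * fewer vectors are granted, the rx/tx ring counts are trimmed to
 * match, the per-TC tx split is recomputed, and the irq table is
 * filled in before the real number of queues is set on the netdev.
 */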
4028 static int bnxt_setup_msix(struct bnxt *bp)
4030 struct msix_entry *msix_ent;
4031 struct net_device *dev = bp->dev;
4032 int i, total_vecs, rc = 0;
4033 const int len = sizeof(bp->irq_tbl[0].name);
4035 bp->flags &= ~BNXT_FLAG_USING_MSIX;
4036 total_vecs = bp->cp_nr_rings;
4038 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
4042 for (i = 0; i < total_vecs; i++) {
4043 msix_ent[i].entry = i;
4044 msix_ent[i].vector = 0;
4047 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, 1, total_vecs);
4048 if (total_vecs < 0) {
4050 goto msix_setup_exit;
4053 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
4057 /* Trim rings based upon num of vectors allocated */
4058 bp->rx_nr_rings = min_t(int, total_vecs, bp->rx_nr_rings);
4059 bp->tx_nr_rings = min_t(int, total_vecs, bp->tx_nr_rings);
4060 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
4061 tcs = netdev_get_num_tc(dev);
4063 bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs;
4064 if (bp->tx_nr_rings_per_tc == 0) {
4065 netdev_reset_tc(dev);
4066 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
4070 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
4071 for (i = 0; i < tcs; i++) {
4072 count = bp->tx_nr_rings_per_tc;
4074 netdev_set_tc_queue(dev, i, count, off);
4078 bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings);
4080 for (i = 0; i < bp->cp_nr_rings; i++) {
4081 bp->irq_tbl[i].vector = msix_ent[i].vector;
4082 snprintf(bp->irq_tbl[i].name, len,
4083 "%s-%s-%d", dev->name, "TxRx", i);
4084 bp->irq_tbl[i].handler = bnxt_msix;
4086 rc = bnxt_set_real_num_queues(bp);
4088 goto msix_setup_exit;
4091 goto msix_setup_exit;
4093 bp->flags |= BNXT_FLAG_USING_MSIX;
4098 netdev_err(bp->dev, "bnxt_setup_msix err: %x\n", rc);
4099 pci_disable_msix(bp->pdev);
4104 static int bnxt_setup_inta(struct bnxt *bp)
4107 const int len = sizeof(bp->irq_tbl[0].name);
4109 if (netdev_get_num_tc(bp->dev))
4110 netdev_reset_tc(bp->dev);
4112 bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
4117 bp->rx_nr_rings = 1;
4118 bp->tx_nr_rings = 1;
4119 bp->cp_nr_rings = 1;
4120 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
4121 bp->irq_tbl[0].vector = bp->pdev->irq;
4122 snprintf(bp->irq_tbl[0].name, len,
4123 "%s-%s-%d", bp->dev->name, "TxRx", 0);
4124 bp->irq_tbl[0].handler = bnxt_inta;
4125 rc = bnxt_set_real_num_queues(bp);
4129 static int bnxt_setup_int_mode(struct bnxt *bp)
4133 if (bp->flags & BNXT_FLAG_MSIX_CAP)
4134 rc = bnxt_setup_msix(bp);
4136 if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
4137 /* fallback to INTA */
4138 rc = bnxt_setup_inta(bp);
4143 static void bnxt_free_irq(struct bnxt *bp)
4145 struct bnxt_irq *irq;
4148 #ifdef CONFIG_RFS_ACCEL
4149 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
4150 bp->dev->rx_cpu_rmap = NULL;
4155 for (i = 0; i < bp->cp_nr_rings; i++) {
4156 irq = &bp->irq_tbl[i];
4158 free_irq(irq->vector, bp->bnapi[i]);
4161 if (bp->flags & BNXT_FLAG_USING_MSIX)
4162 pci_disable_msix(bp->pdev);
4167 static int bnxt_request_irq(struct bnxt *bp)
4170 unsigned long flags = 0;
4171 #ifdef CONFIG_RFS_ACCEL
4172 struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap;
4175 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
4176 flags = IRQF_SHARED;
4178 for (i = 0; i < bp->cp_nr_rings; i++) {
4179 struct bnxt_irq *irq = &bp->irq_tbl[i];
4180 #ifdef CONFIG_RFS_ACCEL
4181 if (rmap && (i < bp->rx_nr_rings)) {
4182 rc = irq_cpu_rmap_add(rmap, irq->vector);
4184 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
4188 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
4198 static void bnxt_del_napi(struct bnxt *bp)
4205 for (i = 0; i < bp->cp_nr_rings; i++) {
4206 struct bnxt_napi *bnapi = bp->bnapi[i];
4208 napi_hash_del(&bnapi->napi);
4209 netif_napi_del(&bnapi->napi);
4213 static void bnxt_init_napi(struct bnxt *bp)
4216 struct bnxt_napi *bnapi;
4218 if (bp->flags & BNXT_FLAG_USING_MSIX) {
4219 for (i = 0; i < bp->cp_nr_rings; i++) {
4220 bnapi = bp->bnapi[i];
4221 netif_napi_add(bp->dev, &bnapi->napi,
4225 bnapi = bp->bnapi[0];
4226 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
4230 static void bnxt_disable_napi(struct bnxt *bp)
4237 for (i = 0; i < bp->cp_nr_rings; i++) {
4238 napi_disable(&bp->bnapi[i]->napi);
4239 bnxt_disable_poll(bp->bnapi[i]);
4243 static void bnxt_enable_napi(struct bnxt *bp)
4247 for (i = 0; i < bp->cp_nr_rings; i++) {
4248 bnxt_enable_poll(bp->bnapi[i]);
4249 napi_enable(&bp->bnapi[i]->napi);
4253 static void bnxt_tx_disable(struct bnxt *bp)
4256 struct bnxt_napi *bnapi;
4257 struct bnxt_tx_ring_info *txr;
4258 struct netdev_queue *txq;
4261 for (i = 0; i < bp->tx_nr_rings; i++) {
4262 bnapi = bp->bnapi[i];
4263 txr = &bnapi->tx_ring;
4264 txq = netdev_get_tx_queue(bp->dev, i);
4265 __netif_tx_lock(txq, smp_processor_id());
4266 txr->dev_state = BNXT_DEV_STATE_CLOSING;
4267 __netif_tx_unlock(txq);
4270 /* Stop all TX queues */
4271 netif_tx_disable(bp->dev);
4272 netif_carrier_off(bp->dev);
4275 static void bnxt_tx_enable(struct bnxt *bp)
4278 struct bnxt_napi *bnapi;
4279 struct bnxt_tx_ring_info *txr;
4280 struct netdev_queue *txq;
4282 for (i = 0; i < bp->tx_nr_rings; i++) {
4283 bnapi = bp->bnapi[i];
4284 txr = &bnapi->tx_ring;
4285 txq = netdev_get_tx_queue(bp->dev, i);
4288 netif_tx_wake_all_queues(bp->dev);
4289 if (bp->link_info.link_up)
4290 netif_carrier_on(bp->dev);
4293 static void bnxt_report_link(struct bnxt *bp)
4295 if (bp->link_info.link_up) {
4297 const char *flow_ctrl;
4300 netif_carrier_on(bp->dev);
4301 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
4305 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
4306 flow_ctrl = "ON - receive & transmit";
4307 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
4308 flow_ctrl = "ON - transmit";
4309 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
4310 flow_ctrl = "ON - receive";
4313 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
4314 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
4315 speed, duplex, flow_ctrl);
4317 netif_carrier_off(bp->dev);
4318 netdev_err(bp->dev, "NIC Link is Down\n");
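/* Query the current PHY and link state from the firmware
 * (HWRM_PORT_PHY_QCFG) and cache it in bp->link_info.  When
 * chng_link_state is set, link_up is refreshed and bnxt_report_link()
 * is called on any change; otherwise link_up is forced down.
 */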
4322 static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
4325 struct bnxt_link_info *link_info = &bp->link_info;
4326 struct hwrm_port_phy_qcfg_input req = {0};
4327 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4328 u8 link_up = link_info->link_up;
4330 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
4332 mutex_lock(&bp->hwrm_cmd_lock);
4333 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4335 mutex_unlock(&bp->hwrm_cmd_lock);
4339 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
4340 link_info->phy_link_status = resp->link;
4341 link_info->duplex = resp->duplex;
4342 link_info->pause = resp->pause;
4343 link_info->auto_mode = resp->auto_mode;
4344 link_info->auto_pause_setting = resp->auto_pause;
4345 link_info->force_pause_setting = resp->force_pause;
4346 link_info->duplex_setting = resp->duplex_setting;
4347 if (link_info->phy_link_status == BNXT_LINK_LINK)
4348 link_info->link_speed = le16_to_cpu(resp->link_speed);
4350 link_info->link_speed = 0;
4351 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
4352 link_info->auto_link_speed = le16_to_cpu(resp->auto_link_speed);
4353 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
4354 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
4355 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
4356 link_info->phy_ver[0] = resp->phy_maj;
4357 link_info->phy_ver[1] = resp->phy_min;
4358 link_info->phy_ver[2] = resp->phy_bld;
4359 link_info->media_type = resp->media_type;
4360 link_info->transceiver = resp->transceiver_type;
4361 link_info->phy_addr = resp->phy_addr;
4363 /* TODO: need to add more logic to report VF link */
4364 if (chng_link_state) {
4365 if (link_info->phy_link_status == BNXT_LINK_LINK)
4366 link_info->link_up = 1;
4368 link_info->link_up = 0;
4369 if (link_up != link_info->link_up)
4370 bnxt_report_link(bp);
4372 /* always link down if not required to update link state */
4373 link_info->link_up = 0;
4375 mutex_unlock(&bp->hwrm_cmd_lock);
4380 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
4382 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
4383 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
4384 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
4385 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
4386 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
4388 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
4390 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
4391 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
4392 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
4393 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
4395 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
4399 static void bnxt_hwrm_set_link_common(struct bnxt *bp,
4400 struct hwrm_port_phy_cfg_input *req)
4402 u8 autoneg = bp->link_info.autoneg;
4403 u16 fw_link_speed = bp->link_info.req_link_speed;
4404 u32 advertising = bp->link_info.advertising;
4406 if (autoneg & BNXT_AUTONEG_SPEED) {
4408 PORT_PHY_CFG_REQ_AUTO_MODE_MASK;
4410 req->enables |= cpu_to_le32(
4411 PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
4412 req->auto_link_speed_mask = cpu_to_le16(advertising);
4414 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
4416 cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
4418 req->force_link_speed = cpu_to_le16(fw_link_speed);
4419 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
4422 /* currently don't support half duplex */
4423 req->auto_duplex = PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL;
4424 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX);
4425 /* tell chimp that the setting takes effect immediately */
4426 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
4429 int bnxt_hwrm_set_pause(struct bnxt *bp)
4431 struct hwrm_port_phy_cfg_input req = {0};
4434 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
4435 bnxt_hwrm_set_pause_common(bp, &req);
4437 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
4438 bp->link_info.force_link_chng)
4439 bnxt_hwrm_set_link_common(bp, &req);
4441 mutex_lock(&bp->hwrm_cmd_lock);
4442 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4443 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
4444 /* since changing the pause setting doesn't trigger any link
4445 * change event, the driver needs to update the current pause
4446 * result upon successful return of the phy_cfg command
4448 bp->link_info.pause =
4449 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
4450 bp->link_info.auto_pause_setting = 0;
4451 if (!bp->link_info.force_link_chng)
4452 bnxt_report_link(bp);
4454 bp->link_info.force_link_chng = false;
4455 mutex_unlock(&bp->hwrm_cmd_lock);
4459 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause)
4461 struct hwrm_port_phy_cfg_input req = {0};
4463 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
4465 bnxt_hwrm_set_pause_common(bp, &req);
4467 bnxt_hwrm_set_link_common(bp, &req);
4468 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4471 static int bnxt_update_phy_setting(struct bnxt *bp)
4474 bool update_link = false;
4475 bool update_pause = false;
4476 struct bnxt_link_info *link_info = &bp->link_info;
4478 rc = bnxt_update_link(bp, true);
4480 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
4484 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
4485 link_info->auto_pause_setting != link_info->req_flow_ctrl)
4486 update_pause = true;
4487 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
4488 link_info->force_pause_setting != link_info->req_flow_ctrl)
4489 update_pause = true;
4490 if (link_info->req_duplex != link_info->duplex_setting)
4492 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
4493 if (BNXT_AUTO_MODE(link_info->auto_mode))
4495 if (link_info->req_link_speed != link_info->force_link_speed)
4498 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
4500 if (link_info->advertising != link_info->auto_link_speeds)
4502 if (link_info->req_link_speed != link_info->auto_link_speed)
4507 rc = bnxt_hwrm_set_link_setting(bp, update_pause);
4508 else if (update_pause)
4509 rc = bnxt_hwrm_set_pause(bp);
4511 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
4519 /* Common routine to pre-map certain register block to different GRC window.
4520 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
4521 * in the PF and 3 windows in the VF can be customized to map to different
4524 static void bnxt_preset_reg_win(struct bnxt *bp)
4527 /* CAG registers map to GRC window #4 */
4528 writel(BNXT_CAG_REG_BASE,
4529 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
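/* Bring the NIC up: select the interrupt mode, allocate ring and vnic
 * memory, request irqs, enable NAPI, program the chip via
 * bnxt_init_nic(), update the PHY settings, register any VXLAN/GENEVE
 * tunnel ports, then enable interrupts, start the tx queues and the
 * periodic timer.
 */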
4533 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
4537 bnxt_preset_reg_win(bp);
4538 netif_carrier_off(bp->dev);
4540 rc = bnxt_setup_int_mode(bp);
4542 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
4547 if ((bp->flags & BNXT_FLAG_RFS) &&
4548 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
4549 /* disable RFS if falling back to INTA */
4550 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
4551 bp->flags &= ~BNXT_FLAG_RFS;
4554 rc = bnxt_alloc_mem(bp, irq_re_init);
4556 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
4557 goto open_err_free_mem;
4562 rc = bnxt_request_irq(bp);
4564 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
4569 bnxt_enable_napi(bp);
4571 rc = bnxt_init_nic(bp, irq_re_init);
4573 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
4578 rc = bnxt_update_phy_setting(bp);
4584 #if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
4585 vxlan_get_rx_port(bp->dev);
4587 if (!bnxt_hwrm_tunnel_dst_port_alloc(
4589 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE))
4590 bp->nge_port_cnt = 1;
4593 set_bit(BNXT_STATE_OPEN, &bp->state);
4594 bnxt_enable_int(bp);
4595 /* Enable TX queues */
4597 mod_timer(&bp->timer, jiffies + bp->current_interval);
4602 bnxt_disable_napi(bp);
4608 bnxt_free_mem(bp, true);
4612 /* rtnl_lock held */
4613 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
4617 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
4619 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
4625 static int bnxt_open(struct net_device *dev)
4627 struct bnxt *bp = netdev_priv(dev);
4630 rc = bnxt_hwrm_func_reset(bp);
4632 netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n",
4637 return __bnxt_open_nic(bp, true, true);
4640 static void bnxt_disable_int_sync(struct bnxt *bp)
4644 atomic_inc(&bp->intr_sem);
4645 if (!netif_running(bp->dev))
4648 bnxt_disable_int(bp);
4649 for (i = 0; i < bp->cp_nr_rings; i++)
4650 synchronize_irq(bp->irq_tbl[i].vector);
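/* Tear the NIC down, roughly the reverse of __bnxt_open_nic(): wait
 * for any pending SRIOV configuration, stop the tx queues, clear the
 * OPEN state and let any sp_task in progress finish, free the hardware
 * resources via bnxt_shutdown_nic(), then disable NAPI and interrupts,
 * stop the timer and free irqs and memory.
 */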
4653 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
4657 #ifdef CONFIG_BNXT_SRIOV
4658 if (bp->sriov_cfg) {
4659 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
4661 BNXT_SRIOV_CFG_WAIT_TMO);
4663 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
4666 /* Change device state to avoid TX queue wake-ups */
4667 bnxt_tx_disable(bp);
4669 clear_bit(BNXT_STATE_OPEN, &bp->state);
4670 smp_mb__after_atomic();
4671 while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state))
4674 /* Flush rings before disabling interrupts */
4675 bnxt_shutdown_nic(bp, irq_re_init);
4677 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
4679 bnxt_disable_napi(bp);
4680 bnxt_disable_int_sync(bp);
4681 del_timer_sync(&bp->timer);
4688 bnxt_free_mem(bp, irq_re_init);
4692 static int bnxt_close(struct net_device *dev)
4694 struct bnxt *bp = netdev_priv(dev);
4696 bnxt_close_nic(bp, true, true);
4700 /* rtnl_lock held */
4701 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4707 if (!netif_running(dev))
4714 if (!netif_running(dev))
4726 static struct rtnl_link_stats64 *
4727 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
4730 struct bnxt *bp = netdev_priv(dev);
4732 memset(stats, 0, sizeof(struct rtnl_link_stats64));
4737 /* TODO check if we need to synchronize with bnxt_close path */
4738 for (i = 0; i < bp->cp_nr_rings; i++) {
4739 struct bnxt_napi *bnapi = bp->bnapi[i];
4740 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4741 struct ctx_hw_stats *hw_stats = cpr->hw_stats;
4743 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
4744 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
4745 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
4747 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
4748 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
4749 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
4751 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
4752 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
4753 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
4755 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
4756 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
4757 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
4759 stats->rx_missed_errors +=
4760 le64_to_cpu(hw_stats->rx_discard_pkts);
4762 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
4764 stats->rx_dropped += le64_to_cpu(hw_stats->rx_drop_pkts);
4766 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
4772 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
4774 struct net_device *dev = bp->dev;
4775 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
4776 struct netdev_hw_addr *ha;
4779 bool update = false;
4782 netdev_for_each_mc_addr(ha, dev) {
4783 if (mc_count >= BNXT_MAX_MC_ADDRS) {
4784 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
4785 vnic->mc_list_count = 0;
4789 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
4790 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
4797 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
4799 if (mc_count != vnic->mc_list_count) {
4800 vnic->mc_list_count = mc_count;
4806 static bool bnxt_uc_list_updated(struct bnxt *bp)
4808 struct net_device *dev = bp->dev;
4809 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
4810 struct netdev_hw_addr *ha;
4813 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
4816 netdev_for_each_uc_addr(ha, dev) {
4817 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
4825 static void bnxt_set_rx_mode(struct net_device *dev)
4827 struct bnxt *bp = netdev_priv(dev);
4828 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
4829 u32 mask = vnic->rx_mask;
4830 bool mc_update = false;
4833 if (!netif_running(dev))
4836 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
4837 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
4838 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
4840 /* Only allow PF to be in promiscuous mode */
4841 if ((dev->flags & IFF_PROMISC) && BNXT_PF(bp))
4842 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
4844 uc_update = bnxt_uc_list_updated(bp);
4846 if (dev->flags & IFF_ALLMULTI) {
4847 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
4848 vnic->mc_list_count = 0;
4850 mc_update = bnxt_mc_list_updated(bp, &mask);
4853 if (mask != vnic->rx_mask || uc_update || mc_update) {
4854 vnic->rx_mask = mask;
4856 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
4857 schedule_work(&bp->sp_task);
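/* Process-context counterpart of bnxt_set_rx_mode, run from
 * bnxt_sp_task: free the stale unicast L2 filters, program the current
 * unicast list, then push the rx mask to firmware.
 */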
4861 static int bnxt_cfg_rx_mode(struct bnxt *bp)
4863 struct net_device *dev = bp->dev;
4864 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
4865 struct netdev_hw_addr *ha;
4869 netif_addr_lock_bh(dev);
4870 uc_update = bnxt_uc_list_updated(bp);
4871 netif_addr_unlock_bh(dev);
4876 mutex_lock(&bp->hwrm_cmd_lock);
4877 for (i = 1; i < vnic->uc_filter_count; i++) {
4878 struct hwrm_cfa_l2_filter_free_input req = {0};
4880 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
4883 req.l2_filter_id = vnic->fw_l2_filter_id[i];
4885 rc = _hwrm_send_message(bp, &req, sizeof(req),
4888 mutex_unlock(&bp->hwrm_cmd_lock);
4890 vnic->uc_filter_count = 1;
4892 netif_addr_lock_bh(dev);
4893 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
4894 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
4896 netdev_for_each_uc_addr(ha, dev) {
4897 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
4899 vnic->uc_filter_count++;
4902 netif_addr_unlock_bh(dev);
4904 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
4905 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
4907 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
4909 vnic->uc_filter_count = i;
4915 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
4917 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
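/* aRFS needs a VNIC and an RSS context for every RX ring on top of the
 * default VNIC; only report support when the PF has enough of both and
 * MSI-X is available.
 */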
4923 static bool bnxt_rfs_capable(struct bnxt *bp)
4925 #ifdef CONFIG_RFS_ACCEL
4926 struct bnxt_pf_info *pf = &bp->pf;
4929 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_MSIX_CAP))
4932 vnics = 1 + bp->rx_nr_rings;
4933 if (vnics > pf->max_rsscos_ctxs || vnics > pf->max_vnics)
4942 static netdev_features_t bnxt_fix_features(struct net_device *dev,
4943 netdev_features_t features)
4945 struct bnxt *bp = netdev_priv(dev);
4947 if (!bnxt_rfs_capable(bp))
4948 features &= ~NETIF_F_NTUPLE;
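/* Map netdev features onto BNXT_FLAG_* bits.  Toggling TPA fully on or
 * off, or changing any non-TPA flag, takes the close/open path;
 * otherwise a lighter bnxt_set_tpa() update is sufficient.
 */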
4952 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
4954 struct bnxt *bp = netdev_priv(dev);
4955 u32 flags = bp->flags;
4958 bool re_init = false;
4959 bool update_tpa = false;
4961 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
4962 if ((features & NETIF_F_GRO) && (bp->pdev->revision > 0))
4963 flags |= BNXT_FLAG_GRO;
4964 if (features & NETIF_F_LRO)
4965 flags |= BNXT_FLAG_LRO;
4967 if (features & NETIF_F_HW_VLAN_CTAG_RX)
4968 flags |= BNXT_FLAG_STRIP_VLAN;
4970 if (features & NETIF_F_NTUPLE)
4971 flags |= BNXT_FLAG_RFS;
4973 changes = flags ^ bp->flags;
4974 if (changes & BNXT_FLAG_TPA) {
4976 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
4977 (flags & BNXT_FLAG_TPA) == 0)
4981 if (changes & ~BNXT_FLAG_TPA)
4984 if (flags != bp->flags) {
4985 u32 old_flags = bp->flags;
4989 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
4991 bnxt_set_ring_params(bp);
4996 bnxt_close_nic(bp, false, false);
4998 bnxt_set_ring_params(bp);
5000 return bnxt_open_nic(bp, false, false);
5003 rc = bnxt_set_tpa(bp,
5004 (flags & BNXT_FLAG_TPA) ?
5007 bp->flags = old_flags;
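/* Debug helper: dump the producer/consumer indices of every TX, RX and
 * completion ring when driver messages are enabled.
 */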
5013 static void bnxt_dbg_dump_states(struct bnxt *bp)
5016 struct bnxt_napi *bnapi;
5017 struct bnxt_tx_ring_info *txr;
5018 struct bnxt_rx_ring_info *rxr;
5019 struct bnxt_cp_ring_info *cpr;
5021 for (i = 0; i < bp->cp_nr_rings; i++) {
5022 bnapi = bp->bnapi[i];
5023 txr = &bnapi->tx_ring;
5024 rxr = &bnapi->rx_ring;
5025 cpr = &bnapi->cp_ring;
5026 if (netif_msg_drv(bp)) {
5027 netdev_info(bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
5028 i, txr->tx_ring_struct.fw_ring_id,
5029 txr->tx_prod, txr->tx_cons);
5030 netdev_info(bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
5031 i, rxr->rx_ring_struct.fw_ring_id,
5033 rxr->rx_agg_ring_struct.fw_ring_id,
5034 rxr->rx_agg_prod, rxr->rx_sw_agg_prod);
5035 netdev_info(bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
5036 i, cpr->cp_ring_struct.fw_ring_id,
5042 static void bnxt_reset_task(struct bnxt *bp)
5044 bnxt_dbg_dump_states(bp);
5045 if (netif_running(bp->dev)) {
5046 bnxt_close_nic(bp, false, false);
5047 bnxt_open_nic(bp, false, false);
5051 static void bnxt_tx_timeout(struct net_device *dev)
5053 struct bnxt *bp = netdev_priv(dev);
5055 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
5056 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
5057 schedule_work(&bp->sp_task);
5060 #ifdef CONFIG_NET_POLL_CONTROLLER
5061 static void bnxt_poll_controller(struct net_device *dev)
5063 struct bnxt *bp = netdev_priv(dev);
5066 for (i = 0; i < bp->cp_nr_rings; i++) {
5067 struct bnxt_irq *irq = &bp->irq_tbl[i];
5069 disable_irq(irq->vector);
5070 irq->handler(irq->vector, bp->bnapi[i]);
5071 enable_irq(irq->vector);
5076 static void bnxt_timer(unsigned long data)
5078 struct bnxt *bp = (struct bnxt *)data;
5079 struct net_device *dev = bp->dev;
5081 if (!netif_running(dev))
5084 if (atomic_read(&bp->intr_sem) != 0)
5085 goto bnxt_restart_timer;
5088 mod_timer(&bp->timer, jiffies + bp->current_interval);
5091 static void bnxt_cfg_ntp_filters(struct bnxt *);
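/* Slow-path work handler: services the events flagged in bp->sp_event.
 * BNXT_STATE_IN_SP_TASK is held for the duration so bnxt_close_nic()
 * can wait for it; it is dropped around bnxt_reset_task(), which
 * closes the NIC itself.
 */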
5093 static void bnxt_sp_task(struct work_struct *work)
5095 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
5098 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5099 smp_mb__after_atomic();
5100 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
5101 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5105 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
5106 bnxt_cfg_rx_mode(bp);
5108 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
5109 bnxt_cfg_ntp_filters(bp);
5110 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
5111 rc = bnxt_update_link(bp, true);
5113 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
5116 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
5117 bnxt_hwrm_exec_fwd_req(bp);
5118 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
5119 bnxt_hwrm_tunnel_dst_port_alloc(
5121 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
5123 if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
5124 bnxt_hwrm_tunnel_dst_port_free(
5125 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
5127 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) {
5128 /* bnxt_reset_task() calls bnxt_close_nic() which waits
5129 * for BNXT_STATE_IN_SP_TASK to clear.
5131 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5133 bnxt_reset_task(bp);
5134 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5138 smp_mb__before_atomic();
5139 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
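/* One-time PCI/board setup: enable the device, map BAR 0 (registers),
 * BAR 2 (doorbells) and BAR 4, set the DMA mask, and initialize the
 * slow-path work item, timer and default ring/coalescing parameters.
 */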
5142 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
5145 struct bnxt *bp = netdev_priv(dev);
5147 SET_NETDEV_DEV(dev, &pdev->dev);
5149 /* enable device (incl. PCI PM wakeup) and bus-mastering */
5150 rc = pci_enable_device(pdev);
5152 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
5156 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5158 "Cannot find PCI device base address, aborting\n");
5160 goto init_err_disable;
5163 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5165 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
5166 goto init_err_disable;
5169 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
5170 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
5171 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
5172 goto init_err_disable;
5175 pci_set_master(pdev);
5180 bp->bar0 = pci_ioremap_bar(pdev, 0);
5182 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
5184 goto init_err_release;
5187 bp->bar1 = pci_ioremap_bar(pdev, 2);
5189 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
5191 goto init_err_release;
5194 bp->bar2 = pci_ioremap_bar(pdev, 4);
5196 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
5198 goto init_err_release;
5201 INIT_WORK(&bp->sp_task, bnxt_sp_task);
5203 spin_lock_init(&bp->ntp_fltr_lock);
5205 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
5206 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
5208 bp->coal_ticks = BNXT_USEC_TO_COAL_TIMER(4);
5210 bp->coal_ticks_irq = BNXT_USEC_TO_COAL_TIMER(1);
5211 bp->coal_bufs_irq = 2;
5213 init_timer(&bp->timer);
5214 bp->timer.data = (unsigned long)bp;
5215 bp->timer.function = bnxt_timer;
5216 bp->current_interval = BNXT_TIMER_INTERVAL;
5218 clear_bit(BNXT_STATE_OPEN, &bp->state);
5224 pci_iounmap(pdev, bp->bar2);
5229 pci_iounmap(pdev, bp->bar1);
5234 pci_iounmap(pdev, bp->bar0);
5238 pci_release_regions(pdev);
5241 pci_disable_device(pdev);
5247 /* rtnl_lock held */
5248 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
5250 struct sockaddr *addr = p;
5251 struct bnxt *bp = netdev_priv(dev);
5254 if (!is_valid_ether_addr(addr->sa_data))
5255 return -EADDRNOTAVAIL;
5257 #ifdef CONFIG_BNXT_SRIOV
5258 if (BNXT_VF(bp) && is_valid_ether_addr(bp->vf.mac_addr))
5259 return -EADDRNOTAVAIL;
5262 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
5265 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5266 if (netif_running(dev)) {
5267 bnxt_close_nic(bp, false, false);
5268 rc = bnxt_open_nic(bp, false, false);
5274 /* rtnl_lock held */
5275 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
5277 struct bnxt *bp = netdev_priv(dev);
5279 if (new_mtu < 60 || new_mtu > 9000)
5282 if (netif_running(dev))
5283 bnxt_close_nic(bp, false, false);
5286 bnxt_set_ring_params(bp);
5288 if (netif_running(dev))
5289 return bnxt_open_nic(bp, false, false);
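/* ndo_setup_tc: resize the TX ring set to tx_nr_rings_per_tc * tc,
 * closing and reopening the NIC if it is currently running.
 */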
5294 static int bnxt_setup_tc(struct net_device *dev, u8 tc)
5296 struct bnxt *bp = netdev_priv(dev);
5298 if (tc > bp->max_tc) {
5299 netdev_err(dev, "too many traffic classes requested: %d. Max supported is %d\n",
5304 if (netdev_get_num_tc(dev) == tc)
5308 int max_rx_rings, max_tx_rings;
5310 bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
5311 if (bp->tx_nr_rings_per_tc * tc > max_tx_rings)
5315 /* Need to close the device and re-allocate hw resources */
5316 if (netif_running(bp->dev))
5317 bnxt_close_nic(bp, true, false);
5320 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
5321 netdev_set_num_tc(dev, tc);
5323 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
5324 netdev_reset_tc(dev);
5326 bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
5327 bp->num_stat_ctxs = bp->cp_nr_rings;
5329 if (netif_running(bp->dev))
5330 return bnxt_open_nic(bp, true, false);
5335 #ifdef CONFIG_RFS_ACCEL
5336 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
5337 struct bnxt_ntuple_filter *f2)
5339 struct flow_keys *keys1 = &f1->fkeys;
5340 struct flow_keys *keys2 = &f2->fkeys;
5342 if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
5343 keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
5344 keys1->ports.ports == keys2->ports.ports &&
5345 keys1->basic.ip_proto == keys2->basic.ip_proto &&
5346 keys1->basic.n_proto == keys2->basic.n_proto &&
5347 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr))
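/* ndo_rx_flow_steer (aRFS): dissect the flow, look it up in the ntuple
 * filter hash table, and for a new IPv4 TCP/UDP flow allocate a sw_id
 * from ntp_fltr_bmap and defer the HWRM filter programming to
 * bnxt_sp_task.
 */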
5353 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
5354 u16 rxq_index, u32 flow_id)
5356 struct bnxt *bp = netdev_priv(dev);
5357 struct bnxt_ntuple_filter *fltr, *new_fltr;
5358 struct flow_keys *fkeys;
5359 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
5360 int rc = 0, idx, bit_id;
5361 struct hlist_head *head;
5363 if (skb->encapsulation)
5364 return -EPROTONOSUPPORT;
5366 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
5370 fkeys = &new_fltr->fkeys;
5371 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
5372 rc = -EPROTONOSUPPORT;
5376 if ((fkeys->basic.n_proto != htons(ETH_P_IP)) ||
5377 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
5378 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
5379 rc = -EPROTONOSUPPORT;
5383 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
5385 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
5386 head = &bp->ntp_fltr_hash_tbl[idx];
5388 hlist_for_each_entry_rcu(fltr, head, hash) {
5389 if (bnxt_fltr_match(fltr, new_fltr)) {
5397 spin_lock_bh(&bp->ntp_fltr_lock);
5398 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
5399 BNXT_NTP_FLTR_MAX_FLTR, 0);
5401 spin_unlock_bh(&bp->ntp_fltr_lock);
5406 new_fltr->sw_id = (u16)bit_id;
5407 new_fltr->flow_id = flow_id;
5408 new_fltr->rxq = rxq_index;
5409 hlist_add_head_rcu(&new_fltr->hash, head);
5410 bp->ntp_fltr_count++;
5411 spin_unlock_bh(&bp->ntp_fltr_lock);
5413 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
5414 schedule_work(&bp->sp_task);
5416 return new_fltr->sw_id;
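/* Run from bnxt_sp_task: program newly added ntuple filters via HWRM
 * and free the ones rps_may_expire_flow() reports as idle.
 */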
5423 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
5427 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
5428 struct hlist_head *head;
5429 struct hlist_node *tmp;
5430 struct bnxt_ntuple_filter *fltr;
5433 head = &bp->ntp_fltr_hash_tbl[i];
5434 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
5437 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
5438 if (rps_may_expire_flow(bp->dev, fltr->rxq,
5441 bnxt_hwrm_cfa_ntuple_filter_free(bp,
5446 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
5451 set_bit(BNXT_FLTR_VALID, &fltr->state);
5455 spin_lock_bh(&bp->ntp_fltr_lock);
5456 hlist_del_rcu(&fltr->hash);
5457 bp->ntp_fltr_count--;
5458 spin_unlock_bh(&bp->ntp_fltr_lock);
5460 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
5469 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
5473 #endif /* CONFIG_RFS_ACCEL */
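/* VXLAN offload hooks.  Only a single UDP port is tracked;
 * vxlan_port_cnt reference-counts it and the HWRM tunnel port
 * alloc/free is deferred to bnxt_sp_task.
 */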
5475 static void bnxt_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
5478 struct bnxt *bp = netdev_priv(dev);
5480 if (!netif_running(dev))
5483 if (sa_family != AF_INET6 && sa_family != AF_INET)
5486 if (bp->vxlan_port_cnt && bp->vxlan_port != port)
5489 bp->vxlan_port_cnt++;
5490 if (bp->vxlan_port_cnt == 1) {
5491 bp->vxlan_port = port;
5492 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
5493 schedule_work(&bp->sp_task);
5497 static void bnxt_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
5500 struct bnxt *bp = netdev_priv(dev);
5502 if (!netif_running(dev))
5505 if (sa_family != AF_INET6 && sa_family != AF_INET)
5508 if (bp->vxlan_port_cnt && bp->vxlan_port == port) {
5509 bp->vxlan_port_cnt--;
5511 if (bp->vxlan_port_cnt == 0) {
5512 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
5513 schedule_work(&bp->sp_task);
5518 static const struct net_device_ops bnxt_netdev_ops = {
5519 .ndo_open = bnxt_open,
5520 .ndo_start_xmit = bnxt_start_xmit,
5521 .ndo_stop = bnxt_close,
5522 .ndo_get_stats64 = bnxt_get_stats64,
5523 .ndo_set_rx_mode = bnxt_set_rx_mode,
5524 .ndo_do_ioctl = bnxt_ioctl,
5525 .ndo_validate_addr = eth_validate_addr,
5526 .ndo_set_mac_address = bnxt_change_mac_addr,
5527 .ndo_change_mtu = bnxt_change_mtu,
5528 .ndo_fix_features = bnxt_fix_features,
5529 .ndo_set_features = bnxt_set_features,
5530 .ndo_tx_timeout = bnxt_tx_timeout,
5531 #ifdef CONFIG_BNXT_SRIOV
5532 .ndo_get_vf_config = bnxt_get_vf_config,
5533 .ndo_set_vf_mac = bnxt_set_vf_mac,
5534 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
5535 .ndo_set_vf_rate = bnxt_set_vf_bw,
5536 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
5537 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
5539 #ifdef CONFIG_NET_POLL_CONTROLLER
5540 .ndo_poll_controller = bnxt_poll_controller,
5542 .ndo_setup_tc = bnxt_setup_tc,
5543 #ifdef CONFIG_RFS_ACCEL
5544 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
5546 .ndo_add_vxlan_port = bnxt_add_vxlan_port,
5547 .ndo_del_vxlan_port = bnxt_del_vxlan_port,
5548 #ifdef CONFIG_NET_RX_BUSY_POLL
5549 .ndo_busy_poll = bnxt_busy_poll,
5553 static void bnxt_remove_one(struct pci_dev *pdev)
5555 struct net_device *dev = pci_get_drvdata(pdev);
5556 struct bnxt *bp = netdev_priv(dev);
5559 bnxt_sriov_disable(bp);
5561 unregister_netdev(dev);
5562 cancel_work_sync(&bp->sp_task);
5565 bnxt_hwrm_func_drv_unrgtr(bp);
5566 bnxt_free_hwrm_resources(bp);
5567 pci_iounmap(pdev, bp->bar2);
5568 pci_iounmap(pdev, bp->bar1);
5569 pci_iounmap(pdev, bp->bar0);
5572 pci_release_regions(pdev);
5573 pci_disable_device(pdev);
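/* Read the initial link state and seed the ethtool settings copy
 * (autoneg, flow control, requested speed) from the NVM-configured
 * defaults; also append the PHY version to the firmware version string.
 */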
5576 static int bnxt_probe_phy(struct bnxt *bp)
5579 struct bnxt_link_info *link_info = &bp->link_info;
5580 char phy_ver[PHY_VER_STR_LEN];
5582 rc = bnxt_update_link(bp, false);
5584 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
5589 /* Initialize the ethtool settings copy with NVM settings */
5590 if (BNXT_AUTO_MODE(link_info->auto_mode))
5591 link_info->autoneg |= BNXT_AUTONEG_SPEED;
5593 if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) {
5594 if (link_info->auto_pause_setting == BNXT_LINK_PAUSE_BOTH)
5595 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
5596 link_info->req_flow_ctrl = link_info->auto_pause_setting;
5597 } else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) {
5598 link_info->req_flow_ctrl = link_info->force_pause_setting;
5600 link_info->req_duplex = link_info->duplex_setting;
5601 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
5602 link_info->req_link_speed = link_info->auto_link_speed;
5604 link_info->req_link_speed = link_info->force_link_speed;
5605 link_info->advertising = link_info->auto_link_speeds;
5606 snprintf(phy_ver, PHY_VER_STR_LEN, " ph %d.%d.%d",
5607 link_info->phy_ver[0],
5608 link_info->phy_ver[1],
5609 link_info->phy_ver[2]);
5610 strcat(bp->fw_ver_str, phy_ver);
5614 static int bnxt_get_max_irq(struct pci_dev *pdev)
5618 if (!pdev->msix_cap)
5621 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
5622 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
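/* Derive the usable RX/TX ring counts from the PF or VF resource
 * limits, bounded by available IRQs, completion rings, stat contexts
 * and HW ring groups; aggregation rings constrain RX further.
 */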
5625 void bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx)
5627 int max_rings = 0, max_ring_grps = 0;
5630 *max_tx = bp->pf.max_tx_rings;
5631 *max_rx = bp->pf.max_rx_rings;
5632 max_rings = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
5633 max_rings = min_t(int, max_rings, bp->pf.max_stat_ctxs);
5634 max_ring_grps = bp->pf.max_hw_ring_grps;
5636 #ifdef CONFIG_BNXT_SRIOV
5637 *max_tx = bp->vf.max_tx_rings;
5638 *max_rx = bp->vf.max_rx_rings;
5639 max_rings = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
5640 max_rings = min_t(int, max_rings, bp->vf.max_stat_ctxs);
5641 max_ring_grps = bp->vf.max_hw_ring_grps;
5644 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5647 *max_rx = min_t(int, *max_rx, max_rings);
5648 *max_rx = min_t(int, *max_rx, max_ring_grps);
5649 *max_tx = min_t(int, *max_tx, max_rings);
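/* PCI probe: allocate the netdev, map the BARs, query firmware
 * capabilities over HWRM, size the default rings and register the
 * net device.
 */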
5652 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5654 static int version_printed;
5655 struct net_device *dev;
5657 int rc, max_rx_rings, max_tx_rings, max_irqs, dflt_rings;
5659 if (version_printed++ == 0)
5660 pr_info("%s", version);
5662 max_irqs = bnxt_get_max_irq(pdev);
5663 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
5667 bp = netdev_priv(dev);
5669 if (bnxt_vf_pciid(ent->driver_data))
5670 bp->flags |= BNXT_FLAG_VF;
5673 bp->flags |= BNXT_FLAG_MSIX_CAP;
5675 rc = bnxt_init_board(pdev, dev);
5679 dev->netdev_ops = &bnxt_netdev_ops;
5680 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
5681 dev->ethtool_ops = &bnxt_ethtool_ops;
5683 pci_set_drvdata(pdev, dev);
5685 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
5686 NETIF_F_TSO | NETIF_F_TSO6 |
5687 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
5688 NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT |
5690 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO;
5692 dev->hw_enc_features =
5693 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
5694 NETIF_F_TSO | NETIF_F_TSO6 |
5695 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
5696 NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
5697 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
5698 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
5699 NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
5700 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
5701 dev->priv_flags |= IFF_UNICAST_FLT;
5703 #ifdef CONFIG_BNXT_SRIOV
5704 init_waitqueue_head(&bp->sriov_cfg_wait);
5706 rc = bnxt_alloc_hwrm_resources(bp);
5710 mutex_init(&bp->hwrm_cmd_lock);
5711 bnxt_hwrm_ver_get(bp);
5713 rc = bnxt_hwrm_func_drv_rgtr(bp);
5717 /* Get the MAX capabilities for this function */
5718 rc = bnxt_hwrm_func_qcaps(bp);
5720 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
5726 rc = bnxt_hwrm_queue_qportcfg(bp);
5728 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
5734 bnxt_set_tpa_flags(bp);
5735 bnxt_set_ring_params(bp);
5736 dflt_rings = netif_get_num_default_rss_queues();
5738 bp->pf.max_irqs = max_irqs;
5739 #if defined(CONFIG_BNXT_SRIOV)
5741 bp->vf.max_irqs = max_irqs;
5743 bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
5744 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
5745 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
5746 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
5747 bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings);
5748 bp->num_stat_ctxs = bp->cp_nr_rings;
5751 dev->hw_features |= NETIF_F_NTUPLE;
5752 if (bnxt_rfs_capable(bp)) {
5753 bp->flags |= BNXT_FLAG_RFS;
5754 dev->features |= NETIF_F_NTUPLE;
5758 if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
5759 bp->flags |= BNXT_FLAG_STRIP_VLAN;
5761 rc = bnxt_probe_phy(bp);
5765 rc = register_netdev(dev);
5769 netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
5770 board_info[ent->driver_data].name,
5771 (long)pci_resource_start(pdev, 0), dev->dev_addr);
5776 pci_iounmap(pdev, bp->bar0);
5777 pci_release_regions(pdev);
5778 pci_disable_device(pdev);
5785 static struct pci_driver bnxt_pci_driver = {
5786 .name = DRV_MODULE_NAME,
5787 .id_table = bnxt_pci_tbl,
5788 .probe = bnxt_init_one,
5789 .remove = bnxt_remove_one,
5790 #if defined(CONFIG_BNXT_SRIOV)
5791 .sriov_configure = bnxt_sriov_configure,
5795 module_pci_driver(bnxt_pci_driver);