/*
 * Copyright (C) 2015 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * nfp_net_common.c
 * Netronome network device driver: Common functions between PF and VF
 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Jason McMullan <jason.mcmullan@netronome.com>
 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
 *          Brad Petrus <brad.petrus@netronome.com>
 *          Chris Telfer <chris.telfer@netronome.com>
 */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/msi.h>
#include <linux/ethtool.h>
#include <linux/log2.h>
#include <linux/if_vlan.h>
#include <linux/random.h>

#include <linux/ktime.h>

#include <net/vxlan.h>

#include "nfp_net_ctrl.h"
#include "nfp_net.h"
/**
 * nfp_net_get_fw_version() - Read and parse the FW version
 * @fw_ver:   Output fw_version structure to read to
 * @ctrl_bar: Mapped address of the control BAR
 */
void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
			    void __iomem *ctrl_bar)
{
	u32 reg;

	reg = readl(ctrl_bar + NFP_NET_CFG_VERSION);
	put_unaligned_le32(reg, fw_ver);
}
/**
 * nfp_net_reconfig() - Reconfigure the firmware
 * @nn:      NFP Net device to reconfigure
 * @update:  The value for the update field in the BAR config
 *
 * Write the update word to the BAR and ping the reconfig queue.  Then
 * poll until the firmware has acknowledged the update by zeroing the
 * update word.
 *
 * Return: Negative errno on error, 0 on success
 */
int nfp_net_reconfig(struct nfp_net *nn, u32 update)
{
	int cnt, ret = 0;
	u32 new;

	spin_lock_bh(&nn->reconfig_lock);

	nn_writel(nn, NFP_NET_CFG_UPDATE, update);
	/* ensure update is written before pinging HW */
	wmb();
	nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);

	/* Poll update field, waiting for NFP to ack the config */
	for (cnt = 0; ; cnt++) {
		new = nn_readl(nn, NFP_NET_CFG_UPDATE);
		if (new == 0)
			break;
		if (new & NFP_NET_CFG_UPDATE_ERR) {
			nn_err(nn, "Reconfig error: 0x%08x\n", new);
			ret = -EIO;
			break;
		} else if (cnt >= NFP_NET_POLL_TIMEOUT) {
			nn_err(nn, "Reconfig timeout for 0x%08x after %dms\n",
			       update, cnt);
			ret = -EIO;
			break;
		}
		mdelay(1);
	}

	spin_unlock_bh(&nn->reconfig_lock);

	return ret;
}
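
/* Callers typically write the new configuration (control word, ring
 * addresses, RSS state, ...) to the control BAR first and then invoke
 * nfp_net_reconfig() with the matching NFP_NET_CFG_UPDATE_* flags, as
 * nfp_net_set_rx_mode() and __nfp_net_set_config_and_enable() below do.
 */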
/* Interrupt configuration and handling
 */

/**
 * nfp_net_irq_unmask_msix() - Unmask MSI-X after automasking
 * @nn:       NFP Network structure
 * @entry_nr: MSI-X table entry
 *
 * Clear the MSI-X table mask bit for the given entry bypassing Linux irq
 * handling subsystem.  Use *only* to re-enable automasked vectors.
 */
static void nfp_net_irq_unmask_msix(struct nfp_net *nn, unsigned int entry_nr)
{
	struct list_head *msi_head = &nn->pdev->dev.msi_list;
	struct msi_desc *entry;
	u32 off;

	/* All MSI-Xs have the same mask_base */
	entry = list_first_entry(msi_head, struct msi_desc, list);

	off = (PCI_MSIX_ENTRY_SIZE * entry_nr) + PCI_MSIX_ENTRY_VECTOR_CTRL;
	writel(0, entry->mask_base + off);
	readl(entry->mask_base);
}
/**
 * nfp_net_irq_unmask() - Unmask automasked interrupt
 * @nn:       NFP Network structure
 * @entry_nr: MSI-X table entry
 *
 * If MSI-X auto-masking is enabled clear the mask bit, otherwise
 * clear the ICR for the entry.
 */
static void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
{
	if (nn->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
		nfp_net_irq_unmask_msix(nn, entry_nr);
		return;
	}

	nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED);
}
174 * nfp_net_msix_alloc() - Try to allocate MSI-X irqs
175 * @nn: NFP Network structure
176 * @nr_vecs: Number of MSI-X vectors to allocate
178 * For MSI-X we want at least NFP_NET_NON_Q_VECTORS + 1 vectors.
180 * Return: Number of MSI-X vectors obtained or 0 on error.
182 static int nfp_net_msix_alloc(struct nfp_net *nn, int nr_vecs)
184 struct pci_dev *pdev = nn->pdev;
188 for (i = 0; i < nr_vecs; i++)
189 nn->irq_entries[i].entry = i;
191 nvecs = pci_enable_msix_range(pdev, nn->irq_entries,
192 NFP_NET_NON_Q_VECTORS + 1, nr_vecs);
194 nn_warn(nn, "Failed to enable MSI-X. Wanted %d-%d (err=%d)\n",
195 NFP_NET_NON_Q_VECTORS + 1, nr_vecs, nvecs);
/**
 * nfp_net_irqs_wanted() - Work out how many interrupt vectors we want
 * @nn:      NFP Network structure
 *
 * We want a vector per CPU (or ring), whichever is smaller, plus
 * NFP_NET_NON_Q_VECTORS for LSC etc.
 *
 * Return: Number of interrupts wanted
 */
static int nfp_net_irqs_wanted(struct nfp_net *nn)
{
	int ncpus, vecs;

	ncpus = num_online_cpus();

	vecs = max_t(int, nn->num_tx_rings, nn->num_rx_rings);
	vecs = min_t(int, vecs, ncpus);

	return vecs + NFP_NET_NON_Q_VECTORS;
}
225 * nfp_net_irqs_alloc() - allocates MSI-X irqs
226 * @nn: NFP Network structure
228 * Return: Number of irqs obtained or 0 on error.
230 int nfp_net_irqs_alloc(struct nfp_net *nn)
234 wanted_irqs = nfp_net_irqs_wanted(nn);
236 nn->num_irqs = nfp_net_msix_alloc(nn, wanted_irqs);
237 if (nn->num_irqs == 0) {
238 nn_err(nn, "Failed to allocate MSI-X IRQs\n");
242 nn->num_r_vecs = nn->num_irqs - NFP_NET_NON_Q_VECTORS;
244 if (nn->num_irqs < wanted_irqs)
245 nn_warn(nn, "Unable to allocate %d vectors. Got %d instead\n",
246 wanted_irqs, nn->num_irqs);
252 * nfp_net_irqs_disable() - Disable interrupts
253 * @nn: NFP Network structure
255 * Undoes what @nfp_net_irqs_alloc() does.
257 void nfp_net_irqs_disable(struct nfp_net *nn)
259 pci_disable_msix(nn->pdev);
263 * nfp_net_irq_rxtx() - Interrupt service routine for RX/TX rings.
265 * @data: Opaque data structure
267 * Return: Indicate if the interrupt has been handled.
269 static irqreturn_t nfp_net_irq_rxtx(int irq, void *data)
271 struct nfp_net_r_vector *r_vec = data;
273 napi_schedule_irqoff(&r_vec->napi);
275 /* The FW auto-masks any interrupt, either via the MASK bit in
276 * the MSI-X table or via the per entry ICR field. So there
277 * is no need to disable interrupts here.
283 * nfp_net_read_link_status() - Reread link status from control BAR
284 * @nn: NFP Network structure
286 static void nfp_net_read_link_status(struct nfp_net *nn)
292 spin_lock_irqsave(&nn->link_status_lock, flags);
294 sts = nn_readl(nn, NFP_NET_CFG_STS);
295 link_up = !!(sts & NFP_NET_CFG_STS_LINK);
297 if (nn->link_up == link_up)
300 nn->link_up = link_up;
303 netif_carrier_on(nn->netdev);
304 netdev_info(nn->netdev, "NIC Link is Up\n");
306 netif_carrier_off(nn->netdev);
307 netdev_info(nn->netdev, "NIC Link is Down\n");
310 spin_unlock_irqrestore(&nn->link_status_lock, flags);
314 * nfp_net_irq_lsc() - Interrupt service routine for link state changes
316 * @data: Opaque data structure
318 * Return: Indicate if the interrupt has been handled.
320 static irqreturn_t nfp_net_irq_lsc(int irq, void *data)
322 struct nfp_net *nn = data;
324 nfp_net_read_link_status(nn);
326 nfp_net_irq_unmask(nn, NFP_NET_IRQ_LSC_IDX);
332 * nfp_net_irq_exn() - Interrupt service routine for exceptions
334 * @data: Opaque data structure
336 * Return: Indicate if the interrupt has been handled.
338 static irqreturn_t nfp_net_irq_exn(int irq, void *data)
340 struct nfp_net *nn = data;
342 nn_err(nn, "%s: UNIMPLEMENTED.\n", __func__);
343 /* XXX TO BE IMPLEMENTED */
/**
 * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring
 * @tx_ring:  TX ring structure
 * @r_vec:    IRQ vector servicing this ring
 * @idx:      Ring index
 */
static void
nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
		     struct nfp_net_r_vector *r_vec, unsigned int idx)
{
	struct nfp_net *nn = r_vec->nfp_net;

	tx_ring->idx = idx;
	tx_ring->r_vec = r_vec;

	tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
	tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
}

/**
 * nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring
 * @rx_ring:  RX ring structure
 * @r_vec:    IRQ vector servicing this ring
 * @idx:      Ring index
 */
static void
nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
		     struct nfp_net_r_vector *r_vec, unsigned int idx)
{
	struct nfp_net *nn = r_vec->nfp_net;

	rx_ring->idx = idx;
	rx_ring->r_vec = r_vec;

	rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
	rx_ring->rx_qcidx = rx_ring->fl_qcidx + (nn->stride_rx - 1);

	rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
	rx_ring->qcp_rx = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->rx_qcidx);
}
389 * nfp_net_irqs_assign() - Assign IRQs and setup rvecs.
390 * @netdev: netdev structure
392 static void nfp_net_irqs_assign(struct net_device *netdev)
394 struct nfp_net *nn = netdev_priv(netdev);
395 struct nfp_net_r_vector *r_vec;
398 /* Assumes nn->num_tx_rings == nn->num_rx_rings */
399 if (nn->num_tx_rings > nn->num_r_vecs) {
400 nn_warn(nn, "More rings (%d) than vectors (%d).\n",
401 nn->num_tx_rings, nn->num_r_vecs);
402 nn->num_tx_rings = nn->num_r_vecs;
403 nn->num_rx_rings = nn->num_r_vecs;
406 nn->lsc_handler = nfp_net_irq_lsc;
407 nn->exn_handler = nfp_net_irq_exn;
409 for (r = 0; r < nn->num_r_vecs; r++) {
410 r_vec = &nn->r_vecs[r];
412 r_vec->handler = nfp_net_irq_rxtx;
413 r_vec->irq_idx = NFP_NET_NON_Q_VECTORS + r;
415 cpumask_set_cpu(r, &r_vec->affinity_mask);
420 * nfp_net_aux_irq_request() - Request an auxiliary interrupt (LSC or EXN)
421 * @nn: NFP Network structure
422 * @ctrl_offset: Control BAR offset where IRQ configuration should be written
423 * @format: printf-style format to construct the interrupt name
424 * @name: Pointer to allocated space for interrupt name
425 * @name_sz: Size of space for interrupt name
426 * @vector_idx: Index of MSI-X vector used for this interrupt
427 * @handler: IRQ handler to register for this interrupt
430 nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
431 const char *format, char *name, size_t name_sz,
432 unsigned int vector_idx, irq_handler_t handler)
434 struct msix_entry *entry;
437 entry = &nn->irq_entries[vector_idx];
439 snprintf(name, name_sz, format, netdev_name(nn->netdev));
440 err = request_irq(entry->vector, handler, 0, name, nn);
442 nn_err(nn, "Failed to request IRQ %d (err=%d).\n",
446 nn_writeb(nn, ctrl_offset, vector_idx);
452 * nfp_net_aux_irq_free() - Free an auxiliary interrupt (LSC or EXN)
453 * @nn: NFP Network structure
454 * @ctrl_offset: Control BAR offset where IRQ configuration should be written
455 * @vector_idx: Index of MSI-X vector used for this interrupt
457 static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
458 unsigned int vector_idx)
460 nn_writeb(nn, ctrl_offset, 0xff);
461 free_irq(nn->irq_entries[vector_idx].vector, nn);
/* Transmit
 *
 * One queue controller peripheral queue is used for transmit.  The
 * driver enqueues packets for transmit by advancing the write
 * pointer.  The device indicates that packets have been transmitted by
 * advancing the read pointer.  The driver maintains a local copy of
 * the read and write pointers in @struct nfp_net_tx_ring.  The driver
 * keeps @wr_p in sync with the queue controller write pointer and can
 * determine how many packets have been transmitted by comparing its
 * copy of the read pointer @rd_p with the read pointer maintained by
 * the queue controller peripheral.
 */
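
/* For example, with cnt = 4096, wr_p = 5000 and rd_p = 4990 there are ten
 * descriptors in flight, and nfp_net_tx_full() below reports the ring as
 * full once wr_p - rd_p reaches cnt - dcnt.
 */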
/**
 * nfp_net_tx_full() - Check if the TX ring is full
 * @tx_ring: TX ring to check
 * @dcnt:    Number of descriptors that need to be enqueued (must be >= 1)
 *
 * This function checks, based on the *host copy* of the read/write
 * pointers, whether a given TX ring is full.  The real TX queue may have
 * some newly made available slots.
 *
 * Return: True if the ring is full.
 */
static inline int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
{
	return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt);
}

/* Wrappers for deciding when to stop and restart TX queues */
static int nfp_net_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
{
	return !nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS * 4);
}

static int nfp_net_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
{
	return nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS + 1);
}
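
/* Note the asymmetry: the queue is stopped once fewer than
 * MAX_SKB_FRAGS + 1 descriptors are free, but only woken again once
 * MAX_SKB_FRAGS * 4 are free.  The gap provides hysteresis so the queue is
 * not stopped and restarted on every completed packet.
 */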
505 * nfp_net_tx_ring_stop() - stop tx ring
506 * @nd_q: netdev queue
507 * @tx_ring: driver tx queue structure
509 * Safely stop TX ring. Remember that while we are running .start_xmit()
510 * someone else may be cleaning the TX ring completions so we need to be
511 * extra careful here.
513 static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q,
514 struct nfp_net_tx_ring *tx_ring)
516 netif_tx_stop_queue(nd_q);
518 /* We can race with the TX completion out of NAPI so recheck */
520 if (unlikely(nfp_net_tx_ring_should_wake(tx_ring)))
521 netif_tx_start_queue(nd_q);
525 * nfp_net_tx_tso() - Set up Tx descriptor for LSO
526 * @nn: NFP Net device
527 * @r_vec: per-ring structure
528 * @txbuf: Pointer to driver soft TX descriptor
529 * @txd: Pointer to HW TX descriptor
530 * @skb: Pointer to SKB
532 * Set up Tx descriptor for LSO, do nothing for non-LSO skbs.
533 * Return error on packet header greater than maximum supported LSO header size.
535 static void nfp_net_tx_tso(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
536 struct nfp_net_tx_buf *txbuf,
537 struct nfp_net_tx_desc *txd, struct sk_buff *skb)
542 if (!skb_is_gso(skb))
545 if (!skb->encapsulation)
546 hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
548 hdrlen = skb_inner_transport_header(skb) - skb->data +
549 inner_tcp_hdrlen(skb);
551 txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
552 txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);
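	/* Each TSO segment the device produces carries its own copy of the
	 * headers, so account hdrlen once per extra segment when tracking
	 * the number of bytes that will actually hit the wire.
	 */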
554 mss = skb_shinfo(skb)->gso_size & PCIE_DESC_TX_MSS_MASK;
555 txd->l4_offset = hdrlen;
556 txd->mss = cpu_to_le16(mss);
557 txd->flags |= PCIE_DESC_TX_LSO;
559 u64_stats_update_begin(&r_vec->tx_sync);
561 u64_stats_update_end(&r_vec->tx_sync);
565 * nfp_net_tx_csum() - Set TX CSUM offload flags in TX descriptor
566 * @nn: NFP Net device
567 * @r_vec: per-ring structure
568 * @txbuf: Pointer to driver soft TX descriptor
569 * @txd: Pointer to TX descriptor
570 * @skb: Pointer to SKB
572 * This function sets the TX checksum flags in the TX descriptor based
573 * on the configuration and the protocol of the packet to be transmitted.
575 static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
576 struct nfp_net_tx_buf *txbuf,
577 struct nfp_net_tx_desc *txd, struct sk_buff *skb)
579 struct ipv6hdr *ipv6h;
583 if (!(nn->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
586 if (skb->ip_summed != CHECKSUM_PARTIAL)
589 txd->flags |= PCIE_DESC_TX_CSUM;
590 if (skb->encapsulation)
591 txd->flags |= PCIE_DESC_TX_ENCAP;
593 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
594 ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
596 if (iph->version == 4) {
597 txd->flags |= PCIE_DESC_TX_IP4_CSUM;
598 l4_hdr = iph->protocol;
599 } else if (ipv6h->version == 6) {
600 l4_hdr = ipv6h->nexthdr;
602 nn_warn_ratelimit(nn, "partial checksum but ipv=%x!\n",
609 txd->flags |= PCIE_DESC_TX_TCP_CSUM;
612 txd->flags |= PCIE_DESC_TX_UDP_CSUM;
615 nn_warn_ratelimit(nn, "partial checksum but l4 proto=%x!\n",
620 u64_stats_update_begin(&r_vec->tx_sync);
621 if (skb->encapsulation)
622 r_vec->hw_csum_tx_inner += txbuf->pkt_cnt;
624 r_vec->hw_csum_tx += txbuf->pkt_cnt;
625 u64_stats_update_end(&r_vec->tx_sync);
629 * nfp_net_tx() - Main transmit entry point
630 * @skb: SKB to transmit
631 * @netdev: netdev structure
633 * Return: NETDEV_TX_OK on success.
635 static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
637 struct nfp_net *nn = netdev_priv(netdev);
638 const struct skb_frag_struct *frag;
639 struct nfp_net_r_vector *r_vec;
640 struct nfp_net_tx_desc *txd, txdg;
641 struct nfp_net_tx_buf *txbuf;
642 struct nfp_net_tx_ring *tx_ring;
643 struct netdev_queue *nd_q;
650 qidx = skb_get_queue_mapping(skb);
651 tx_ring = &nn->tx_rings[qidx];
652 r_vec = tx_ring->r_vec;
653 nd_q = netdev_get_tx_queue(nn->netdev, qidx);
655 nr_frags = skb_shinfo(skb)->nr_frags;
657 if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
658 nn_warn_ratelimit(nn, "TX ring %d busy. wrp=%u rdp=%u\n",
659 qidx, tx_ring->wr_p, tx_ring->rd_p);
660 netif_tx_stop_queue(nd_q);
661 u64_stats_update_begin(&r_vec->tx_sync);
663 u64_stats_update_end(&r_vec->tx_sync);
664 return NETDEV_TX_BUSY;
667 /* Start with the head skbuf */
668 dma_addr = dma_map_single(&nn->pdev->dev, skb->data, skb_headlen(skb),
670 if (dma_mapping_error(&nn->pdev->dev, dma_addr))
673 wr_idx = tx_ring->wr_p % tx_ring->cnt;
675 /* Stash the soft descriptor of the head then initialize it */
676 txbuf = &tx_ring->txbufs[wr_idx];
678 txbuf->dma_addr = dma_addr;
681 txbuf->real_len = skb->len;
683 /* Build TX descriptor */
684 txd = &tx_ring->txds[wr_idx];
685 txd->offset_eop = (nr_frags == 0) ? PCIE_DESC_TX_EOP : 0;
686 txd->dma_len = cpu_to_le16(skb_headlen(skb));
687 nfp_desc_set_dma_addr(txd, dma_addr);
688 txd->data_len = cpu_to_le16(skb->len);
694 nfp_net_tx_tso(nn, r_vec, txbuf, txd, skb);
696 nfp_net_tx_csum(nn, r_vec, txbuf, txd, skb);
698 if (skb_vlan_tag_present(skb) && nn->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
699 txd->flags |= PCIE_DESC_TX_VLAN;
700 txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
705 /* all descs must match except for in addr, length and eop */
708 for (f = 0; f < nr_frags; f++) {
709 frag = &skb_shinfo(skb)->frags[f];
710 fsize = skb_frag_size(frag);
712 dma_addr = skb_frag_dma_map(&nn->pdev->dev, frag, 0,
713 fsize, DMA_TO_DEVICE);
714 if (dma_mapping_error(&nn->pdev->dev, dma_addr))
717 wr_idx = (wr_idx + 1) % tx_ring->cnt;
718 tx_ring->txbufs[wr_idx].skb = skb;
719 tx_ring->txbufs[wr_idx].dma_addr = dma_addr;
720 tx_ring->txbufs[wr_idx].fidx = f;
722 txd = &tx_ring->txds[wr_idx];
724 txd->dma_len = cpu_to_le16(fsize);
725 nfp_desc_set_dma_addr(txd, dma_addr);
727 (f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0;
730 u64_stats_update_begin(&r_vec->tx_sync);
732 u64_stats_update_end(&r_vec->tx_sync);
735 netdev_tx_sent_queue(nd_q, txbuf->real_len);
737 tx_ring->wr_p += nr_frags + 1;
738 if (nfp_net_tx_ring_should_stop(tx_ring))
739 nfp_net_tx_ring_stop(nd_q, tx_ring);
741 tx_ring->wr_ptr_add += nr_frags + 1;
742 if (!skb->xmit_more || netif_xmit_stopped(nd_q)) {
743 /* force memory write before we let HW know */
745 nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
746 tx_ring->wr_ptr_add = 0;
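		/* The QCP doorbell is only rung at the end of an xmit_more
		 * batch (or when the queue is stopped); descriptors queued
		 * earlier in the batch are accumulated in wr_ptr_add and
		 * pushed to the device in one write above.
		 */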
749 skb_tx_timestamp(skb);
756 frag = &skb_shinfo(skb)->frags[f];
757 dma_unmap_page(&nn->pdev->dev,
758 tx_ring->txbufs[wr_idx].dma_addr,
759 skb_frag_size(frag), DMA_TO_DEVICE);
760 tx_ring->txbufs[wr_idx].skb = NULL;
761 tx_ring->txbufs[wr_idx].dma_addr = 0;
762 tx_ring->txbufs[wr_idx].fidx = -2;
765 wr_idx += tx_ring->cnt;
767 dma_unmap_single(&nn->pdev->dev, tx_ring->txbufs[wr_idx].dma_addr,
768 skb_headlen(skb), DMA_TO_DEVICE);
769 tx_ring->txbufs[wr_idx].skb = NULL;
770 tx_ring->txbufs[wr_idx].dma_addr = 0;
771 tx_ring->txbufs[wr_idx].fidx = -2;
773 nn_warn_ratelimit(nn, "Failed to map DMA TX buffer\n");
774 u64_stats_update_begin(&r_vec->tx_sync);
776 u64_stats_update_end(&r_vec->tx_sync);
777 dev_kfree_skb_any(skb);
/**
 * nfp_net_tx_complete() - Handle completed TX packets
 * @tx_ring:   TX ring structure
 */
787 static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
789 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
790 struct nfp_net *nn = r_vec->nfp_net;
791 const struct skb_frag_struct *frag;
792 struct netdev_queue *nd_q;
793 u32 done_pkts = 0, done_bytes = 0;
800 /* Work out how many descriptors have been transmitted */
801 qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
803 if (qcp_rd_p == tx_ring->qcp_rd_p)
806 if (qcp_rd_p > tx_ring->qcp_rd_p)
807 todo = qcp_rd_p - tx_ring->qcp_rd_p;
809 todo = qcp_rd_p + tx_ring->cnt - tx_ring->qcp_rd_p;
812 idx = tx_ring->rd_p % tx_ring->cnt;
815 skb = tx_ring->txbufs[idx].skb;
819 nr_frags = skb_shinfo(skb)->nr_frags;
820 fidx = tx_ring->txbufs[idx].fidx;
824 dma_unmap_single(&nn->pdev->dev,
825 tx_ring->txbufs[idx].dma_addr,
826 skb_headlen(skb), DMA_TO_DEVICE);
828 done_pkts += tx_ring->txbufs[idx].pkt_cnt;
829 done_bytes += tx_ring->txbufs[idx].real_len;
832 frag = &skb_shinfo(skb)->frags[fidx];
833 dma_unmap_page(&nn->pdev->dev,
834 tx_ring->txbufs[idx].dma_addr,
835 skb_frag_size(frag), DMA_TO_DEVICE);
838 /* check for last gather fragment */
839 if (fidx == nr_frags - 1)
840 dev_kfree_skb_any(skb);
842 tx_ring->txbufs[idx].dma_addr = 0;
843 tx_ring->txbufs[idx].skb = NULL;
844 tx_ring->txbufs[idx].fidx = -2;
847 tx_ring->qcp_rd_p = qcp_rd_p;
849 u64_stats_update_begin(&r_vec->tx_sync);
850 r_vec->tx_bytes += done_bytes;
851 r_vec->tx_pkts += done_pkts;
852 u64_stats_update_end(&r_vec->tx_sync);
854 nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
855 netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
856 if (nfp_net_tx_ring_should_wake(tx_ring)) {
857 /* Make sure TX thread will see updated tx_ring->rd_p */
860 if (unlikely(netif_tx_queue_stopped(nd_q)))
861 netif_tx_wake_queue(nd_q);
864 WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
865 "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
866 tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
870 * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers
871 * @nn: NFP Net device
872 * @tx_ring: TX ring structure
874 * Assumes that the device is stopped
877 nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring)
879 const struct skb_frag_struct *frag;
880 struct netdev_queue *nd_q;
881 struct pci_dev *pdev = nn->pdev;
883 while (tx_ring->rd_p != tx_ring->wr_p) {
884 int nr_frags, fidx, idx;
887 idx = tx_ring->rd_p % tx_ring->cnt;
888 skb = tx_ring->txbufs[idx].skb;
889 nr_frags = skb_shinfo(skb)->nr_frags;
890 fidx = tx_ring->txbufs[idx].fidx;
894 dma_unmap_single(&pdev->dev,
895 tx_ring->txbufs[idx].dma_addr,
896 skb_headlen(skb), DMA_TO_DEVICE);
899 frag = &skb_shinfo(skb)->frags[fidx];
900 dma_unmap_page(&pdev->dev,
901 tx_ring->txbufs[idx].dma_addr,
902 skb_frag_size(frag), DMA_TO_DEVICE);
905 /* check for last gather fragment */
906 if (fidx == nr_frags - 1)
907 dev_kfree_skb_any(skb);
909 tx_ring->txbufs[idx].dma_addr = 0;
910 tx_ring->txbufs[idx].skb = NULL;
911 tx_ring->txbufs[idx].fidx = -2;
917 memset(tx_ring->txds, 0, sizeof(*tx_ring->txds) * tx_ring->cnt);
920 tx_ring->qcp_rd_p = 0;
921 tx_ring->wr_ptr_add = 0;
923 nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
924 netdev_tx_reset_queue(nd_q);
static void nfp_net_tx_timeout(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int i;

	for (i = 0; i < nn->num_tx_rings; i++) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i)))
			continue;
		nn_warn(nn, "TX timeout on ring: %d\n", i);
	}
	nn_warn(nn, "TX watchdog timeout\n");
}
940 /* Receive processing
944 * nfp_net_rx_space() - return the number of free slots on the RX ring
945 * @rx_ring: RX ring structure
947 * Make sure we leave at least one slot free.
949 * Return: True if there is space on the RX ring
951 static inline int nfp_net_rx_space(struct nfp_net_rx_ring *rx_ring)
953 return (rx_ring->cnt - 1) - (rx_ring->wr_p - rx_ring->rd_p);
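
/* One slot is deliberately kept unused; presumably so that a completely
 * full freelist can never be confused with an empty one by the queue
 * controller's modular read/write pointers.
 */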
/**
 * nfp_net_rx_alloc_one() - Allocate and map skb for RX
 * @rx_ring:   RX ring structure of the skb
 * @dma_addr:  Pointer to storage for DMA address (output param)
 * @fl_bufsz:  size of freelist buffers
 *
 * This function will allocate a new skb and map it for DMA.
 *
 * Return: allocated skb or NULL on failure.
 */
966 static struct sk_buff *
967 nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr,
968 unsigned int fl_bufsz)
970 struct nfp_net *nn = rx_ring->r_vec->nfp_net;
973 skb = netdev_alloc_skb(nn->netdev, fl_bufsz);
975 nn_warn_ratelimit(nn, "Failed to alloc receive SKB\n");
979 *dma_addr = dma_map_single(&nn->pdev->dev, skb->data,
980 fl_bufsz, DMA_FROM_DEVICE);
981 if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) {
982 dev_kfree_skb_any(skb);
983 nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n");
991 * nfp_net_rx_give_one() - Put mapped skb on the software and hardware rings
992 * @rx_ring: RX ring structure
993 * @skb: Skb to put on rings
994 * @dma_addr: DMA address of skb mapping
996 static void nfp_net_rx_give_one(struct nfp_net_rx_ring *rx_ring,
997 struct sk_buff *skb, dma_addr_t dma_addr)
1001 wr_idx = rx_ring->wr_p % rx_ring->cnt;
1003 /* Stash SKB and DMA address away */
1004 rx_ring->rxbufs[wr_idx].skb = skb;
1005 rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;
1007 /* Fill freelist descriptor */
1008 rx_ring->rxds[wr_idx].fld.reserved = 0;
1009 rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
1010 nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld, dma_addr);
1013 rx_ring->wr_ptr_add++;
1014 if (rx_ring->wr_ptr_add >= NFP_NET_FL_BATCH) {
1015 /* Update write pointer of the freelist queue. Make
1016 * sure all writes are flushed before telling the hardware.
1019 nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, rx_ring->wr_ptr_add);
1020 rx_ring->wr_ptr_add = 0;
1025 * nfp_net_rx_ring_reset() - Reflect in SW state of freelist after disable
1026 * @rx_ring: RX ring structure
1028 * Warning: Do *not* call if ring buffers were never put on the FW freelist
1029 * (i.e. device was not enabled)!
1031 static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
1033 unsigned int wr_idx, last_idx;
1035 /* Move the empty entry to the end of the list */
1036 wr_idx = rx_ring->wr_p % rx_ring->cnt;
1037 last_idx = rx_ring->cnt - 1;
1038 rx_ring->rxbufs[wr_idx].dma_addr = rx_ring->rxbufs[last_idx].dma_addr;
1039 rx_ring->rxbufs[wr_idx].skb = rx_ring->rxbufs[last_idx].skb;
1040 rx_ring->rxbufs[last_idx].dma_addr = 0;
1041 rx_ring->rxbufs[last_idx].skb = NULL;
1043 memset(rx_ring->rxds, 0, sizeof(*rx_ring->rxds) * rx_ring->cnt);
1046 rx_ring->wr_ptr_add = 0;
1050 * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
1051 * @nn: NFP Net device
1052 * @rx_ring: RX ring to remove buffers from
1054 * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1)
1055 * entries. After device is disabled nfp_net_rx_ring_reset() must be called
1056 * to restore required ring geometry.
1059 nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring)
1061 struct pci_dev *pdev = nn->pdev;
1064 for (i = 0; i < rx_ring->cnt - 1; i++) {
1065 /* NULL skb can only happen when initial filling of the ring
1066 * fails to allocate enough buffers and calls here to free
1067 * already allocated ones.
1069 if (!rx_ring->rxbufs[i].skb)
1072 dma_unmap_single(&pdev->dev, rx_ring->rxbufs[i].dma_addr,
1073 rx_ring->bufsz, DMA_FROM_DEVICE);
1074 dev_kfree_skb_any(rx_ring->rxbufs[i].skb);
1075 rx_ring->rxbufs[i].dma_addr = 0;
1076 rx_ring->rxbufs[i].skb = NULL;
/**
 * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
 * @nn:       NFP Net device
 * @rx_ring:  RX ring to fill with buffers
 */
1086 nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring)
1088 struct nfp_net_rx_buf *rxbufs;
1091 rxbufs = rx_ring->rxbufs;
1093 for (i = 0; i < rx_ring->cnt - 1; i++) {
1095 nfp_net_rx_alloc_one(rx_ring, &rxbufs[i].dma_addr,
1097 if (!rxbufs[i].skb) {
1098 nfp_net_rx_ring_bufs_free(nn, rx_ring);
1107 * nfp_net_rx_ring_fill_freelist() - Give buffers from the ring to FW
1108 * @rx_ring: RX ring to fill
1110 static void nfp_net_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring)
1114 for (i = 0; i < rx_ring->cnt - 1; i++)
1115 nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[i].skb,
1116 rx_ring->rxbufs[i].dma_addr);
/**
 * nfp_net_rx_csum_has_errors() - group check if rxd has any csum errors
 * @flags: RX descriptor flags field in CPU byte order
 */
static int nfp_net_rx_csum_has_errors(u16 flags)
{
	u16 csum_all_checked, csum_all_ok;

	csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL;
	csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK;

	return csum_all_checked != (csum_all_ok << PCIE_DESC_RX_CSUM_OK_SHIFT);
}
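
/* __PCIE_DESC_RX_CSUM_ALL has a bit set for every checksum the device
 * attempts to validate and __PCIE_DESC_RX_CSUM_ALL_OK the matching "good"
 * bits; shifting the OK bits by PCIE_DESC_RX_CSUM_OK_SHIFT lines them up
 * with their "checked" counterparts, so the comparison fails as soon as
 * any verified checksum is not also reported good.
 */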
1134 * nfp_net_rx_csum() - set SKB checksum field based on RX descriptor flags
1135 * @nn: NFP Net device
1136 * @r_vec: per-ring structure
1137 * @rxd: Pointer to RX descriptor
1138 * @skb: Pointer to SKB
1140 static void nfp_net_rx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
1141 struct nfp_net_rx_desc *rxd, struct sk_buff *skb)
1143 skb_checksum_none_assert(skb);
1145 if (!(nn->netdev->features & NETIF_F_RXCSUM))
1148 if (nfp_net_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
1149 u64_stats_update_begin(&r_vec->rx_sync);
1150 r_vec->hw_csum_rx_error++;
1151 u64_stats_update_end(&r_vec->rx_sync);
1155 /* Assume that the firmware will never report inner CSUM_OK unless outer
1156 * L4 headers were successfully parsed. FW will always report zero UDP
1157 * checksum as CSUM_OK.
1159 if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK ||
1160 rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) {
1161 __skb_incr_checksum_unnecessary(skb);
1162 u64_stats_update_begin(&r_vec->rx_sync);
1163 r_vec->hw_csum_rx_ok++;
1164 u64_stats_update_end(&r_vec->rx_sync);
1167 if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK ||
1168 rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) {
1169 __skb_incr_checksum_unnecessary(skb);
1170 u64_stats_update_begin(&r_vec->rx_sync);
1171 r_vec->hw_csum_rx_inner_ok++;
1172 u64_stats_update_end(&r_vec->rx_sync);
1177 * nfp_net_set_hash() - Set SKB hash data
1178 * @netdev: adapter's net_device structure
1179 * @skb: SKB to set the hash data on
1180 * @rxd: RX descriptor
 * The RSS hash and hash-type are prepended to the packet data.
1183 * Extract and decode it and set the skb fields.
1185 static void nfp_net_set_hash(struct net_device *netdev, struct sk_buff *skb,
1186 struct nfp_net_rx_desc *rxd)
1188 struct nfp_net_rx_hash *rx_hash;
1190 if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS) ||
1191 !(netdev->features & NETIF_F_RXHASH))
1194 rx_hash = (struct nfp_net_rx_hash *)(skb->data - sizeof(*rx_hash));
1196 switch (be32_to_cpu(rx_hash->hash_type)) {
1197 case NFP_NET_RSS_IPV4:
1198 case NFP_NET_RSS_IPV6:
1199 case NFP_NET_RSS_IPV6_EX:
1200 skb_set_hash(skb, be32_to_cpu(rx_hash->hash), PKT_HASH_TYPE_L3);
1203 skb_set_hash(skb, be32_to_cpu(rx_hash->hash), PKT_HASH_TYPE_L4);
1209 * nfp_net_rx() - receive up to @budget packets on @rx_ring
1210 * @rx_ring: RX ring to receive from
1211 * @budget: NAPI budget
1213 * Note, this function is separated out from the napi poll function to
1214 * more cleanly separate packet receive code from other bookkeeping
1215 * functions performed in the napi poll function.
 * There are differences between the NFP-3200 firmware and the
 * NFP-6000 firmware.  The NFP-3200 firmware uses a dedicated RX queue
 * to indicate that new packets have arrived.  The NFP-6000 does not
 * have this queue and uses the DD bit in the RX descriptor.  This
 * method cannot be used on the NFP-3200 as it causes a race
 * condition: the RX ring write pointer on the NFP-3200 is updated
 * after packets (and descriptors) have been DMAed.  If the DD bit is
 * used and the read pointer is subsequently updated, the RX queue may
 * underflow (if the firmware has not yet updated the write pointer).
 * Therefore we use slightly ugly conditional code below to handle the
 * differences.  We may, in the future, update the NFP-3200 firmware to
 * behave the same as the firmware on the NFP-6000.
1231 * Return: Number of packets received.
1233 static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
1235 struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
1236 struct nfp_net *nn = r_vec->nfp_net;
1237 unsigned int data_len, meta_len;
1238 int avail = 0, pkts_polled = 0;
1239 struct sk_buff *skb, *new_skb;
1240 struct nfp_net_rx_desc *rxd;
1241 dma_addr_t new_dma_addr;
1245 if (nn->is_nfp3200) {
1246 /* Work out how many packets arrived */
1247 qcp_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_rx);
1248 idx = rx_ring->rd_p % rx_ring->cnt;
1250 if (qcp_wr_p == idx)
1251 /* No new packets */
1255 avail = qcp_wr_p - idx;
1257 avail = qcp_wr_p + rx_ring->cnt - idx;
1262 while (avail > 0 && pkts_polled < budget) {
1263 idx = rx_ring->rd_p % rx_ring->cnt;
1265 rxd = &rx_ring->rxds[idx];
1266 if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD)) {
1268 nn_dbg(nn, "RX descriptor not valid (DD)%d:%u rxd[0]=%#x rxd[1]=%#x\n",
1270 rxd->vals[0], rxd->vals[1]);
1273 /* Memory barrier to ensure that we won't do other reads
1274 * before the DD bit.
1282 skb = rx_ring->rxbufs[idx].skb;
1284 new_skb = nfp_net_rx_alloc_one(rx_ring, &new_dma_addr,
1287 nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[idx].skb,
1288 rx_ring->rxbufs[idx].dma_addr);
1289 u64_stats_update_begin(&r_vec->rx_sync);
1291 u64_stats_update_end(&r_vec->rx_sync);
1295 dma_unmap_single(&nn->pdev->dev,
1296 rx_ring->rxbufs[idx].dma_addr,
1297 nn->fl_bufsz, DMA_FROM_DEVICE);
1299 nfp_net_rx_give_one(rx_ring, new_skb, new_dma_addr);
1301 meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
1302 data_len = le16_to_cpu(rxd->rxd.data_len);
1304 if (WARN_ON_ONCE(data_len > nn->fl_bufsz)) {
1305 dev_kfree_skb_any(skb);
1309 if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) {
1310 /* The packet data starts after the metadata */
1311 skb_reserve(skb, meta_len);
1313 /* The packet data starts at a fixed offset */
1314 skb_reserve(skb, nn->rx_offset);
		/* Adjust the SKB for the dynamically prepended metadata */
1318 skb_put(skb, data_len - meta_len);
1320 nfp_net_set_hash(nn->netdev, skb, rxd);
1322 /* Pad small frames to minimum */
1323 if (skb_put_padto(skb, 60))
1327 u64_stats_update_begin(&r_vec->rx_sync);
1329 r_vec->rx_bytes += skb->len;
1330 u64_stats_update_end(&r_vec->rx_sync);
1332 skb_record_rx_queue(skb, rx_ring->idx);
1333 skb->protocol = eth_type_trans(skb, nn->netdev);
1335 nfp_net_rx_csum(nn, r_vec, rxd, skb);
1337 if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
1338 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1339 le16_to_cpu(rxd->rxd.vlan));
1341 napi_gro_receive(&rx_ring->r_vec->napi, skb);
1345 nfp_qcp_rd_ptr_add(rx_ring->qcp_rx, pkts_polled);
1351 * nfp_net_poll() - napi poll function
1352 * @napi: NAPI structure
1353 * @budget: NAPI budget
1355 * Return: number of packets polled.
1357 static int nfp_net_poll(struct napi_struct *napi, int budget)
1359 struct nfp_net_r_vector *r_vec =
1360 container_of(napi, struct nfp_net_r_vector, napi);
1361 struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
1362 struct nfp_net_tx_ring *tx_ring = r_vec->tx_ring;
1363 struct nfp_net *nn = r_vec->nfp_net;
1364 struct netdev_queue *txq;
1365 unsigned int pkts_polled;
1367 tx_ring = &nn->tx_rings[rx_ring->idx];
1368 txq = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
1369 nfp_net_tx_complete(tx_ring);
1371 pkts_polled = nfp_net_rx(rx_ring, budget);
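	/* Per the NAPI contract, completion may only be reported and the
	 * interrupt re-enabled when less than the full budget was consumed;
	 * otherwise the core keeps polling without re-arming the IRQ.
	 */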
1373 if (pkts_polled < budget) {
1374 napi_complete_done(napi, pkts_polled);
1375 nfp_net_irq_unmask(nn, r_vec->irq_idx);
1381 /* Setup and Configuration
1385 * nfp_net_tx_ring_free() - Free resources allocated to a TX ring
1386 * @tx_ring: TX ring to free
1388 static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
1390 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
1391 struct nfp_net *nn = r_vec->nfp_net;
1392 struct pci_dev *pdev = nn->pdev;
1394 kfree(tx_ring->txbufs);
1397 dma_free_coherent(&pdev->dev, tx_ring->size,
1398 tx_ring->txds, tx_ring->dma);
1401 tx_ring->txbufs = NULL;
1402 tx_ring->txds = NULL;
1408 * nfp_net_tx_ring_alloc() - Allocate resource for a TX ring
1409 * @tx_ring: TX Ring structure to allocate
1411 * Return: 0 on success, negative errno otherwise.
1413 static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring)
1415 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
1416 struct nfp_net *nn = r_vec->nfp_net;
1417 struct pci_dev *pdev = nn->pdev;
1420 tx_ring->cnt = nn->txd_cnt;
1422 tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt;
1423 tx_ring->txds = dma_zalloc_coherent(&pdev->dev, tx_ring->size,
1424 &tx_ring->dma, GFP_KERNEL);
1428 sz = sizeof(*tx_ring->txbufs) * tx_ring->cnt;
1429 tx_ring->txbufs = kzalloc(sz, GFP_KERNEL);
1430 if (!tx_ring->txbufs)
1433 netif_set_xps_queue(nn->netdev, &r_vec->affinity_mask, tx_ring->idx);
1435 nn_dbg(nn, "TxQ%02d: QCidx=%02d cnt=%d dma=%#llx host=%p\n",
1436 tx_ring->idx, tx_ring->qcidx,
1437 tx_ring->cnt, (unsigned long long)tx_ring->dma, tx_ring->txds);
1442 nfp_net_tx_ring_free(tx_ring);
1447 * nfp_net_rx_ring_free() - Free resources allocated to a RX ring
1448 * @rx_ring: RX ring to free
1450 static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
1452 struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
1453 struct nfp_net *nn = r_vec->nfp_net;
1454 struct pci_dev *pdev = nn->pdev;
1456 kfree(rx_ring->rxbufs);
1459 dma_free_coherent(&pdev->dev, rx_ring->size,
1460 rx_ring->rxds, rx_ring->dma);
1463 rx_ring->rxbufs = NULL;
1464 rx_ring->rxds = NULL;
1470 * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring
1471 * @rx_ring: RX ring to allocate
1472 * @fl_bufsz: Size of buffers to allocate
1474 * Return: 0 on success, negative errno otherwise.
1477 nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring, unsigned int fl_bufsz)
1479 struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
1480 struct nfp_net *nn = r_vec->nfp_net;
1481 struct pci_dev *pdev = nn->pdev;
1484 rx_ring->cnt = nn->rxd_cnt;
1485 rx_ring->bufsz = fl_bufsz;
1487 rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
1488 rx_ring->rxds = dma_zalloc_coherent(&pdev->dev, rx_ring->size,
1489 &rx_ring->dma, GFP_KERNEL);
1493 sz = sizeof(*rx_ring->rxbufs) * rx_ring->cnt;
1494 rx_ring->rxbufs = kzalloc(sz, GFP_KERNEL);
1495 if (!rx_ring->rxbufs)
1498 nn_dbg(nn, "RxQ%02d: FlQCidx=%02d RxQCidx=%02d cnt=%d dma=%#llx host=%p\n",
1499 rx_ring->idx, rx_ring->fl_qcidx, rx_ring->rx_qcidx,
1500 rx_ring->cnt, (unsigned long long)rx_ring->dma, rx_ring->rxds);
1505 nfp_net_rx_ring_free(rx_ring);
1509 static struct nfp_net_rx_ring *
1510 nfp_net_shadow_rx_rings_prepare(struct nfp_net *nn, unsigned int fl_bufsz)
1512 struct nfp_net_rx_ring *rings;
1515 rings = kcalloc(nn->num_rx_rings, sizeof(*rings), GFP_KERNEL);
1519 for (r = 0; r < nn->num_rx_rings; r++) {
1520 nfp_net_rx_ring_init(&rings[r], nn->rx_rings[r].r_vec, r);
1522 if (nfp_net_rx_ring_alloc(&rings[r], fl_bufsz))
1525 if (nfp_net_rx_ring_bufs_alloc(nn, &rings[r]))
1533 nfp_net_rx_ring_bufs_free(nn, &rings[r]);
1535 nfp_net_rx_ring_free(&rings[r]);
1541 static struct nfp_net_rx_ring *
1542 nfp_net_shadow_rx_rings_swap(struct nfp_net *nn, struct nfp_net_rx_ring *rings)
1544 struct nfp_net_rx_ring *old = nn->rx_rings;
1547 for (r = 0; r < nn->num_rx_rings; r++)
1548 old[r].r_vec->rx_ring = &rings[r];
1550 nn->rx_rings = rings;
1555 nfp_net_shadow_rx_rings_free(struct nfp_net *nn, struct nfp_net_rx_ring *rings)
1559 for (r = 0; r < nn->num_r_vecs; r++) {
1560 nfp_net_rx_ring_bufs_free(nn, &rings[r]);
1561 nfp_net_rx_ring_free(&rings[r]);
1568 nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
1571 struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];
1574 r_vec->tx_ring = &nn->tx_rings[idx];
1575 nfp_net_tx_ring_init(r_vec->tx_ring, r_vec, idx);
1577 r_vec->rx_ring = &nn->rx_rings[idx];
1578 nfp_net_rx_ring_init(r_vec->rx_ring, r_vec, idx);
1580 snprintf(r_vec->name, sizeof(r_vec->name),
1581 "%s-rxtx-%d", nn->netdev->name, idx);
1582 err = request_irq(entry->vector, r_vec->handler, 0, r_vec->name, r_vec);
1584 nn_err(nn, "Error requesting IRQ %d\n", entry->vector);
1587 disable_irq(entry->vector);
1590 netif_napi_add(nn->netdev, &r_vec->napi,
1591 nfp_net_poll, NAPI_POLL_WEIGHT);
1593 irq_set_affinity_hint(entry->vector, &r_vec->affinity_mask);
1595 nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, entry->vector, entry->entry);
1601 nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
1603 struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];
1605 irq_set_affinity_hint(entry->vector, NULL);
1606 netif_napi_del(&r_vec->napi);
1607 free_irq(entry->vector, r_vec);
1611 * nfp_net_rss_write_itbl() - Write RSS indirection table to device
1612 * @nn: NFP Net device to reconfigure
1614 void nfp_net_rss_write_itbl(struct nfp_net *nn)
1618 for (i = 0; i < NFP_NET_CFG_RSS_ITBL_SZ; i += 4)
1619 nn_writel(nn, NFP_NET_CFG_RSS_ITBL + i,
1620 get_unaligned_le32(nn->rss_itbl + i));
1624 * nfp_net_rss_write_key() - Write RSS hash key to device
1625 * @nn: NFP Net device to reconfigure
1627 void nfp_net_rss_write_key(struct nfp_net *nn)
1631 for (i = 0; i < NFP_NET_CFG_RSS_KEY_SZ; i += 4)
1632 nn_writel(nn, NFP_NET_CFG_RSS_KEY + i,
1633 get_unaligned_le32(nn->rss_key + i));
1637 * nfp_net_coalesce_write_cfg() - Write irq coalescence configuration to HW
1638 * @nn: NFP Net device to reconfigure
1640 void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
1646 /* Compute factor used to convert coalesce '_usecs' parameters to
1647 * ME timestamp ticks. There are 16 ME clock cycles for each timestamp
1650 factor = nn->me_freq_mhz / 16;
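	/* Illustrative only: with a (hypothetical) 1200 MHz ME clock,
	 * factor = 1200 / 16 = 75 timestamp ticks per microsecond, so a
	 * 50 usec coalescing setting is programmed as 3750 ticks.
	 */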
1652 /* copy RX interrupt coalesce parameters */
1653 value = (nn->rx_coalesce_max_frames << 16) |
1654 (factor * nn->rx_coalesce_usecs);
1655 for (i = 0; i < nn->num_r_vecs; i++)
1656 nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(i), value);
1658 /* copy TX interrupt coalesce parameters */
1659 value = (nn->tx_coalesce_max_frames << 16) |
1660 (factor * nn->tx_coalesce_usecs);
1661 for (i = 0; i < nn->num_r_vecs; i++)
1662 nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(i), value);
/**
 * nfp_net_write_mac_addr() - Write MAC address to device registers
 * @nn:      NFP Net device to reconfigure
 * @mac:     Six-byte MAC address to be written
 *
 * We do a bit of a byte-swapping dance because the firmware is
 * little endian.
 */
static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *mac)
{
	nn_writel(nn, NFP_NET_CFG_MACADDR + 0,
		  get_unaligned_be32(nn->netdev->dev_addr));
	/* We can't do writew for NFP-3200 compatibility */
	nn_writel(nn, NFP_NET_CFG_MACADDR + 4,
		  get_unaligned_be16(nn->netdev->dev_addr + 4) << 16);
}
1681 static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
1683 nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0);
1684 nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0);
1685 nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0);
1687 nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0);
1688 nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0);
1689 nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0);
1693 * nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP
1694 * @nn: NFP Net device to reconfigure
1696 static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
1698 u32 new_ctrl, update;
1702 new_ctrl = nn->ctrl;
1703 new_ctrl &= ~NFP_NET_CFG_CTRL_ENABLE;
1704 update = NFP_NET_CFG_UPDATE_GEN;
1705 update |= NFP_NET_CFG_UPDATE_MSIX;
1706 update |= NFP_NET_CFG_UPDATE_RING;
1708 if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
1709 new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;
1711 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
1712 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
1714 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
1715 err = nfp_net_reconfig(nn, update);
1717 nn_err(nn, "Could not disable device: %d\n", err);
1719 for (r = 0; r < nn->num_r_vecs; r++) {
1720 nfp_net_rx_ring_reset(nn->r_vecs[r].rx_ring);
1721 nfp_net_tx_ring_reset(nn, nn->r_vecs[r].tx_ring);
1722 nfp_net_vec_clear_ring_data(nn, r);
1725 nn->ctrl = new_ctrl;
1729 nfp_net_vec_write_ring_data(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
1732 /* Write the DMA address, size and MSI-X info to the device */
1733 nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), r_vec->rx_ring->dma);
1734 nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(r_vec->rx_ring->cnt));
1735 nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), r_vec->irq_idx);
1737 nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), r_vec->tx_ring->dma);
1738 nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(r_vec->tx_ring->cnt));
1739 nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), r_vec->irq_idx);
1742 static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
1744 u32 new_ctrl, update = 0;
1748 new_ctrl = nn->ctrl;
1750 if (nn->cap & NFP_NET_CFG_CTRL_RSS) {
1751 nfp_net_rss_write_key(nn);
1752 nfp_net_rss_write_itbl(nn);
1753 nn_writel(nn, NFP_NET_CFG_RSS_CTRL, nn->rss_cfg);
1754 update |= NFP_NET_CFG_UPDATE_RSS;
1757 if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
1758 nfp_net_coalesce_write_cfg(nn);
1760 new_ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
1761 update |= NFP_NET_CFG_UPDATE_IRQMOD;
1764 for (r = 0; r < nn->num_r_vecs; r++)
1765 nfp_net_vec_write_ring_data(nn, &nn->r_vecs[r], r);
1767 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->num_tx_rings == 64 ?
1768 0xffffffffffffffffULL : ((u64)1 << nn->num_tx_rings) - 1);
1770 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->num_rx_rings == 64 ?
1771 0xffffffffffffffffULL : ((u64)1 << nn->num_rx_rings) - 1);
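
	/* Each bit of the 64-bit TXRS/RXRS enable words enables one ring;
	 * the all-ones constant handles the 64-ring case explicitly because
	 * shifting a 64-bit value by 64 bits is undefined behaviour in C.
	 */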
1773 nfp_net_write_mac_addr(nn, nn->netdev->dev_addr);
1775 nn_writel(nn, NFP_NET_CFG_MTU, nn->netdev->mtu);
1776 nn_writel(nn, NFP_NET_CFG_FLBUFSZ, nn->fl_bufsz);
1779 new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
1780 update |= NFP_NET_CFG_UPDATE_GEN;
1781 update |= NFP_NET_CFG_UPDATE_MSIX;
1782 update |= NFP_NET_CFG_UPDATE_RING;
1783 if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
1784 new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
1786 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
1787 err = nfp_net_reconfig(nn, update);
1789 nn->ctrl = new_ctrl;
1791 for (r = 0; r < nn->num_r_vecs; r++)
1792 nfp_net_rx_ring_fill_freelist(nn->r_vecs[r].rx_ring);
1794 /* Since reconfiguration requests while NFP is down are ignored we
1795 * have to wipe the entire VXLAN configuration and reinitialize it.
1797 if (nn->ctrl & NFP_NET_CFG_CTRL_VXLAN) {
1798 memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports));
1799 memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt));
1800 vxlan_get_rx_port(nn->netdev);
1807 * nfp_net_set_config_and_enable() - Write control BAR and enable NFP
1808 * @nn: NFP Net device to reconfigure
1810 static int nfp_net_set_config_and_enable(struct nfp_net *nn)
1814 err = __nfp_net_set_config_and_enable(nn);
1816 nfp_net_clear_config_and_disable(nn);
1822 * nfp_net_open_stack() - Start the device from stack's perspective
1823 * @nn: NFP Net device to reconfigure
1825 static void nfp_net_open_stack(struct nfp_net *nn)
1829 for (r = 0; r < nn->num_r_vecs; r++) {
1830 napi_enable(&nn->r_vecs[r].napi);
1831 enable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector);
1834 netif_tx_wake_all_queues(nn->netdev);
1836 enable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
1837 nfp_net_read_link_status(nn);
1840 static int nfp_net_netdev_open(struct net_device *netdev)
1842 struct nfp_net *nn = netdev_priv(netdev);
1845 if (nn->ctrl & NFP_NET_CFG_CTRL_ENABLE) {
1846 nn_err(nn, "Dev is already enabled: 0x%08x\n", nn->ctrl);
1850 /* Step 1: Allocate resources for rings and the like
1851 * - Request interrupts
1852 * - Allocate RX and TX ring resources
1853 * - Setup initial RSS table
1855 err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn",
1856 nn->exn_name, sizeof(nn->exn_name),
1857 NFP_NET_IRQ_EXN_IDX, nn->exn_handler);
1860 err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_LSC, "%s-lsc",
1861 nn->lsc_name, sizeof(nn->lsc_name),
1862 NFP_NET_IRQ_LSC_IDX, nn->lsc_handler);
1865 disable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
1867 nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings),
1871 nn->tx_rings = kcalloc(nn->num_tx_rings, sizeof(*nn->tx_rings),
1874 goto err_free_rx_rings;
1876 for (r = 0; r < nn->num_r_vecs; r++) {
1877 err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
1879 goto err_free_prev_vecs;
1881 err = nfp_net_tx_ring_alloc(nn->r_vecs[r].tx_ring);
1883 goto err_cleanup_vec_p;
1885 err = nfp_net_rx_ring_alloc(nn->r_vecs[r].rx_ring,
1888 goto err_free_tx_ring_p;
1890 err = nfp_net_rx_ring_bufs_alloc(nn, nn->r_vecs[r].rx_ring);
1892 goto err_flush_rx_ring_p;
1895 err = netif_set_real_num_tx_queues(netdev, nn->num_tx_rings);
1897 goto err_free_rings;
1899 err = netif_set_real_num_rx_queues(netdev, nn->num_rx_rings);
1901 goto err_free_rings;
1903 /* Step 2: Configure the NFP
1904 * - Enable rings from 0 to tx_rings/rx_rings - 1.
1905 * - Write MAC address (in case it changed)
1907 * - Set the Freelist buffer size
1910 err = nfp_net_set_config_and_enable(nn);
1912 goto err_free_rings;
1914 /* Step 3: Enable for kernel
1915 * - put some freelist descriptors on each RX ring
1916 * - enable NAPI on each ring
1917 * - enable all TX queues
1920 nfp_net_open_stack(nn);
1928 nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring);
1929 err_flush_rx_ring_p:
1930 nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
1932 nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
1934 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
1936 kfree(nn->tx_rings);
1938 kfree(nn->rx_rings);
1940 nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
1942 nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
 * nfp_net_close_stack() - Quiesce the stack (part of close)
 * @nn:      NFP Net device to reconfigure
1950 static void nfp_net_close_stack(struct nfp_net *nn)
1954 disable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
1955 netif_carrier_off(nn->netdev);
1956 nn->link_up = false;
1958 for (r = 0; r < nn->num_r_vecs; r++) {
1959 disable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector);
1960 napi_disable(&nn->r_vecs[r].napi);
1963 netif_tx_disable(nn->netdev);
1967 * nfp_net_close_free_all() - Free all runtime resources
1968 * @nn: NFP Net device to reconfigure
1970 static void nfp_net_close_free_all(struct nfp_net *nn)
1974 for (r = 0; r < nn->num_r_vecs; r++) {
1975 nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring);
1976 nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
1977 nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
1978 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
1981 kfree(nn->rx_rings);
1982 kfree(nn->tx_rings);
1984 nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
1985 nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
1989 * nfp_net_netdev_close() - Called when the device is downed
1990 * @netdev: netdev structure
1992 static int nfp_net_netdev_close(struct net_device *netdev)
1994 struct nfp_net *nn = netdev_priv(netdev);
1996 if (!(nn->ctrl & NFP_NET_CFG_CTRL_ENABLE)) {
1997 nn_err(nn, "Dev is not up: 0x%08x\n", nn->ctrl);
2001 /* Step 1: Disable RX and TX rings from the Linux kernel perspective
2003 nfp_net_close_stack(nn);
2007 nfp_net_clear_config_and_disable(nn);
2009 /* Step 3: Free resources
2011 nfp_net_close_free_all(nn);
2013 nn_dbg(nn, "%s down", netdev->name);
2017 static void nfp_net_set_rx_mode(struct net_device *netdev)
2019 struct nfp_net *nn = netdev_priv(netdev);
2022 new_ctrl = nn->ctrl;
2024 if (netdev->flags & IFF_PROMISC) {
2025 if (nn->cap & NFP_NET_CFG_CTRL_PROMISC)
2026 new_ctrl |= NFP_NET_CFG_CTRL_PROMISC;
2028 nn_warn(nn, "FW does not support promiscuous mode\n");
2030 new_ctrl &= ~NFP_NET_CFG_CTRL_PROMISC;
2033 if (new_ctrl == nn->ctrl)
2036 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
2037 if (nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN))
2040 nn->ctrl = new_ctrl;
2043 static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
2045 unsigned int old_mtu, old_fl_bufsz, new_fl_bufsz;
2046 struct nfp_net *nn = netdev_priv(netdev);
2047 struct nfp_net_rx_ring *tmp_rings;
2050 if (new_mtu < 68 || new_mtu > nn->max_mtu) {
2051 nn_err(nn, "New MTU (%d) is not valid\n", new_mtu);
2055 old_mtu = netdev->mtu;
2056 old_fl_bufsz = nn->fl_bufsz;
2057 new_fl_bufsz = NFP_NET_MAX_PREPEND + ETH_HLEN + VLAN_HLEN * 2 + new_mtu;
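	/* Freelist buffers must fit the worst-case prepended metadata, the
	 * Ethernet header, two VLAN tags and a full MTU worth of payload.
	 */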
2059 if (!netif_running(netdev)) {
2060 netdev->mtu = new_mtu;
2061 nn->fl_bufsz = new_fl_bufsz;
2065 /* Prepare new rings */
2066 tmp_rings = nfp_net_shadow_rx_rings_prepare(nn, new_fl_bufsz);
2070 /* Stop device, swap in new rings, try to start the firmware */
2071 nfp_net_close_stack(nn);
2072 nfp_net_clear_config_and_disable(nn);
2074 tmp_rings = nfp_net_shadow_rx_rings_swap(nn, tmp_rings);
2076 netdev->mtu = new_mtu;
2077 nn->fl_bufsz = new_fl_bufsz;
2079 err = nfp_net_set_config_and_enable(nn);
2081 const int err_new = err;
2083 /* Try with old configuration and old rings */
2084 tmp_rings = nfp_net_shadow_rx_rings_swap(nn, tmp_rings);
2086 netdev->mtu = old_mtu;
2087 nn->fl_bufsz = old_fl_bufsz;
2089 err = __nfp_net_set_config_and_enable(nn);
2091 nn_err(nn, "Can't restore MTU - FW communication failed (%d,%d)\n",
2095 nfp_net_shadow_rx_rings_free(nn, tmp_rings);
2097 nfp_net_open_stack(nn);
2102 static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev,
2103 struct rtnl_link_stats64 *stats)
2105 struct nfp_net *nn = netdev_priv(netdev);
2108 for (r = 0; r < nn->num_r_vecs; r++) {
2109 struct nfp_net_r_vector *r_vec = &nn->r_vecs[r];
2114 start = u64_stats_fetch_begin(&r_vec->rx_sync);
2115 data[0] = r_vec->rx_pkts;
2116 data[1] = r_vec->rx_bytes;
2117 data[2] = r_vec->rx_drops;
2118 } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
2119 stats->rx_packets += data[0];
2120 stats->rx_bytes += data[1];
2121 stats->rx_dropped += data[2];
2124 start = u64_stats_fetch_begin(&r_vec->tx_sync);
2125 data[0] = r_vec->tx_pkts;
2126 data[1] = r_vec->tx_bytes;
2127 data[2] = r_vec->tx_errors;
2128 } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
2129 stats->tx_packets += data[0];
2130 stats->tx_bytes += data[1];
2131 stats->tx_errors += data[2];
2137 static int nfp_net_set_features(struct net_device *netdev,
2138 netdev_features_t features)
2140 netdev_features_t changed = netdev->features ^ features;
2141 struct nfp_net *nn = netdev_priv(netdev);
2145 /* Assume this is not called with features we have not advertised */
2147 new_ctrl = nn->ctrl;
2149 if (changed & NETIF_F_RXCSUM) {
2150 if (features & NETIF_F_RXCSUM)
2151 new_ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
2153 new_ctrl &= ~NFP_NET_CFG_CTRL_RXCSUM;
2156 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
2157 if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
2158 new_ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
2160 new_ctrl &= ~NFP_NET_CFG_CTRL_TXCSUM;
2163 if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
2164 if (features & (NETIF_F_TSO | NETIF_F_TSO6))
2165 new_ctrl |= NFP_NET_CFG_CTRL_LSO;
2167 new_ctrl &= ~NFP_NET_CFG_CTRL_LSO;
2170 if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
2171 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2172 new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
2174 new_ctrl &= ~NFP_NET_CFG_CTRL_RXVLAN;
2177 if (changed & NETIF_F_HW_VLAN_CTAG_TX) {
2178 if (features & NETIF_F_HW_VLAN_CTAG_TX)
2179 new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
2181 new_ctrl &= ~NFP_NET_CFG_CTRL_TXVLAN;
2184 if (changed & NETIF_F_SG) {
2185 if (features & NETIF_F_SG)
2186 new_ctrl |= NFP_NET_CFG_CTRL_GATHER;
2188 new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER;
2191 nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n",
2192 netdev->features, features, changed);
2194 if (new_ctrl == nn->ctrl)
2197 nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->ctrl, new_ctrl);
2198 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
2199 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
2203 nn->ctrl = new_ctrl;
2208 static netdev_features_t
2209 nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
2210 netdev_features_t features)
2214 /* We can't do TSO over double tagged packets (802.1AD) */
2215 features &= vlan_features_check(skb, features);
2217 if (!skb->encapsulation)
2220 /* Ensure that inner L4 header offset fits into TX descriptor field */
2221 if (skb_is_gso(skb)) {
2224 hdrlen = skb_inner_transport_header(skb) - skb->data +
2225 inner_tcp_hdrlen(skb);
2227 if (unlikely(hdrlen > NFP_NET_LSO_MAX_HDR_SZ))
2228 features &= ~NETIF_F_GSO_MASK;
2231 /* VXLAN/GRE check */
2232 switch (vlan_get_protocol(skb)) {
2233 case htons(ETH_P_IP):
2234 l4_hdr = ip_hdr(skb)->protocol;
2236 case htons(ETH_P_IPV6):
2237 l4_hdr = ipv6_hdr(skb)->nexthdr;
2240 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
2243 if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
2244 skb->inner_protocol != htons(ETH_P_TEB) ||
2245 (l4_hdr != IPPROTO_UDP && l4_hdr != IPPROTO_GRE) ||
2246 (l4_hdr == IPPROTO_UDP &&
2247 (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
2248 sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
2249 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
/**
 * nfp_net_set_vxlan_port() - set vxlan port in SW and reconfigure HW
 * @nn:   NFP Net device to reconfigure
 * @idx:  Index into the port table where new port should be written
 * @port: UDP port to configure (pass zero to remove VXLAN port)
 */
static void nfp_net_set_vxlan_port(struct nfp_net *nn, int idx, __be16 port)
{
	int i;

	nn->vxlan_ports[idx] = port;

	if (!(nn->ctrl & NFP_NET_CFG_CTRL_VXLAN))
		return;

	BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1);
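	/* The port table is written one 32-bit word at a time, each word
	 * packing two 16-bit UDP ports, hence the pairwise loop and the
	 * BUILD_BUG_ON() above requiring an even table size.
	 */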
	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2)
		nn_writel(nn, NFP_NET_CFG_VXLAN_PORT + i * sizeof(port),
			  be16_to_cpu(nn->vxlan_ports[i + 1]) << 16 |
			  be16_to_cpu(nn->vxlan_ports[i]));

	nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_VXLAN);
}

/**
 * nfp_net_find_vxlan_idx() - find table entry of the port or a free one
 * @nn:   NFP Network structure
 * @port: UDP port to look for
 *
 * Return: if the port is already in the table -- its position;
 *	   if the port is not in the table -- free position to use;
 *	   if the table is full -- -ENOSPC.
 */
static int nfp_net_find_vxlan_idx(struct nfp_net *nn, __be16 port)
{
	int i, free_idx = -ENOSPC;

	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) {
		if (nn->vxlan_ports[i] == port)
			return i;
		if (!nn->vxlan_usecnt[i])
			free_idx = i;
	}

	return free_idx;
}

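/* VXLAN offload ports are reference counted per table slot: the firmware is
 * only reprogrammed when a slot's use count goes from zero to one (add) or
 * back down to zero (delete).
 */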
static void nfp_net_add_vxlan_port(struct net_device *netdev,
				   sa_family_t sa_family, __be16 port)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int idx;

	idx = nfp_net_find_vxlan_idx(nn, port);
	if (idx == -ENOSPC)
		return;
	if (!nn->vxlan_usecnt[idx]++)
		nfp_net_set_vxlan_port(nn, idx, port);
}

static void nfp_net_del_vxlan_port(struct net_device *netdev,
				   sa_family_t sa_family, __be16 port)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int idx;

	idx = nfp_net_find_vxlan_idx(nn, port);
	if (idx == -ENOSPC || !nn->vxlan_usecnt[idx])
		return;
	if (!--nn->vxlan_usecnt[idx])
		nfp_net_set_vxlan_port(nn, idx, 0);
}

static const struct net_device_ops nfp_net_netdev_ops = {
	.ndo_open		= nfp_net_netdev_open,
	.ndo_stop		= nfp_net_netdev_close,
	.ndo_start_xmit		= nfp_net_tx,
	.ndo_get_stats64	= nfp_net_stat64,
	.ndo_tx_timeout		= nfp_net_tx_timeout,
	.ndo_set_rx_mode	= nfp_net_set_rx_mode,
	.ndo_change_mtu		= nfp_net_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_set_features	= nfp_net_set_features,
	.ndo_features_check	= nfp_net_features_check,
	.ndo_add_vxlan_port	= nfp_net_add_vxlan_port,
	.ndo_del_vxlan_port	= nfp_net_del_vxlan_port,
};

/**
 * nfp_net_info() - Print general info about the NIC
 * @nn:      NFP Net device to print info about
 */
void nfp_net_info(struct nfp_net *nn)
{
	nn_info(nn, "Netronome %s %sNetdev: TxQs=%d/%d RxQs=%d/%d\n",
		nn->is_nfp3200 ? "NFP-32xx" : "NFP-6xxx",
		nn->is_vf ? "VF " : "",
		nn->num_tx_rings, nn->max_tx_rings,
		nn->num_rx_rings, nn->max_rx_rings);
	nn_info(nn, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n",
		nn->fw_ver.resv, nn->fw_ver.class,
		nn->fw_ver.major, nn->fw_ver.minor,
		nn->max_mtu);
	nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
		nn->cap,
		nn->cap & NFP_NET_CFG_CTRL_PROMISC  ? "PROMISC "  : "",
		nn->cap & NFP_NET_CFG_CTRL_L2BC     ? "L2BCFILT " : "",
		nn->cap & NFP_NET_CFG_CTRL_L2MC     ? "L2MCFILT " : "",
		nn->cap & NFP_NET_CFG_CTRL_RXCSUM   ? "RXCSUM "   : "",
		nn->cap & NFP_NET_CFG_CTRL_TXCSUM   ? "TXCSUM "   : "",
		nn->cap & NFP_NET_CFG_CTRL_RXVLAN   ? "RXVLAN "   : "",
		nn->cap & NFP_NET_CFG_CTRL_TXVLAN   ? "TXVLAN "   : "",
		nn->cap & NFP_NET_CFG_CTRL_SCATTER  ? "SCATTER "  : "",
		nn->cap & NFP_NET_CFG_CTRL_GATHER   ? "GATHER "   : "",
		nn->cap & NFP_NET_CFG_CTRL_LSO      ? "TSO "      : "",
		nn->cap & NFP_NET_CFG_CTRL_RSS      ? "RSS "      : "",
		nn->cap & NFP_NET_CFG_CTRL_L2SWITCH ? "L2SWITCH " : "",
		nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
		nn->cap & NFP_NET_CFG_CTRL_IRQMOD   ? "IRQMOD "   : "",
		nn->cap & NFP_NET_CFG_CTRL_VXLAN    ? "VXLAN "    : "",
		nn->cap & NFP_NET_CFG_CTRL_NVGRE    ? "NVGRE "    : "");
}

/**
 * nfp_net_netdev_alloc() - Allocate netdev and related structure
 * @pdev:         PCI device
 * @max_tx_rings: Maximum number of TX rings supported by device
 * @max_rx_rings: Maximum number of RX rings supported by device
 *
 * This function allocates a netdev device and fills in the initial
 * part of the @struct nfp_net structure.
 *
 * Return: NFP Net device structure, or ERR_PTR on error.
 */
struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
				     int max_tx_rings, int max_rx_rings)
{
	struct net_device *netdev;
	struct nfp_net *nn;
	int nqs;

	netdev = alloc_etherdev_mqs(sizeof(struct nfp_net),
				    max_tx_rings, max_rx_rings);
	if (!netdev)
		return ERR_PTR(-ENOMEM);

	SET_NETDEV_DEV(netdev, &pdev->dev);
	nn = netdev_priv(netdev);

	nn->netdev = netdev;
	nn->pdev = pdev;

	nn->max_tx_rings = max_tx_rings;
	nn->max_rx_rings = max_rx_rings;

	nqs = netif_get_num_default_rss_queues();
	nn->num_tx_rings = min_t(int, nqs, max_tx_rings);
	nn->num_rx_rings = min_t(int, nqs, max_rx_rings);

	nn->txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
	nn->rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;

	spin_lock_init(&nn->reconfig_lock);
	spin_lock_init(&nn->link_status_lock);

	return nn;
}

/**
 * nfp_net_netdev_free() - Undo what @nfp_net_netdev_alloc() did
 * @nn:      NFP Net device to free
 */
void nfp_net_netdev_free(struct nfp_net *nn)
{
	free_netdev(nn->netdev);
}

/**
 * nfp_net_rss_init() - Set the initial RSS parameters
 * @nn:      NFP Net device to configure
 */
static void nfp_net_rss_init(struct nfp_net *nn)
{
	int i;

	netdev_rss_key_fill(nn->rss_key, NFP_NET_CFG_RSS_KEY_SZ);

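	/* Spread flows across the active RX rings using the kernel's default
	 * round-robin indirection table layout.
	 */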
	for (i = 0; i < sizeof(nn->rss_itbl); i++)
		nn->rss_itbl[i] =
			ethtool_rxfh_indir_default(i, nn->num_rx_rings);

	/* Enable IPv4/IPv6 TCP by default */
	nn->rss_cfg = NFP_NET_CFG_RSS_IPV4_TCP |
		      NFP_NET_CFG_RSS_IPV6_TCP |
		      NFP_NET_CFG_RSS_TOEPLITZ |
		      NFP_NET_CFG_RSS_MASK;
}

/**
 * nfp_net_irqmod_init() - Set the initial IRQ moderation parameters
 * @nn:      NFP Net device to configure
 */
static void nfp_net_irqmod_init(struct nfp_net *nn)
{
	nn->rx_coalesce_usecs = 50;
	nn->rx_coalesce_max_frames = 64;
	nn->tx_coalesce_usecs = 50;
	nn->tx_coalesce_max_frames = 64;
}

/**
 * nfp_net_netdev_init() - Initialise/finalise the netdev structure
 * @netdev:      netdev structure
 *
 * Return: 0 on success or negative errno on error.
 */
int nfp_net_netdev_init(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int err;

	/* Get some of the read-only fields from the BAR */
	nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
	nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);

	nfp_net_write_mac_addr(nn, nn->netdev->dev_addr);

	/* Set default MTU and Freelist buffer size */
	if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
		netdev->mtu = nn->max_mtu;
	else
		netdev->mtu = NFP_NET_DEFAULT_MTU;
	nn->fl_bufsz = NFP_NET_DEFAULT_RX_BUFSZ;

	/* Advertise/enable offloads based on capabilities
	 *
	 * Note: netdev->features shows the currently enabled features
	 * and netdev->hw_features advertises which features are
	 * supported.  By default we enable most features.
	 */
	netdev->hw_features = NETIF_F_HIGHDMA;
	if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM) {
		netdev->hw_features |= NETIF_F_RXCSUM;
		nn->ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_TXCSUM) {
		netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
		nn->ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_GATHER) {
		netdev->hw_features |= NETIF_F_SG;
		nn->ctrl |= NFP_NET_CFG_CTRL_GATHER;
	}
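	/* LSO is gated on the firmware ABI major version as well as the
	 * capability bit; presumably older firmware does not implement LSO
	 * correctly, so the capability alone is not trusted.
	 */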
	if ((nn->cap & NFP_NET_CFG_CTRL_LSO) && nn->fw_ver.major > 2) {
		netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
		nn->ctrl |= NFP_NET_CFG_CTRL_LSO;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_RSS) {
		netdev->hw_features |= NETIF_F_RXHASH;
		nfp_net_rss_init(nn);
		nn->ctrl |= NFP_NET_CFG_CTRL_RSS;
	}
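	/* Tunnel offloads are only advertised when both VXLAN and NVGRE are
	 * supported.  hw_enc_features and vlan_features are snapshotted
	 * before the VLAN CTAG bits are added below, so the tag insert/strip
	 * offloads are not included in either set.
	 */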
	if (nn->cap & NFP_NET_CFG_CTRL_VXLAN &&
	    nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
		if (nn->cap & NFP_NET_CFG_CTRL_LSO)
			netdev->hw_features |= NETIF_F_GSO_GRE |
					       NETIF_F_GSO_UDP_TUNNEL;
		nn->ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE;

		netdev->hw_enc_features = netdev->hw_features;
	}

	netdev->vlan_features = netdev->hw_features;

	if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN) {
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
		nn->ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN) {
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
		nn->ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
	}

	netdev->features = netdev->hw_features;

	/* Advertise but disable TSO by default. */
	netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);

	/* Allow L2 Broadcast and Multicast through by default, if supported */
	if (nn->cap & NFP_NET_CFG_CTRL_L2BC)
		nn->ctrl |= NFP_NET_CFG_CTRL_L2BC;
	if (nn->cap & NFP_NET_CFG_CTRL_L2MC)
		nn->ctrl |= NFP_NET_CFG_CTRL_L2MC;

	/* Allow IRQ moderation, if supported */
	if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
		nfp_net_irqmod_init(nn);
		nn->ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
	}

	/* On NFP-3200 enable MSI-X auto-masking, if supported and the
	 * interrupts are not shared.
	 */
	if (nn->is_nfp3200 && nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO)
		nn->ctrl |= NFP_NET_CFG_CTRL_MSIXAUTO;

	/* On NFP4000/NFP6000, determine RX packet/metadata boundary offset */
	if (nn->fw_ver.major >= 2)
		nn->rx_offset = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
	else
		nn->rx_offset = NFP_NET_RX_OFFSET;

	/* Stash the re-configuration queue away.  First odd queue in TX Bar */
	nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;

	/* Make sure the FW knows the netdev is supposed to be disabled here */
	nn_writel(nn, NFP_NET_CFG_CTRL, 0);
	nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
	nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RING |
				   NFP_NET_CFG_UPDATE_GEN);
	if (err)
		return err;

	/* Finalise the netdev setup */
	ether_setup(netdev);
	netdev->netdev_ops = &nfp_net_netdev_ops;
	netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);
	netif_carrier_off(netdev);

	nfp_net_set_ethtool_ops(netdev);
	nfp_net_irqs_assign(netdev);

	return register_netdev(netdev);
}

/**
 * nfp_net_netdev_clean() - Undo what nfp_net_netdev_init() did.
 * @netdev:      netdev structure
 */
void nfp_net_netdev_clean(struct net_device *netdev)
{
	unregister_netdev(netdev);
}