/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>
#include <linux/ktime.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif
#include <linux/crash_dump.h>

#include "cq_enet_desc.h"
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_vic.h"
#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"
#include "enic_pp.h"
#include "enic_clsf.h"
#define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
#define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
#define MAX_TSO				(1 << 16)
#define ENIC_DESC_MAX_SPLITS		(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
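
/* Worked example of the descriptor-split bound above (illustrative only;
 * the real WQ_ENET_LEN_BITS comes from the WQ descriptor header, here
 * assumed to be 14):
 *
 *	WQ_ENET_MAX_DESC_LEN = 1 << 14 = 16384 bytes per descriptor
 *	MAX_TSO              = 1 << 16 = 65536 bytes per TSO send
 *	ENIC_DESC_MAX_SPLITS = 65536 / 16384 + 1 = 5
 *
 * So a worst-case TSO buffer needs at most 5 descriptors; the "+ 1"
 * covers a payload that is not a multiple of the per-descriptor limit.
 */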
#define PCI_DEVICE_ID_CISCO_VIC_ENET		0x0043	/* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN	0x0044	/* enet dynamic vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF		0x0071	/* enet SRIOV VF */

#define RX_COPYBREAK_DEFAULT		256

/* Supported devices */
static const struct pci_device_id enic_id_table[] = {
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
	{ 0, }	/* end of table */
};

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);
#define ENIC_LARGE_PKT_THRESHOLD		1000
#define ENIC_MAX_COALESCE_TIMERS		10
/* Interrupt moderation table, which will be used to decide the
 * coalescing timer values
 * {rx_rate in Mbps, mapping percentage of the range}
 */
static struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
	{4000,  0},
	{4400, 10},
	{5060, 20},
	{5230, 30},
	{5540, 40},
	{5820, 50},
	{6120, 60},
	{6435, 70},
	{6745, 80},
	{7000, 90},
	{0xFFFFFFFF, 100}
};
/* This table helps the driver to pick different ranges for rx coalescing
 * timer depending on the link speed.
 */
static struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
	{0,  0}, /* 0  - 4  Gbps */
	{0,  3}, /* 4  - 10 Gbps */
	{3,  6}, /* 10 - 40 Gbps */
};
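
/* Illustrative sketch of how mod_range[] feeds the adaptive coalescing
 * logic (assuming the 10G row is picked; see enic_set_rx_coal_setting()
 * below). Each pair selects starting points inside the 0 to
 * ENIC_RX_COALESCE_RANGE_END usec timer range:
 *
 *	rx_coal->small_pkt_range_start = mod_range[idx].small_pkt_range_start;
 *	rx_coal->large_pkt_range_start = mod_range[idx].large_pkt_range_start;
 *
 * so on a 10G link, small-packet traffic interpolates its timer starting
 * at 0 usec and large-packet traffic starting at 3 usec.
 */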
int enic_is_dynamic(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
}

int enic_sriov_enabled(struct enic *enic)
{
	return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0;
}

static int enic_is_sriov_vf(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
}

int enic_is_valid_vf(struct enic *enic, int vf)
{
#ifdef CONFIG_PCI_IOV
	return vf >= 0 && vf < enic->num_vfs;
#else
	return 0;
#endif
}

static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(wq->vdev);

	if (buf->sop)
		pci_unmap_single(enic->pdev, buf->dma_addr,
				 buf->len, PCI_DMA_TODEVICE);
	else
		pci_unmap_page(enic->pdev, buf->dma_addr,
			       buf->len, PCI_DMA_TODEVICE);

	if (buf->os_buf)
		dev_kfree_skb_any(buf->os_buf);
}

static void enic_wq_free_buf(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
	enic_free_wq_buf(wq, buf);
}

static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	spin_lock(&enic->wq_lock[q_number]);

	vnic_wq_service(&enic->wq[q_number], cq_desc,
		completed_index, enic_wq_free_buf,
		opaque);

	if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
		netif_wake_subqueue(enic->netdev, q_number);

	spin_unlock(&enic->wq_lock[q_number]);

	return 0;
}
static bool enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;
	bool err = false;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		err |= error_status;
		if (error_status)
			netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
				   i, error_status);
	}

	for (i = 0; i < enic->rq_count; i++) {
		error_status = vnic_rq_error_status(&enic->rq[i]);
		err |= error_status;
		if (error_status)
			netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
				   i, error_status);
	}

	return err;
}

static void enic_msglvl_check(struct enic *enic)
{
	u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);

	if (msg_enable != enic->msg_enable) {
		netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n",
			    enic->msg_enable, msg_enable);
		enic->msg_enable = msg_enable;
	}
}

static void enic_mtu_check(struct enic *enic)
{
	u32 mtu = vnic_dev_mtu(enic->vdev);
	struct net_device *netdev = enic->netdev;

	if (mtu && mtu != enic->port_mtu) {
		enic->port_mtu = mtu;
		if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
			mtu = max_t(int, ENIC_MIN_MTU,
				    min_t(int, ENIC_MAX_MTU, mtu));
			if (mtu != netdev->mtu)
				schedule_work(&enic->change_mtu_work);
		} else {
			if (mtu < netdev->mtu)
				netdev_warn(netdev,
					"interface MTU (%d) set higher "
					"than switch port MTU (%d)\n",
					netdev->mtu, mtu);
		}
	}
}

static void enic_link_check(struct enic *enic)
{
	int link_status = vnic_dev_link_status(enic->vdev);
	int carrier_ok = netif_carrier_ok(enic->netdev);

	if (link_status && !carrier_ok) {
		netdev_info(enic->netdev, "Link UP\n");
		netif_carrier_on(enic->netdev);
	} else if (!link_status && carrier_ok) {
		netdev_info(enic->netdev, "Link DOWN\n");
		netif_carrier_off(enic->netdev);
	}
}

static void enic_notify_check(struct enic *enic)
{
	enic_msglvl_check(enic);
	enic_mtu_check(enic);
	enic_link_check(enic);
}
#define ENIC_TEST_INTR(pba, i) (pba & (1 << i))

static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	unsigned int io_intr = enic_legacy_io_intr();
	unsigned int err_intr = enic_legacy_err_intr();
	unsigned int notify_intr = enic_legacy_notify_intr();
	u32 pba;

	vnic_intr_mask(&enic->intr[io_intr]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[io_intr]);
		return IRQ_NONE;	/* not our interrupt */
	}

	if (ENIC_TEST_INTR(pba, notify_intr)) {
		enic_notify_check(enic);
		vnic_intr_return_all_credits(&enic->intr[notify_intr]);
	}

	if (ENIC_TEST_INTR(pba, err_intr)) {
		vnic_intr_return_all_credits(&enic->intr[err_intr]);
		enic_log_q_error(enic);
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);
		return IRQ_HANDLED;
	}

	if (ENIC_TEST_INTR(pba, io_intr))
		napi_schedule_irqoff(&enic->napi[0]);
	else
		vnic_intr_unmask(&enic->intr[io_intr]);

	return IRQ_HANDLED;
}
static irqreturn_t enic_isr_msi(int irq, void *data)
{
	struct enic *enic = data;

	/* With MSI, there is no sharing of interrupts, so this is
	 * our interrupt and there is no need to ack it.  The device
	 * is not providing per-vector masking, so the OS will not
	 * write to PCI config space to mask/unmask the interrupt.
	 * We're using mask_on_assertion for MSI, so the device
	 * automatically masks the interrupt when the interrupt is
	 * generated.  Later, when exiting polling, the interrupt
	 * will be unmasked (see enic_poll).
	 *
	 * Also, the device uses the same PCIe Traffic Class (TC)
	 * for Memory Write data and MSI, so there are no ordering
	 * issues; the MSI will always arrive at the Root Complex
	 * _after_ corresponding Memory Writes (i.e. descriptor
	 * writes).
	 */

	napi_schedule_irqoff(&enic->napi[0]);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix(int irq, void *data)
{
	struct napi_struct *napi = data;

	napi_schedule_irqoff(napi);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_err(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_err_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);

	if (enic_log_q_error(enic))
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_notify_intr(enic);

	enic_notify_check(enic);
	vnic_intr_return_all_credits(&enic->intr[intr]);

	return IRQ_HANDLED;
}
static int enic_queue_wq_skb_cont(struct enic *enic, struct vnic_wq *wq,
	struct sk_buff *skb, unsigned int len_left,
	int loopback)
{
	const skb_frag_t *frag;
	dma_addr_t dma_addr;

	/* Queue additional data fragments */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, 0,
					    skb_frag_size(frag),
					    DMA_TO_DEVICE);
		if (unlikely(enic_dma_map_check(enic, dma_addr)))
			return -ENOMEM;
		enic_queue_wq_desc_cont(wq, skb, dma_addr, skb_frag_size(frag),
					(len_left == 0),	/* EOP? */
					loopback);
	}

	return 0;
}

static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq,
	struct sk_buff *skb, int vlan_tag_insert,
	unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	int eop = (len_left == 0);
	dma_addr_t dma_addr;
	int err = 0;

	dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
				  PCI_DMA_TODEVICE);
	if (unlikely(enic_dma_map_check(enic, dma_addr)))
		return -ENOMEM;

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc(wq, skb, dma_addr, head_len, vlan_tag_insert,
			   vlan_tag, eop, loopback);

	if (!eop)
		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);

	return err;
}

static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
	struct sk_buff *skb, int vlan_tag_insert,
	unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	unsigned int hdr_len = skb_checksum_start_offset(skb);
	unsigned int csum_offset = hdr_len + skb->csum_offset;
	int eop = (len_left == 0);
	dma_addr_t dma_addr;
	int err = 0;

	dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
				  PCI_DMA_TODEVICE);
	if (unlikely(enic_dma_map_check(enic, dma_addr)))
		return -ENOMEM;

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc_csum_l4(wq, skb, dma_addr, head_len, csum_offset,
				   hdr_len, vlan_tag_insert, vlan_tag, eop,
				   loopback);

	if (!eop)
		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);

	return err;
}
static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
	struct sk_buff *skb, unsigned int mss,
	int vlan_tag_insert, unsigned int vlan_tag,
	int loopback)
{
	unsigned int frag_len_left = skb_headlen(skb);
	unsigned int len_left = skb->len - frag_len_left;
	unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int eop = (len_left == 0);
	unsigned int len;
	dma_addr_t dma_addr;
	unsigned int offset = 0;
	skb_frag_t *frag;

	/* Preload TCP csum field with IP pseudo hdr calculated
	 * with IP length set to zero. HW will later add in length
	 * to each TCP segment resulting from the TSO.
	 */

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for the main skb fragment
	 */
	while (frag_len_left) {
		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
		dma_addr = pci_map_single(enic->pdev, skb->data + offset, len,
					  PCI_DMA_TODEVICE);
		if (unlikely(enic_dma_map_check(enic, dma_addr)))
			return -ENOMEM;
		enic_queue_wq_desc_tso(wq, skb, dma_addr, len, mss, hdr_len,
				       vlan_tag_insert, vlan_tag,
				       eop && (len == frag_len_left), loopback);
		frag_len_left -= len;
		offset += len;
	}

	if (eop)
		return 0;

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for additional data fragments
	 */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		frag_len_left = skb_frag_size(frag);
		offset = 0;

		while (frag_len_left) {
			len = min(frag_len_left,
				  (unsigned int)WQ_ENET_MAX_DESC_LEN);
			dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
						    offset, len,
						    DMA_TO_DEVICE);
			if (unlikely(enic_dma_map_check(enic, dma_addr)))
				return -ENOMEM;
			enic_queue_wq_desc_cont(wq, skb, dma_addr, len,
						(len_left == 0) &&
						 (len == frag_len_left),/*EOP*/
						loopback);
			frag_len_left -= len;
			offset += len;
		}
	}

	return 0;
}
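
/* Worked example of the TSO split above (illustrative; assumes
 * WQ_ENET_MAX_DESC_LEN is 16384): a TSO skb with a 2048-byte linear area
 * and one 61952-byte page fragment queues
 *
 *	linear:   1 TSO descriptor         (2048 < 16384)
 *	fragment: 61952 = 3*16384 + 12800  ->  4 continuation descriptors
 *
 * for 5 descriptors total, with EOP set only on the final 12800-byte
 * continuation descriptor, matching the ENIC_DESC_MAX_SPLITS bound.
 */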
static inline void enic_queue_wq_skb(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int vlan_tag = 0;
	int vlan_tag_insert = 0;
	int loopback = 0;
	int err;

	if (skb_vlan_tag_present(skb)) {
		/* VLAN tag from trunking driver */
		vlan_tag_insert = 1;
		vlan_tag = skb_vlan_tag_get(skb);
	} else if (enic->loop_enable) {
		vlan_tag = enic->loop_tag;
		loopback = 1;
	}

	if (mss)
		err = enic_queue_wq_skb_tso(enic, wq, skb, mss,
					    vlan_tag_insert, vlan_tag,
					    loopback);
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert,
						vlan_tag, loopback);
	else
		err = enic_queue_wq_skb_vlan(enic, wq, skb, vlan_tag_insert,
					     vlan_tag, loopback);
	if (unlikely(err)) {
		struct vnic_wq_buf *buf;

		buf = wq->to_use->prev;
		/* while not EOP of previous pkt && queue not empty.
		 * For all non EOP bufs, os_buf is NULL.
		 */
		while (!buf->os_buf && (buf->next != wq->to_clean)) {
			enic_free_wq_buf(wq, buf);
			wq->ring.desc_avail++;
			buf = buf->prev;
		}
		wq->to_use = buf->next;
		dev_kfree_skb(skb);
	}
}
/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
	struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_wq *wq;
	unsigned int txq_map;
	struct netdev_queue *txq;

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
	wq = &enic->wq[txq_map];
	txq = netdev_get_tx_queue(netdev, txq_map);

	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
	 * which is very likely.  In the off chance it's going to take
	 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
	 */

	if (skb_shinfo(skb)->gso_size == 0 &&
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	spin_lock(&enic->wq_lock[txq_map]);

	if (vnic_wq_desc_avail(wq) <
	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
		netif_tx_stop_queue(txq);
		/* This is a hard error, log it */
		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
		spin_unlock(&enic->wq_lock[txq_map]);
		return NETDEV_TX_BUSY;
	}

	enic_queue_wq_skb(enic, wq, skb);

	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
		netif_tx_stop_queue(txq);
	if (!skb->xmit_more || netif_xmit_stopped(txq))
		vnic_wq_doorbell(wq);

	spin_unlock(&enic->wq_lock[txq_map]);

	return NETDEV_TX_OK;
}
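
/* The stop/wake watermark above is the worst case a single skb can
 * consume. A minimal sketch of the invariant (illustrative, with an
 * assumed MAX_SKB_FRAGS of 17 and ENIC_DESC_MAX_SPLITS of 5):
 *
 *	worst_case_descs = 17 + 5;		// 22 descriptors
 *	if (vnic_wq_desc_avail(wq) < worst_case_descs)
 *		netif_tx_stop_queue(txq);	// next skb might not fit
 *
 * enic_wq_service() wakes the queue only once at least that many
 * descriptors are free again, so the "Tx ring full when queue awake"
 * branch should never trigger in normal operation.
 */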
/* dev_base_lock rwlock held, nominally process context */
static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
						struct rtnl_link_stats64 *net_stats)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *stats;
	int err;

	err = enic_dev_stats_dump(enic, &stats);
	/* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump
	 * For other failures, like devcmd failure, we return previously
	 * recorded stats.
	 */
	if (err == -ENOMEM)
		return net_stats;

	net_stats->tx_packets = stats->tx.tx_frames_ok;
	net_stats->tx_bytes = stats->tx.tx_bytes_ok;
	net_stats->tx_errors = stats->tx.tx_errors;
	net_stats->tx_dropped = stats->tx.tx_drops;

	net_stats->rx_packets = stats->rx.rx_frames_ok;
	net_stats->rx_bytes = stats->rx.rx_bytes_ok;
	net_stats->rx_errors = stats->rx.rx_errors;
	net_stats->multicast = stats->rx.rx_multicast_frames_ok;
	net_stats->rx_over_errors = enic->rq_truncated_pkts;
	net_stats->rx_crc_errors = enic->rq_bad_fcs;
	net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;

	return net_stats;
}
static int enic_mc_sync(struct net_device *netdev, const u8 *mc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic->mc_count == ENIC_MULTICAST_PERFECT_FILTERS) {
		unsigned int mc_count = netdev_mc_count(netdev);

		netdev_warn(netdev, "Registering only %d out of %d multicast addresses\n",
			    ENIC_MULTICAST_PERFECT_FILTERS, mc_count);

		return -ENOSPC;
	}

	enic_dev_add_addr(enic, mc_addr);
	enic->mc_count++;

	return 0;
}

static int enic_mc_unsync(struct net_device *netdev, const u8 *mc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	enic_dev_del_addr(enic, mc_addr);
	enic->mc_count--;

	return 0;
}

static int enic_uc_sync(struct net_device *netdev, const u8 *uc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic->uc_count == ENIC_UNICAST_PERFECT_FILTERS) {
		unsigned int uc_count = netdev_uc_count(netdev);

		netdev_warn(netdev, "Registering only %d out of %d unicast addresses\n",
			    ENIC_UNICAST_PERFECT_FILTERS, uc_count);

		return -ENOSPC;
	}

	enic_dev_add_addr(enic, uc_addr);
	enic->uc_count++;

	return 0;
}

static int enic_uc_unsync(struct net_device *netdev, const u8 *uc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	enic_dev_del_addr(enic, uc_addr);
	enic->uc_count--;

	return 0;
}

void enic_reset_addr_lists(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;

	__dev_uc_unsync(netdev, NULL);
	__dev_mc_unsync(netdev, NULL);

	enic->mc_count = 0;
	enic->uc_count = 0;
	enic->flags = 0;
}
static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
		if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
			return -EADDRNOTAVAIL;
	} else {
		if (!is_valid_ether_addr(addr))
			return -EADDRNOTAVAIL;
	}

	memcpy(netdev->dev_addr, addr, netdev->addr_len);

	return 0;
}

static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
{
	struct enic *enic = netdev_priv(netdev);
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	int err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_del_station_addr(enic);
		if (err)
			return err;
	}

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_add_station_addr(enic);
		if (err)
			return err;
	}

	return err;
}

static int enic_set_mac_address(struct net_device *netdev, void *p)
{
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	struct enic *enic = netdev_priv(netdev);
	int err;

	err = enic_dev_del_station_addr(enic);
	if (err)
		return err;

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	return enic_dev_add_station_addr(enic);
}
/* netif_tx_lock held, BHs disabled */
static void enic_set_rx_mode(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	int directed = 1;
	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
	int promisc = (netdev->flags & IFF_PROMISC) ||
		netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS;
	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
		netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS;
	unsigned int flags = netdev->flags |
		(allmulti ? IFF_ALLMULTI : 0) |
		(promisc ? IFF_PROMISC : 0);

	if (enic->flags != flags) {
		enic->flags = flags;
		enic_dev_packet_filter(enic, directed,
			multicast, broadcast, promisc, allmulti);
	}

	if (!promisc) {
		__dev_uc_sync(netdev, enic_uc_sync, enic_uc_unsync);
		if (!allmulti)
			__dev_mc_sync(netdev, enic_mc_sync, enic_mc_unsync);
	}
}

/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	schedule_work(&enic->tx_hang_reset);
}
static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (is_valid_ether_addr(mac) || is_zero_ether_addr(mac)) {
		if (vf == PORT_SELF_VF) {
			memcpy(pp->vf_mac, mac, ETH_ALEN);
			return 0;
		} else {
			/*
			 * For sriov vf's set the mac in hw
			 */
			ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
				vnic_dev_set_mac_addr, mac);
			return enic_dev_status_to_errno(err);
		}
	} else
		return -EINVAL;
}

static int enic_set_vf_port(struct net_device *netdev, int vf,
	struct nlattr *port[])
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile prev_pp;
	struct enic_port_profile *pp;
	int err = 0, restore_pp = 1;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!port[IFLA_PORT_REQUEST])
		return -EOPNOTSUPP;

	memcpy(&prev_pp, pp, sizeof(*enic->pp));
	memset(pp, 0, sizeof(*enic->pp));

	pp->set |= ENIC_SET_REQUEST;
	pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);

	if (port[IFLA_PORT_PROFILE]) {
		pp->set |= ENIC_SET_NAME;
		memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
			PORT_PROFILE_MAX);
	}

	if (port[IFLA_PORT_INSTANCE_UUID]) {
		pp->set |= ENIC_SET_INSTANCE;
		memcpy(pp->instance_uuid,
			nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
	}

	if (port[IFLA_PORT_HOST_UUID]) {
		pp->set |= ENIC_SET_HOST;
		memcpy(pp->host_uuid,
			nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
	}

	if (vf == PORT_SELF_VF) {
		/* Special case handling: mac came from IFLA_VF_MAC */
		if (!is_zero_ether_addr(prev_pp.vf_mac))
			memcpy(pp->mac_addr, prev_pp.vf_mac, ETH_ALEN);

		if (is_zero_ether_addr(netdev->dev_addr))
			eth_hw_addr_random(netdev);
	} else {
		/* SR-IOV VF: get mac from adapter */
		ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
			vnic_dev_get_mac_addr, pp->mac_addr);
		if (err) {
			netdev_err(netdev, "Error getting mac for vf %d\n", vf);
			memcpy(pp, &prev_pp, sizeof(*pp));
			return enic_dev_status_to_errno(err);
		}
	}

	err = enic_process_set_pp_request(enic, vf, &prev_pp, &restore_pp);
	if (err) {
		if (restore_pp) {
			/* Things are still the way they were: Implicit
			 * DISASSOCIATE failed
			 */
			memcpy(pp, &prev_pp, sizeof(*pp));
		} else {
			memset(pp, 0, sizeof(*pp));
			if (vf == PORT_SELF_VF)
				eth_zero_addr(netdev->dev_addr);
		}
	} else {
		/* Set flag to indicate that the port assoc/disassoc
		 * request has been sent out to fw
		 */
		pp->set |= ENIC_PORT_REQUEST_APPLIED;

		/* If DISASSOCIATE, clean up all assigned/saved macaddresses */
		if (pp->request == PORT_REQUEST_DISASSOCIATE) {
			eth_zero_addr(pp->mac_addr);
			if (vf == PORT_SELF_VF)
				eth_zero_addr(netdev->dev_addr);
		}
	}

	if (vf == PORT_SELF_VF)
		eth_zero_addr(pp->vf_mac);

	return err;
}
static int enic_get_vf_port(struct net_device *netdev, int vf,
	struct sk_buff *skb)
{
	struct enic *enic = netdev_priv(netdev);
	u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!(pp->set & ENIC_PORT_REQUEST_APPLIED))
		return -ENODATA;

	err = enic_process_get_pp_request(enic, vf, pp->request, &response);
	if (err)
		return err;

	if (nla_put_u16(skb, IFLA_PORT_REQUEST, pp->request) ||
	    nla_put_u16(skb, IFLA_PORT_RESPONSE, response) ||
	    ((pp->set & ENIC_SET_NAME) &&
	     nla_put(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, pp->name)) ||
	    ((pp->set & ENIC_SET_INSTANCE) &&
	     nla_put(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
		     pp->instance_uuid)) ||
	    ((pp->set & ENIC_SET_HOST) &&
	     nla_put(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, pp->host_uuid)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);

	if (!buf->os_buf)
		return;

	pci_unmap_single(enic->pdev, buf->dma_addr,
			 buf->len, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(buf->os_buf);
	buf->os_buf = NULL;
}

static int enic_rq_alloc_buf(struct vnic_rq *rq)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
	unsigned int os_buf_index = 0;
	dma_addr_t dma_addr;
	struct vnic_rq_buf *buf = rq->to_use;

	if (buf->os_buf) {
		enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
				   buf->len);

		return 0;
	}
	skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!skb)
		return -ENOMEM;

	dma_addr = pci_map_single(enic->pdev, skb->data, len,
				  PCI_DMA_FROMDEVICE);
	if (unlikely(enic_dma_map_check(enic, dma_addr))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	enic_queue_rq_desc(rq, skb, os_buf_index,
			   dma_addr, len);

	return 0;
}
static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
				      u32 pkt_len)
{
	if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len)
		pkt_size->large_pkt_bytes_cnt += pkt_len;
	else
		pkt_size->small_pkt_bytes_cnt += pkt_len;
}

static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb,
			     struct vnic_rq_buf *buf, u16 len)
{
	struct enic *enic = netdev_priv(netdev);
	struct sk_buff *new_skb;

	if (len > enic->rx_copybreak)
		return false;
	new_skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!new_skb)
		return false;
	pci_dma_sync_single_for_cpu(enic->pdev, buf->dma_addr, len,
				    DMA_FROM_DEVICE);
	memcpy(new_skb->data, (*skb)->data, len);
	*skb = new_skb;

	return true;
}
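
/* Copybreak trade-off, as a sketch (illustrative): for frames of at most
 * rx_copybreak bytes (RX_COPYBREAK_DEFAULT, 256, unless changed via the
 * ethtool rx-copybreak tunable), copying into a small fresh skb is
 * cheaper than unmapping and re-allocating the full-MTU ring buffer:
 *
 *	if (enic_rxcopybreak(netdev, &skb, buf, len))
 *		;	// buf->os_buf stays mapped and is posted again
 *	else
 *		;	// the ring buffer itself is handed up the stack
 */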
static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
	u32 rss_hash;

	if (skipped)
		return;

	skb = buf->os_buf;

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan_tci, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (packet_error) {

		if (!fcs_ok) {
			if (bytes_written > 0)
				enic->rq_bad_fcs++;
			else if (bytes_written == 0)
				enic->rq_truncated_pkts++;
		}

		pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);
		buf->os_buf = NULL;

		return;
	}

	if (eop && bytes_written > 0) {

		/* Good receive
		 */

		if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
			buf->os_buf = NULL;
			pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
					 PCI_DMA_FROMDEVICE);
		}
		prefetch(skb->data - NET_IP_ALIGN);

		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, q_number);
		if (netdev->features & NETIF_F_RXHASH) {
			skb_set_hash(skb, rss_hash,
				     (rss_type &
				      (NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX |
				       NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 |
				       NIC_CFG_RSS_HASH_TYPE_TCP_IPV4)) ?
				     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
		}

		/* Hardware does not provide whole packet checksum. It only
		 * provides pseudo checksum. Since hw validates the packet
		 * checksum but does not provide us the checksum value, use
		 * CHECKSUM_UNNECESSARY.
		 */
		if ((netdev->features & NETIF_F_RXCSUM) && tcp_udp_csum_ok &&
		    ipv4_csum_ok)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (vlan_stripped)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);

		skb_mark_napi_id(skb, &enic->napi[rq->index]);
		if (enic_poll_busy_polling(rq) ||
		    !(netdev->features & NETIF_F_GRO))
			netif_receive_skb(skb);
		else
			napi_gro_receive(&enic->napi[q_number], skb);
		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
			enic_intr_update_pkt_size(&cq->pkt_size_counter,
						  bytes_written);
	} else {

		/* Buffer overflow
		 */

		pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);
		buf->os_buf = NULL;
	}
}
static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
		completed_index, VNIC_RQ_RETURN_DESC,
		enic_rq_indicate_buf, opaque);

	return 0;
}

static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
{
	unsigned int intr = enic_msix_rq_intr(enic, rq->index);
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
	u32 timer = cq->tobe_rx_coal_timeval;

	if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
		vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
		cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;
	}
}
static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
{
	struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
	struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
	int index;
	u32 timer;
	u32 range_start;
	u32 traffic;
	u64 delta;
	ktime_t now = ktime_get();

	delta = ktime_us_delta(now, cq->prev_ts);
	if (delta < ENIC_AIC_TS_BREAK)
		return;
	cq->prev_ts = now;

	traffic = pkt_size_counter->large_pkt_bytes_cnt +
		  pkt_size_counter->small_pkt_bytes_cnt;
	/* The table takes Mbps
	 * traffic *= 8    => bits
	 * traffic *= (10^6 / delta)    => bps
	 * traffic /= 10^6     => Mbps
	 *
	 * Combining, traffic *= (8 / delta)
	 */

	traffic <<= 3;
	traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta;

	for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++)
		if (traffic < mod_table[index].rx_rate)
			break;
	range_start = (pkt_size_counter->small_pkt_bytes_cnt >
		       pkt_size_counter->large_pkt_bytes_cnt << 1) ?
		      rx_coal->small_pkt_range_start :
		      rx_coal->large_pkt_range_start;
	timer = range_start + ((rx_coal->range_end - range_start) *
			       mod_table[index].range_percent / 100);

	/* Damping */
	cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;

	pkt_size_counter->large_pkt_bytes_cnt = 0;
	pkt_size_counter->small_pkt_bytes_cnt = 0;
}
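
/* Worked example of the rate math above (illustrative numbers): suppose
 * 2500000 bytes arrived in delta = 4000 usec. Then
 *
 *	traffic <<= 3;			// 2500000 * 8 = 20000000 bits
 *	traffic / (u32)delta;		// 20000000 / 4000 = 5000 Mbps
 *
 * (bits per usec equals Mbps). 5000 Mbps selects the first mod_table row
 * whose rx_rate exceeds it, and that row's range_percent interpolates
 * the timer between range_start and rx_coal->range_end; the final >> 1
 * averages the new timer with the previous one to damp oscillation.
 */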
static int enic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int cq_rq = enic_cq_rq(enic, 0);
	unsigned int cq_wq = enic_cq_wq(enic, 0);
	unsigned int intr = enic_legacy_io_intr();
	unsigned int rq_work_to_do = budget;
	unsigned int wq_work_to_do = -1; /* no limit */
	unsigned int work_done, rq_work_done = 0, wq_work_done;
	int err;

	wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do,
				       enic_wq_service, NULL);

	if (!enic_poll_lock_napi(&enic->rq[cq_rq])) {
		if (wq_work_done > 0)
			vnic_intr_return_credits(&enic->intr[intr],
						 wq_work_done,
						 0 /* dont unmask intr */,
						 0 /* dont reset intr timer */);
		return budget;
	}

	if (budget > 0)
		rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
			rq_work_to_do, enic_rq_service, NULL);

	/* Accumulate intr event credits for this polling
	 * cycle.  An intr event is the completion of a
	 * WQ or RQ packet.
	 */

	work_done = rq_work_done + wq_work_done;

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
	enic_poll_unlock_napi(&enic->rq[cq_rq], napi);

	/* Buffer allocation failed. Stay in polling
	 * mode so we can try to fill the ring again.
	 */

	if (err)
		rq_work_done = rq_work_to_do;
	if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
		/* Call the function which refreshes the intr coalescing timer
		 * value based on the traffic.
		 */
		enic_calc_int_moderation(enic, &enic->rq[0]);

	if (rq_work_done < rq_work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		napi_complete(napi);
		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
			enic_set_int_moderation(enic, &enic->rq[0]);
		vnic_intr_unmask(&enic->intr[intr]);
	}

	return rq_work_done;
}
#ifdef CONFIG_RFS_ACCEL
static void enic_free_rx_cpu_rmap(struct enic *enic)
{
	free_irq_cpu_rmap(enic->netdev->rx_cpu_rmap);
	enic->netdev->rx_cpu_rmap = NULL;
}

static void enic_set_rx_cpu_rmap(struct enic *enic)
{
	int i, res;

	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
		enic->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(enic->rq_count);
		if (unlikely(!enic->netdev->rx_cpu_rmap))
			return;
		for (i = 0; i < enic->rq_count; i++) {
			res = irq_cpu_rmap_add(enic->netdev->rx_cpu_rmap,
					       enic->msix_entry[i].vector);
			if (unlikely(res)) {
				enic_free_rx_cpu_rmap(enic);
				return;
			}
		}
	}
}

#else

static void enic_free_rx_cpu_rmap(struct enic *enic)
{
}

static void enic_set_rx_cpu_rmap(struct enic *enic)
{
}

#endif /* CONFIG_RFS_ACCEL */

#ifdef CONFIG_NET_RX_BUSY_POLL
static int enic_busy_poll(struct napi_struct *napi)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int rq = (napi - &enic->napi[0]);
	unsigned int cq = enic_cq_rq(enic, rq);
	unsigned int intr = enic_msix_rq_intr(enic, rq);
	unsigned int work_to_do = -1; /* clean all pkts possible */
	unsigned int work_done;

	if (!enic_poll_lock_poll(&enic->rq[rq]))
		return LL_FLUSH_BUSY;
	work_done = vnic_cq_service(&enic->cq[cq], work_to_do,
				    enic_rq_service, NULL);

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
					 work_done, 0, 0);
	vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
	if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
		enic_calc_int_moderation(enic, &enic->rq[rq]);
	enic_poll_unlock_poll(&enic->rq[rq]);

	return work_done;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int wq_index = (napi - &enic->napi[0]) - enic->rq_count;
	struct vnic_wq *wq = &enic->wq[wq_index];
	unsigned int cq;
	unsigned int intr;
	unsigned int wq_work_to_do = -1; /* clean all desc possible */
	unsigned int wq_work_done;
	unsigned int wq_irq;

	wq_irq = wq->index;
	cq = enic_cq_wq(enic, wq_irq);
	intr = enic_msix_wq_intr(enic, wq_irq);
	wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do,
				       enic_wq_service, NULL);

	vnic_intr_return_credits(&enic->intr[intr], wq_work_done,
				 0 /* don't unmask intr */,
				 1 /* reset intr timer */);
	if (!wq_work_done) {
		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[intr]);
		return 0;
	}

	return budget;
}

static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int rq = (napi - &enic->napi[0]);
	unsigned int cq = enic_cq_rq(enic, rq);
	unsigned int intr = enic_msix_rq_intr(enic, rq);
	unsigned int work_to_do = budget;
	unsigned int work_done = 0;
	int err;

	if (!enic_poll_lock_napi(&enic->rq[rq]))
		return budget;

	/* Service RQ
	 */

	if (budget > 0)
		work_done = vnic_cq_service(&enic->cq[cq],
			work_to_do, enic_rq_service, NULL);

	/* Return intr event credits for this polling
	 * cycle.  An intr event is the completion of a
	 * RQ packet.
	 */

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling mode
	 * so we can try to fill the ring again.
	 */

	if (err)
		work_done = work_to_do;
	if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
		/* Call the function which refreshes the intr coalescing timer
		 * value based on the traffic.
		 */
		enic_calc_int_moderation(enic, &enic->rq[rq]);

	enic_poll_unlock_napi(&enic->rq[rq], napi);
	if (work_done < work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		napi_complete(napi);
		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
			enic_set_int_moderation(enic, &enic->rq[rq]);
		vnic_intr_unmask(&enic->intr[intr]);
	}

	return work_done;
}
static void enic_notify_timer(unsigned long data)
{
	struct enic *enic = (struct enic *)data;

	enic_notify_check(enic);

	mod_timer(&enic->notify_timer,
		round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
}

static void enic_free_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;

	enic_free_rx_cpu_rmap(enic);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		free_irq(enic->pdev->irq, netdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		free_irq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			if (enic->msix[i].requested)
				free_irq(enic->msix_entry[i].vector,
					enic->msix[i].devid);
		break;
	default:
		break;
	}
}
static int enic_request_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i, intr;
	int err = 0;

	enic_set_rx_cpu_rmap(enic);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {

	case VNIC_DEV_INTR_MODE_INTX:

		err = request_irq(enic->pdev->irq, enic_isr_legacy,
			IRQF_SHARED, netdev->name, netdev);
		break;

	case VNIC_DEV_INTR_MODE_MSI:

		err = request_irq(enic->pdev->irq, enic_isr_msi,
			0, netdev->name, enic);
		break;

	case VNIC_DEV_INTR_MODE_MSIX:

		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			snprintf(enic->msix[intr].devname,
				sizeof(enic->msix[intr].devname),
				"%.11s-rx-%d", netdev->name, i);
			enic->msix[intr].isr = enic_isr_msix;
			enic->msix[intr].devid = &enic->napi[i];
		}

		for (i = 0; i < enic->wq_count; i++) {
			int wq = enic_cq_wq(enic, i);

			intr = enic_msix_wq_intr(enic, i);
			snprintf(enic->msix[intr].devname,
				sizeof(enic->msix[intr].devname),
				"%.11s-tx-%d", netdev->name, i);
			enic->msix[intr].isr = enic_isr_msix;
			enic->msix[intr].devid = &enic->napi[wq];
		}

		intr = enic_msix_err_intr(enic);
		snprintf(enic->msix[intr].devname,
			sizeof(enic->msix[intr].devname),
			"%.11s-err", netdev->name);
		enic->msix[intr].isr = enic_isr_msix_err;
		enic->msix[intr].devid = enic;

		intr = enic_msix_notify_intr(enic);
		snprintf(enic->msix[intr].devname,
			sizeof(enic->msix[intr].devname),
			"%.11s-notify", netdev->name);
		enic->msix[intr].isr = enic_isr_msix_notify;
		enic->msix[intr].devid = enic;

		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			enic->msix[i].requested = 0;

		for (i = 0; i < enic->intr_count; i++) {
			err = request_irq(enic->msix_entry[i].vector,
				enic->msix[i].isr, 0,
				enic->msix[i].devname,
				enic->msix[i].devid);
			if (err) {
				enic_free_intr(enic);
				break;
			}
			enic->msix[i].requested = 1;
		}

		break;

	default:
		break;
	}

	return err;
}
static void enic_synchronize_irqs(struct enic *enic)
{
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
	case VNIC_DEV_INTR_MODE_MSI:
		synchronize_irq(enic->pdev->irq);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->intr_count; i++)
			synchronize_irq(enic->msix_entry[i].vector);
		break;
	default:
		break;
	}
}
static void enic_set_rx_coal_setting(struct enic *enic)
{
	unsigned int speed;
	int index = -1;
	struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;

	/* 1. Read the link speed from fw
	 * 2. Pick the default range for the speed
	 * 3. Update it in enic->rx_coalesce_setting
	 */
	speed = vnic_dev_port_speed(enic->vdev);
	if (ENIC_LINK_SPEED_10G < speed)
		index = ENIC_LINK_40G_INDEX;
	else if (ENIC_LINK_SPEED_4G < speed)
		index = ENIC_LINK_10G_INDEX;
	else
		index = ENIC_LINK_4G_INDEX;

	rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
	rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
	rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;

	/* Start with the value provided by UCSM */
	for (index = 0; index < enic->rq_count; index++)
		enic->cq[index].cur_rx_coal_timeval =
				enic->config.intr_timer_usec;

	rx_coal->use_adaptive_rx_coalesce = 1;
}
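
/* Example of the index selection above (illustrative; assumes the
 * ENIC_LINK_SPEED_* constants and vnic_dev_port_speed() are both in
 * Mbps, as the names suggest): for a 10000 Mbps port,
 * ENIC_LINK_SPEED_10G < speed is false and ENIC_LINK_SPEED_4G < speed
 * is true, so the 10G row of mod_range[] is used:
 *
 *	rx_coal->small_pkt_range_start = 0;	// usec
 *	rx_coal->large_pkt_range_start = 3;	// usec
 */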
static int enic_dev_notify_set(struct enic *enic)
{
	int err;

	spin_lock_bh(&enic->devcmd_lock);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		err = vnic_dev_notify_set(enic->vdev,
			enic_legacy_notify_intr());
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		err = vnic_dev_notify_set(enic->vdev,
			enic_msix_notify_intr(enic));
		break;
	default:
		err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
		break;
	}
	spin_unlock_bh(&enic->devcmd_lock);

	return err;
}

static void enic_notify_timer_start(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSI:
		mod_timer(&enic->notify_timer, jiffies);
		break;
	default:
		/* Using intr for notification for INTx/MSI-X */
		break;
	}
}
/* rtnl lock is held, process context */
static int enic_open(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	err = enic_request_intr(enic);
	if (err) {
		netdev_err(netdev, "Unable to request irq.\n");
		return err;
	}

	err = enic_dev_notify_set(enic);
	if (err) {
		netdev_err(netdev,
			"Failed to alloc notify buffer, aborting.\n");
		goto err_out_free_intr;
	}

	for (i = 0; i < enic->rq_count; i++) {
		vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
		/* Need at least one buffer on ring to get going */
		if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
			netdev_err(netdev, "Unable to alloc receive buffers\n");
			err = -ENOMEM;
			goto err_out_free_rq;
		}
	}

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_enable(&enic->wq[i]);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_enable(&enic->rq[i]);

	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
		enic_dev_add_station_addr(enic);

	enic_set_rx_mode(netdev);

	netif_tx_wake_all_queues(netdev);

	for (i = 0; i < enic->rq_count; i++) {
		enic_busy_poll_init_lock(&enic->rq[i]);
		napi_enable(&enic->napi[i]);
	}
	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		for (i = 0; i < enic->wq_count; i++)
			napi_enable(&enic->napi[enic_cq_wq(enic, i)]);
	enic_dev_enable(enic);

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_unmask(&enic->intr[i]);

	enic_notify_timer_start(enic);
	enic_rfs_flw_tbl_init(enic);

	return 0;

err_out_free_rq:
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	enic_dev_notify_unset(enic);
err_out_free_intr:
	enic_free_intr(enic);

	return err;
}
/* rtnl lock is held, process context */
static int enic_stop(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	for (i = 0; i < enic->intr_count; i++) {
		vnic_intr_mask(&enic->intr[i]);
		(void)vnic_intr_masked(&enic->intr[i]);	/* flush write */
	}

	enic_synchronize_irqs(enic);

	del_timer_sync(&enic->notify_timer);
	enic_rfs_flw_tbl_free(enic);

	enic_dev_disable(enic);

	for (i = 0; i < enic->rq_count; i++) {
		napi_disable(&enic->napi[i]);
		local_bh_disable();
		while (!enic_poll_lock_napi(&enic->rq[i]))
			mdelay(1);
		local_bh_enable();
	}

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);
	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		for (i = 0; i < enic->wq_count; i++)
			napi_disable(&enic->napi[enic_cq_wq(enic, i)]);

	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
		enic_dev_del_station_addr(enic);

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_disable(&enic->rq[i]);
		if (err)
			return err;
	}

	enic_dev_notify_unset(enic);
	enic_free_intr(enic);

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_clean(&enic->cq[i]);
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_clean(&enic->intr[i]);

	return 0;
}
static int enic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct enic *enic = netdev_priv(netdev);
	int running = netif_running(netdev);

	if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
		return -EINVAL;

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
		return -EOPNOTSUPP;

	if (running)
		enic_stop(netdev);

	netdev->mtu = new_mtu;

	if (netdev->mtu > enic->port_mtu)
		netdev_warn(netdev,
			"interface MTU (%d) set higher than port MTU (%d)\n",
			netdev->mtu, enic->port_mtu);

	if (running)
		enic_open(netdev);

	return 0;
}

static void enic_change_mtu_work(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, change_mtu_work);
	struct net_device *netdev = enic->netdev;
	int new_mtu = vnic_dev_mtu(enic->vdev);
	int err;
	unsigned int i;

	new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu));

	rtnl_lock();

	/* Stop RQ */
	del_timer_sync(&enic->notify_timer);

	for (i = 0; i < enic->rq_count; i++)
		napi_disable(&enic->napi[i]);

	vnic_intr_mask(&enic->intr[0]);
	enic_synchronize_irqs(enic);
	err = vnic_rq_disable(&enic->rq[0]);
	if (err) {
		rtnl_unlock();
		netdev_err(netdev, "Unable to disable RQ.\n");
		return;
	}
	vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
	vnic_cq_clean(&enic->cq[0]);
	vnic_intr_clean(&enic->intr[0]);

	/* Fill RQ with new_mtu-sized buffers */
	netdev->mtu = new_mtu;
	vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
	/* Need at least one buffer on ring to get going */
	if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
		rtnl_unlock();
		netdev_err(netdev, "Unable to alloc receive buffers.\n");
		return;
	}

	/* Start RQ */
	vnic_rq_enable(&enic->rq[0]);
	napi_enable(&enic->napi[0]);
	vnic_intr_unmask(&enic->intr[0]);
	enic_notify_timer_start(enic);

	rtnl_unlock();

	netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void enic_poll_controller(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_dev *vdev = enic->vdev;
	unsigned int i, intr;

	switch (vnic_dev_get_intr_mode(vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			enic_isr_msix(enic->msix_entry[intr].vector,
				      &enic->napi[i]);
		}

		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			enic_isr_msix(enic->msix_entry[intr].vector,
				      &enic->napi[enic_cq_wq(enic, i)]);
		}

		break;
	case VNIC_DEV_INTR_MODE_MSI:
		enic_isr_msi(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_INTX:
		enic_isr_legacy(enic->pdev->irq, netdev);
		break;
	default:
		break;
	}
}
#endif
static int enic_dev_wait(struct vnic_dev *vdev,
	int (*start)(struct vnic_dev *, int),
	int (*finished)(struct vnic_dev *, int *),
	int arg)
{
	unsigned long time;
	int done;
	int err;

	BUG_ON(in_interrupt());

	err = start(vdev, arg);
	if (err)
		return err;

	/* Wait for func to complete...2 seconds max
	 */

	time = jiffies + (HZ * 2);
	do {

		err = finished(vdev, &done);
		if (err)
			return err;

		if (done)
			return 0;

		schedule_timeout_uninterruptible(HZ / 10);

	} while (time_after(time, jiffies));

	return -ETIMEDOUT;
}
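
/* Typical use of the wait helper above: pair a devcmd "start" op with
 * its "done" poller, e.g. (as in enic_dev_open() below)
 *
 *	err = enic_dev_wait(enic->vdev, vnic_dev_open,
 *			    vnic_dev_open_done, 0);
 *
 * which kicks the command, then polls every 100 ms for up to 2 seconds,
 * returning 0 on completion and -ETIMEDOUT otherwise.
 */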
static int enic_dev_open(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_open,
		vnic_dev_open_done, 0);
	if (err)
		dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n",
			err);

	return err;
}

static int enic_dev_soft_reset(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_soft_reset,
			    vnic_dev_soft_reset_done, 0);
	if (err)
		netdev_err(enic->netdev, "vNIC soft reset failed, err %d\n",
			   err);

	return err;
}

static int enic_dev_hang_reset(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset,
		vnic_dev_hang_reset_done, 0);
	if (err)
		netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n",
			err);

	return err;
}
int __enic_set_rsskey(struct enic *enic)
{
	union vnic_rss_key *rss_key_buf_va;
	dma_addr_t rss_key_buf_pa;
	int i, kidx, bidx, err;

	rss_key_buf_va = pci_zalloc_consistent(enic->pdev,
					       sizeof(union vnic_rss_key),
					       &rss_key_buf_pa);
	if (!rss_key_buf_va)
		return -ENOMEM;

	for (i = 0; i < ENIC_RSS_LEN; i++) {
		kidx = i / ENIC_RSS_BYTES_PER_KEY;
		bidx = i % ENIC_RSS_BYTES_PER_KEY;
		rss_key_buf_va->key[kidx].b[bidx] = enic->rss_key[i];
	}
	spin_lock_bh(&enic->devcmd_lock);
	err = enic_set_rss_key(enic,
		rss_key_buf_pa,
		sizeof(union vnic_rss_key));
	spin_unlock_bh(&enic->devcmd_lock);

	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
		rss_key_buf_va, rss_key_buf_pa);

	return err;
}

static int enic_set_rsskey(struct enic *enic)
{
	netdev_rss_key_fill(enic->rss_key, ENIC_RSS_LEN);

	return __enic_set_rsskey(enic);
}

static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
{
	dma_addr_t rss_cpu_buf_pa;
	union vnic_rss_cpu *rss_cpu_buf_va = NULL;
	unsigned int i;
	int err;

	rss_cpu_buf_va = pci_alloc_consistent(enic->pdev,
		sizeof(union vnic_rss_cpu), &rss_cpu_buf_pa);
	if (!rss_cpu_buf_va)
		return -ENOMEM;

	for (i = 0; i < (1 << rss_hash_bits); i++)
		(*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;

	spin_lock_bh(&enic->devcmd_lock);
	err = enic_set_rss_cpu(enic,
		rss_cpu_buf_pa,
		sizeof(union vnic_rss_cpu));
	spin_unlock_bh(&enic->devcmd_lock);

	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
		rss_cpu_buf_va, rss_cpu_buf_pa);

	return err;
}
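
/* Example of the indirection fill above (illustrative): with
 * rss_hash_bits = 7 and rq_count = 4, the 128-entry table becomes
 *
 *	cpu[0].b[0..3] = 0 1 2 3
 *	cpu[1].b[0..3] = 0 1 2 3
 *	...
 *
 * i.e. RX flows hash into 128 buckets that round-robin across the 4
 * RQs, so each RQ (and its NAPI context) serves ~32 buckets.
 */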
static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
	u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
{
	const u8 tso_ipid_split_en = 0;
	const u8 ig_vlan_strip_en = 1;
	int err;

	/* Enable VLAN tag stripping.
	 */

	spin_lock_bh(&enic->devcmd_lock);
	err = enic_set_nic_cfg(enic,
		rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu,
		rss_enable, tso_ipid_split_en,
		ig_vlan_strip_en);
	spin_unlock_bh(&enic->devcmd_lock);

	return err;
}

static int enic_set_rss_nic_cfg(struct enic *enic)
{
	struct device *dev = enic_get_dev(enic);
	const u8 rss_default_cpu = 0;
	const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
		NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
		NIC_CFG_RSS_HASH_TYPE_IPV6 |
		NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
	const u8 rss_hash_bits = 7;
	const u8 rss_base_cpu = 0;
	u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);

	if (rss_enable) {
		if (!enic_set_rsskey(enic)) {
			if (enic_set_rsscpu(enic, rss_hash_bits)) {
				rss_enable = 0;
				dev_warn(dev, "RSS disabled, "
					"Failed to set RSS cpu indirection table.");
			}
		} else {
			rss_enable = 0;
			dev_warn(dev, "RSS disabled, Failed to set RSS key.\n");
		}
	}

	return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu, rss_enable);
}
static void enic_reset(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, reset);

	if (!netif_running(enic->netdev))
		return;

	rtnl_lock();

	spin_lock(&enic->enic_api_lock);
	enic_stop(enic->netdev);
	enic_dev_soft_reset(enic);
	enic_reset_addr_lists(enic);
	enic_init_vnic_resources(enic);
	enic_set_rss_nic_cfg(enic);
	enic_dev_set_ig_vlan_rewrite_mode(enic);
	enic_open(enic->netdev);
	spin_unlock(&enic->enic_api_lock);
	call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);

	rtnl_unlock();
}

static void enic_tx_hang_reset(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, tx_hang_reset);

	rtnl_lock();

	spin_lock(&enic->enic_api_lock);
	enic_dev_hang_notify(enic);
	enic_stop(enic->netdev);
	enic_dev_hang_reset(enic);
	enic_reset_addr_lists(enic);
	enic_init_vnic_resources(enic);
	enic_set_rss_nic_cfg(enic);
	enic_dev_set_ig_vlan_rewrite_mode(enic);
	enic_open(enic->netdev);
	spin_unlock(&enic->enic_api_lock);
	call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);

	rtnl_unlock();
}
static int enic_set_intr_mode(struct enic *enic)
{
	unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
	unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
	unsigned int i;

	/* Set interrupt mode (INTx, MSI, MSI-X) depending
	 * on system capabilities.
	 *
	 * Try MSI-X first
	 *
	 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
	 * (the second to last INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
	for (i = 0; i < n + m + 2; i++)
		enic->msix_entry[i].entry = i;

	/* Use multiple RQs if RSS is enabled
	 */

	if (ENIC_SETTING(enic, RSS) &&
	    enic->config.intr_mode < 1 &&
	    enic->rq_count >= n &&
	    enic->wq_count >= m &&
	    enic->cq_count >= n + m &&
	    enic->intr_count >= n + m + 2) {

		if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
					  n + m + 2, n + m + 2) > 0) {

			enic->rq_count = n;
			enic->wq_count = m;
			enic->cq_count = n + m;
			enic->intr_count = n + m + 2;

			vnic_dev_set_intr_mode(enic->vdev,
				VNIC_DEV_INTR_MODE_MSIX);

			return 0;
		}
	}

	if (enic->config.intr_mode < 1 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= m &&
	    enic->cq_count >= 1 + m &&
	    enic->intr_count >= 1 + m + 2) {
		if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
					  1 + m + 2, 1 + m + 2) > 0) {

			enic->rq_count = 1;
			enic->wq_count = m;
			enic->cq_count = 1 + m;
			enic->intr_count = 1 + m + 2;

			vnic_dev_set_intr_mode(enic->vdev,
				VNIC_DEV_INTR_MODE_MSIX);

			return 0;
		}
	}

	/* Next try MSI
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
	 */

	if (enic->config.intr_mode < 2 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 1 &&
	    !pci_enable_msi(enic->pdev)) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 1;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);

		return 0;
	}

	/* Next try INTx
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
	 * (the first INTR is used for WQ/RQ)
	 * (the second INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	if (enic->config.intr_mode < 3 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 3) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 3;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);

		return 0;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);

	return -EINVAL;
}
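
/* Resulting MSI-X vector layout, as an example (illustrative; assumes
 * n = 8 RQs and m = 8 WQs, so n + m + 2 = 18 vectors are requested):
 *
 *	vectors  0..7	RQ NAPI polling		(enic_isr_msix)
 *	vectors  8..15	WQ completion polling	(enic_isr_msix)
 *	vector   16	WQ/RQ error		(enic_isr_msix_err)
 *	vector   17	notify			(enic_isr_msix_notify)
 *
 * matching the enic_msix_*_intr() index helpers used by
 * enic_request_intr() above.
 */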
static void enic_clear_intr_mode(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		pci_disable_msix(enic->pdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		pci_disable_msi(enic->pdev);
		break;
	default:
		break;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
}
static const struct net_device_ops enic_netdev_dynamic_ops = {
	.ndo_open		= enic_open,
	.ndo_stop		= enic_stop,
	.ndo_start_xmit		= enic_hard_start_xmit,
	.ndo_get_stats64	= enic_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= enic_set_rx_mode,
	.ndo_set_mac_address	= enic_set_mac_address_dynamic,
	.ndo_change_mtu		= enic_change_mtu,
	.ndo_vlan_rx_add_vid	= enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= enic_vlan_rx_kill_vid,
	.ndo_tx_timeout		= enic_tx_timeout,
	.ndo_set_vf_port	= enic_set_vf_port,
	.ndo_get_vf_port	= enic_get_vf_port,
	.ndo_set_vf_mac		= enic_set_vf_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= enic_poll_controller,
#endif
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= enic_rx_flow_steer,
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= enic_busy_poll,
#endif
};

static const struct net_device_ops enic_netdev_ops = {
	.ndo_open		= enic_open,
	.ndo_stop		= enic_stop,
	.ndo_start_xmit		= enic_hard_start_xmit,
	.ndo_get_stats64	= enic_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= enic_set_mac_address,
	.ndo_set_rx_mode	= enic_set_rx_mode,
	.ndo_change_mtu		= enic_change_mtu,
	.ndo_vlan_rx_add_vid	= enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= enic_vlan_rx_kill_vid,
	.ndo_tx_timeout		= enic_tx_timeout,
	.ndo_set_vf_port	= enic_set_vf_port,
	.ndo_get_vf_port	= enic_get_vf_port,
	.ndo_set_vf_mac		= enic_set_vf_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= enic_poll_controller,
#endif
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= enic_rx_flow_steer,
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= enic_busy_poll,
#endif
};
static void enic_dev_deinit(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < enic->rq_count; i++) {
		napi_hash_del(&enic->napi[i]);
		netif_napi_del(&enic->napi[i]);
	}
	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		for (i = 0; i < enic->wq_count; i++)
			netif_napi_del(&enic->napi[enic_cq_wq(enic, i)]);

	enic_free_vnic_resources(enic);
	enic_clear_intr_mode(enic);
}

static void enic_kdump_kernel_config(struct enic *enic)
{
	if (is_kdump_kernel()) {
		dev_info(enic_get_dev(enic), "Running from within kdump kernel. Using minimal resources\n");
		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS;
		enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS;
		enic->config.mtu = min_t(u16, 1500, enic->config.mtu);
	}
}
static int enic_dev_init(struct enic *enic)
{
	struct device *dev = enic_get_dev(enic);
	struct net_device *netdev = enic->netdev;
	unsigned int i;
	int err;

	/* Get interrupt coalesce timer info */
	err = enic_dev_intr_coal_timer_info(enic);
	if (err) {
		dev_warn(dev, "Using default conversion factor for "
			"interrupt coalesce timer\n");
		vnic_dev_intr_coal_timer_info_default(enic->vdev);
	}

	/* Get vNIC configuration
	 */

	err = enic_get_vnic_config(enic);
	if (err) {
		dev_err(dev, "Get vNIC configuration failed, aborting\n");
		return err;
	}

	/* Get available resource counts
	 */

	enic_get_res_counts(enic);

	/* Modify resource counts if we are running in a kdump kernel
	 */

	enic_kdump_kernel_config(enic);

	/* Set interrupt mode based on resource counts and system
	 * capabilities
	 */

	err = enic_set_intr_mode(enic);
	if (err) {
		dev_err(dev, "Failed to set intr mode based on resource "
			"counts and system capabilities, aborting\n");
		return err;
	}

	/* Allocate and configure vNIC resources
	 */

	err = enic_alloc_vnic_resources(enic);
	if (err) {
		dev_err(dev, "Failed to alloc vNIC resources, aborting\n");
		goto err_out_free_vnic_resources;
	}

	enic_init_vnic_resources(enic);

	err = enic_set_rss_nic_cfg(enic);
	if (err) {
		dev_err(dev, "Failed to config nic, aborting\n");
		goto err_out_free_vnic_resources;
	}

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	default:
		netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
		napi_hash_add(&enic->napi[0]);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->rq_count; i++) {
			netif_napi_add(netdev, &enic->napi[i],
				enic_poll_msix_rq, NAPI_POLL_WEIGHT);
			napi_hash_add(&enic->napi[i]);
		}
		for (i = 0; i < enic->wq_count; i++)
			netif_napi_add(netdev, &enic->napi[enic_cq_wq(enic, i)],
				enic_poll_msix_wq, NAPI_POLL_WEIGHT);
		break;
	}

	return 0;

err_out_free_vnic_resources:
	enic_clear_intr_mode(enic);
	enic_free_vnic_resources(enic);

	return err;
}
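/* enic_iounmap - unmap whichever BARs enic_probe() managed to map.
 * Safe to call with a partially initialized bar[] array, since entries
 * that were never mapped have a NULL vaddr and are skipped.
 */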
static void enic_iounmap(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
		if (enic->bar[i].vaddr)
			iounmap(enic->bar[i].vaddr);
}
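/* enic_probe - PCI probe entry point.  Sets up the PCI device, maps the
 * vNIC BARs, registers with the vNIC device layer, optionally enables
 * SR-IOV, initializes the device, and finally registers the net device.
 * Each setup step has a matching unwind label at the bottom of the
 * function, taken in reverse order on failure.
 */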
static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct enic *enic;
	int using_dac = 0;
	unsigned int i;
	int err;
#ifdef CONFIG_PCI_IOV
	int pos = 0;
#endif
	int num_pps = 1;

	/* Allocate net device structure and initialize.  Private
	 * instance data is initialized to zero.
	 */

	netdev = alloc_etherdev_mqs(sizeof(struct enic),
				    ENIC_RQ_MAX, ENIC_WQ_MAX);
	if (!netdev)
		return -ENOMEM;

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	enic = netdev_priv(netdev);
	enic->netdev = netdev;
	enic->pdev = pdev;
	/* Setup PCI resources
	 */

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device, aborting\n");
		goto err_out_free_netdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "Cannot request PCI regions, aborting\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
	 * limitation for the device.  Try 64-bit first, and
	 * fall back to 32-bit.
	 */

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(dev, "No usable DMA configuration, aborting\n");
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(dev, "Unable to obtain %u-bit DMA "
				"for consistent allocations, aborting\n", 32);
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(dev, "Unable to obtain %u-bit DMA "
				"for consistent allocations, aborting\n", 64);
			goto err_out_release_regions;
		}
		using_dac = 1;
	}
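	/* using_dac records whether the 64-bit streaming DMA mask was
	 * accepted; it is consulted further down to decide whether to
	 * advertise NETIF_F_HIGHDMA on the net device.
	 */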
	/* Map vNIC resources from BAR0-5
	 */

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++) {
		if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
			continue;
		enic->bar[i].len = pci_resource_len(pdev, i);
		enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
		if (!enic->bar[i].vaddr) {
			dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i);
			err = -ENODEV;
			goto err_out_iounmap;
		}
		enic->bar[i].bus_addr = pci_resource_start(pdev, i);
	}
	/* Register vNIC device
	 */

	enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
		ARRAY_SIZE(enic->bar));
	if (!enic->vdev) {
		dev_err(dev, "vNIC registration failed, aborting\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

	err = vnic_devcmd_init(enic->vdev);
	if (err)
		goto err_out_vnic_unregister;
#ifdef CONFIG_PCI_IOV
	/* Get number of subvnics */
	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF,
			&enic->num_vfs);
		if (enic->num_vfs) {
			err = pci_enable_sriov(pdev, enic->num_vfs);
			if (err) {
				dev_err(dev, "SRIOV enable failed, aborting."
					" pci_enable_sriov() returned %d\n",
					err);
				goto err_out_vnic_unregister;
			}
			enic->priv_flags |= ENIC_SRIOV_ENABLED;
			num_pps = enic->num_vfs;
		}
	}
#endif
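	/* With SR-IOV enabled there is one port profile slot per VF
	 * (num_pps = enic->num_vfs); otherwise a single slot suffices
	 * for the PF itself.
	 */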
	/* Allocate structure for port profiles */
	enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL);
	if (!enic->pp) {
		err = -ENOMEM;
		goto err_out_disable_sriov_pp;
	}
	/* Issue device open to get device in known state
	 */

	err = enic_dev_open(enic);
	if (err) {
		dev_err(dev, "vNIC dev open failed, aborting\n");
		goto err_out_disable_sriov;
	}

	/* Setup devcmd lock
	 */

	spin_lock_init(&enic->devcmd_lock);
	spin_lock_init(&enic->enic_api_lock);

	/*
	 * Set ingress vlan rewrite mode before vnic initialization
	 */

	err = enic_dev_set_ig_vlan_rewrite_mode(enic);
	if (err) {
		dev_err(dev,
			"Failed to set ingress vlan rewrite mode, aborting.\n");
		goto err_out_dev_close;
	}

	/* Issue device init to initialize the vnic-to-switch link.
	 * We'll start with carrier off and wait for link UP
	 * notification later to turn on carrier.  We don't need
	 * to wait here for the vnic-to-switch link initialization
	 * to complete; link UP notification is the indication that
	 * the process is complete.
	 */

	netif_carrier_off(netdev);
	/* Do not call dev_init for a dynamic vnic.
	 * For a dynamic vnic, init_prov_info will be
	 * called later by an upper layer.
	 */

	if (!enic_is_dynamic(enic)) {
		err = vnic_dev_init(enic->vdev, 0);
		if (err) {
			dev_err(dev, "vNIC dev init failed, aborting\n");
			goto err_out_dev_close;
		}
	}

	err = enic_dev_init(enic);
	if (err) {
		dev_err(dev, "Device initialization failed, aborting\n");
		goto err_out_dev_close;
	}

	netif_set_real_num_tx_queues(netdev, enic->wq_count);
	netif_set_real_num_rx_queues(netdev, enic->rq_count);
	/* Setup notification timer, HW reset task, and wq locks
	 */

	init_timer(&enic->notify_timer);
	enic->notify_timer.function = enic_notify_timer;
	enic->notify_timer.data = (unsigned long)enic;

	enic_set_rx_coal_setting(enic);
	INIT_WORK(&enic->reset, enic_reset);
	INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset);
	INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);

	for (i = 0; i < enic->wq_count; i++)
		spin_lock_init(&enic->wq_lock[i]);
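	/* Each WQ (TX queue) gets its own spinlock so that the
	 * per-queue transmit paths can run concurrently without
	 * contending on a single driver-wide lock.
	 */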
	/* Register net device
	 */

	enic->port_mtu = enic->config.mtu;
	(void)enic_change_mtu(netdev, enic->port_mtu);

	err = enic_set_mac_addr(netdev, enic->mac_addr);
	if (err) {
		dev_err(dev, "Invalid MAC address, aborting\n");
		goto err_out_dev_deinit;
	}

	enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
	/* The rx coalesce time was already initialized; it is used
	 * when adaptive coalescing is turned off.
	 */
	enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
		netdev->netdev_ops = &enic_netdev_dynamic_ops;
	else
		netdev->netdev_ops = &enic_netdev_ops;
	netdev->watchdog_timeo = 2 * HZ;
	enic_set_ethtool_ops(netdev);

	netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	if (ENIC_SETTING(enic, LOOP)) {
		netdev->features &= ~NETIF_F_HW_VLAN_CTAG_TX;
		enic->loop_enable = 1;
		enic->loop_tag = enic->config.loop_tag;
		dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
	}
	if (ENIC_SETTING(enic, TXCSUM))
		netdev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	if (ENIC_SETTING(enic, TSO))
		netdev->hw_features |= NETIF_F_TSO |
			NETIF_F_TSO6 | NETIF_F_TSO_ECN;
	if (ENIC_SETTING(enic, RSS))
		netdev->hw_features |= NETIF_F_RXHASH;
	if (ENIC_SETTING(enic, RXCSUM))
		netdev->hw_features |= NETIF_F_RXCSUM;

	netdev->features |= netdev->hw_features;

#ifdef CONFIG_RFS_ACCEL
	netdev->hw_features |= NETIF_F_NTUPLE;
#endif

	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;
	netdev->priv_flags |= IFF_UNICAST_FLT;

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Cannot register net device, aborting\n");
		goto err_out_dev_deinit;
	}
	enic->rx_copybreak = RX_COPYBREAK_DEFAULT;

	return 0;
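	/* Error unwind: the labels below undo the setup steps above in
	 * reverse order; control jumps to the label matching the last
	 * step that succeeded.
	 */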
err_out_dev_deinit:
	enic_dev_deinit(enic);
err_out_dev_close:
	vnic_dev_close(enic->vdev);
err_out_disable_sriov:
	kfree(enic->pp);
err_out_disable_sriov_pp:
#ifdef CONFIG_PCI_IOV
	if (enic_sriov_enabled(enic)) {
		pci_disable_sriov(pdev);
		enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
	}
#endif
err_out_vnic_unregister:
	vnic_dev_unregister(enic->vdev);
err_out_iounmap:
	enic_iounmap(enic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_netdev:
	free_netdev(netdev);

	return err;
}
static void enic_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct enic *enic = netdev_priv(netdev);

		cancel_work_sync(&enic->reset);
		cancel_work_sync(&enic->change_mtu_work);
		unregister_netdev(netdev);
		enic_dev_deinit(enic);
		vnic_dev_close(enic->vdev);
#ifdef CONFIG_PCI_IOV
		if (enic_sriov_enabled(enic)) {
			pci_disable_sriov(pdev);
			enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
		}
#endif
		kfree(enic->pp);
		vnic_dev_unregister(enic->vdev);
		enic_iounmap(enic);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		free_netdev(netdev);
	}
}
static struct pci_driver enic_driver = {
	.name = DRV_NAME,
	.id_table = enic_id_table,
	.probe = enic_probe,
	.remove = enic_remove,
};
static int __init enic_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);

	return pci_register_driver(&enic_driver);
}
static void __exit enic_cleanup_module(void)
{
	pci_unregister_driver(&enic_driver);
}
module_init(enic_init_module);
module_exit(enic_cleanup_module);