/*
 * Copyright (C) 2005 - 2016 Broadcom
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* The num_vfs module param is obsolete.
 * Use the sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

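/* Example (illustrative, not part of the original file): both parameters are
 * read-only at runtime (S_IRUGO), so they can only be set when the module is
 * loaded, e.g. "modprobe be2net num_vfs=4 rx_frag_size=4096". num_vfs is kept
 * only for backward compatibility; sysfs is the supported way to enable VFs.
 */
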
static const struct pci_device_id be_dev_ids[] = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {

#define BE_VF_IF_EN_FLAGS       (BE_IF_FLAGS_UNTAGGED | \
                                 BE_IF_FLAGS_BROADCAST | \
                                 BE_IF_FLAGS_MULTICAST | \
                                 BE_IF_FLAGS_PASS_L3L4_ERRORS)

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;

        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                          u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                      GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        return 0;
}

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                              &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                               PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        int status;

        /* On lancer interrupts can't be controlled via this register */
        if (lancer_chip(adapter))
                return;

        if (be_check_error(adapter, BE_ERROR_EEH))
                return;

        status = be_cmd_intr_set(adapter, enable);
        if (status)
                be_reg_intr_set(adapter, enable);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;

        if (be_check_error(adapter, BE_ERROR_HW))
                return;

        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
                          u16 posted)
{
        u32 val = 0;

        if (be_check_error(adapter, BE_ERROR_HW))
                return;

        val |= txo->q.id & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                         bool arm, bool clear_int, u16 num_popped,
                         u32 eq_delay_mult_enc)
{
        u32 val = 0;

        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (be_check_error(adapter, BE_ERROR_HW))
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;

        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (be_check_error(adapter, BE_ERROR_HW))
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->pdev->dev;
        struct sockaddr *addr = p;
        int status;
        u8 mac[ETH_ALEN];
        u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* Proceed further only if the user-provided MAC is different
         * from the active MAC
         */
        if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
                return 0;

        /* if device is not running, copy MAC to netdev->dev_addr */
        if (!netif_running(netdev))
                goto done;

        /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
         * privilege or if PF did not provision the new MAC address.
         * On BE3, this cmd will always fail if the VF doesn't have the
         * FILTMGMT privilege. This failure is OK, only if the PF programmed
         * the MAC for the VF.
         */
        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                 adapter->if_handle, &adapter->pmac_id[0], 0);
        if (!status) {
                curr_pmac_id = adapter->pmac_id[0];

                /* Delete the old programmed MAC. This call may fail if the
                 * old MAC was already deleted by the PF driver.
                 */
                if (adapter->pmac_id[0] != old_pmac_id)
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        old_pmac_id, 0);
        }

        /* Decide if the new MAC is successfully activated only after
         * querying the FW
         */
        status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
                                       adapter->if_handle, true, 0);
        if (status)
                goto err;

        /* The MAC change did not happen, either due to lack of privilege
         * or PF didn't pre-provision.
         */
        if (!ether_addr_equal(addr->sa_data, mac)) {
                status = -EPERM;
                goto err;
        }
done:
        ether_addr_copy(netdev->dev_addr, addr->sa_data);
        dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
        return 0;
err:
        dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
        return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else if (BE3_chip(adapter)) {
                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else {
                struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        }
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else if (BE3_chip(adapter)) {
                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else {
                struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        }
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_filtered =
                port_stats->rx_address_filtered +
                port_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v2 *port_stats =
                &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
        if (be_roce_supported(adapter)) {
                drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
                drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
                drvs->rx_roce_frames = port_stats->roce_frames_received;
                drvs->roce_drops_crc = port_stats->roce_drops_crc;
                drvs->roce_drops_payload_len =
                        port_stats->roce_drops_payload_len;
        }
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_filtered =
                pport_stats->rx_address_filtered +
                pport_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   (x & 0xFFFF)
#define hi(x)                   (x & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}

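/* Worked example (illustrative): with *acc == 0x0001fff0 and val == 0x0010,
 * lo(*acc) is 0xfff0, so the 16-bit HW counter has wrapped (0x0010 < 0xfff0).
 * newacc = hi(*acc) + val + 65536 = 0x00010000 + 0x0010 + 0x10000 = 0x00020010;
 * the upper half of the accumulator counts completed 64K wraps of the counter.
 */
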
static void populate_erx_stats(struct be_adapter *adapter,
                               struct be_rx_obj *rxo, u32 erx_stat)
{
        if (!BEx_chip(adapter))
                rx_stats(rxo)->rx_drops_no_frags = erx_stat;
        else
                /* This erx HW counter wraps around after 65535. The driver
                 * accumulates it into a 32-bit value.
                 */
                accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        u32 erx_stat;
        int i;

        if (lancer_chip(adapter)) {
                populate_lancer_stats(adapter);
        } else {
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
                else if (BE3_chip(adapter))
                        /* for BE3 */
                        populate_be_v1_stats(adapter);
                else
                        populate_be_v2_stats(adapter);

                /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
                for_all_rx_queues(adapter, rxo, i) {
                        erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
                        populate_erx_stats(adapter, rxo, erx_stat);
                }
        }
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                                struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);

                do {
                        start = u64_stats_fetch_begin_irq(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                     rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);

                do {
                        start = u64_stats_fetch_begin_irq(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f; it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                drvs->rx_input_fifo_overflow_drop +
                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if (link_status)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);

        netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
}

static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
        struct be_tx_stats *stats = tx_stats(txo);
        u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_bytes += skb->len;
        stats->tx_pkts += tx_pkts;
        if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
                stats->tx_vxlan_offload_pkts += tx_pkts;
        u64_stats_update_end(&stats->sync);
}

/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
        /* +1 for the header wrb */
        return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}

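/* Example (illustrative): a TCP segment with linear header data and two page
 * fragments needs 1 (header WRB) + 1 (linear head) + 2 (frags) = 4 WRBs.
 * A fully linear skb needs just 2: the header WRB plus one data WRB.
 */
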
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
        wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
        wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
        wrb->rsvd0 = 0;
}

/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
 */
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
{
        memset(wrb, 0, sizeof(*wrb));
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                     struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = skb_vlan_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If vlan priority provided by OS is NOT in available bmap */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                           adapter->recommended_prio_bits;

        return vlan_tag;
}

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
        return (inner_ip_hdr(skb)->version == 4) ?
                inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
        return (ip_hdr(skb)->version == 4) ?
                ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

static inline bool be_is_txq_full(struct be_tx_obj *txo)
{
        return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
}

static inline bool be_can_txq_wake(struct be_tx_obj *txo)
{
        return atomic_read(&txo->q.used) < txo->q.len / 2;
}

static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
{
        return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
}

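/* These predicates implement the stop/wake hysteresis used by the xmit path:
 * be_xmit() stops the subqueue once fewer than BE_MAX_TX_FRAG_COUNT free
 * entries remain (a worst-case packet might not fit), and the completion path
 * wakes it only after the queue drains below half capacity, so the queue
 * does not flap around the full threshold.
 */
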
static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
                                       struct sk_buff *skb,
                                       struct be_wrb_params *wrb_params)
{
        u16 proto;

        if (skb_is_gso(skb)) {
                BE_WRB_F_SET(wrb_params->features, LSO, 1);
                wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        BE_WRB_F_SET(wrb_params->features, LSO6, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (skb->encapsulation) {
                        BE_WRB_F_SET(wrb_params->features, IPCS, 1);
                        proto = skb_inner_ip_proto(skb);
                } else {
                        proto = skb_ip_proto(skb);
                }
                if (proto == IPPROTO_TCP)
                        BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
                else if (proto == IPPROTO_UDP)
                        BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
        }

        if (skb_vlan_tag_present(skb)) {
                BE_WRB_F_SET(wrb_params->features, VLAN, 1);
                wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
        }

        BE_WRB_F_SET(wrb_params->features, CRC, 1);
}

static void wrb_fill_hdr(struct be_adapter *adapter,
                         struct be_eth_hdr_wrb *hdr,
                         struct be_wrb_params *wrb_params,
                         struct sk_buff *skb)
{
        memset(hdr, 0, sizeof(*hdr));

        SET_TX_WRB_HDR_BITS(crc, hdr,
                            BE_WRB_F_GET(wrb_params->features, CRC));
        SET_TX_WRB_HDR_BITS(ipcs, hdr,
                            BE_WRB_F_GET(wrb_params->features, IPCS));
        SET_TX_WRB_HDR_BITS(tcpcs, hdr,
                            BE_WRB_F_GET(wrb_params->features, TCPCS));
        SET_TX_WRB_HDR_BITS(udpcs, hdr,
                            BE_WRB_F_GET(wrb_params->features, UDPCS));

        SET_TX_WRB_HDR_BITS(lso, hdr,
                            BE_WRB_F_GET(wrb_params->features, LSO));
        SET_TX_WRB_HDR_BITS(lso6, hdr,
                            BE_WRB_F_GET(wrb_params->features, LSO6));
        SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

        /* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
         * hack is not needed, the evt bit is set while ringing DB.
         */
        SET_TX_WRB_HDR_BITS(event, hdr,
                            BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
        SET_TX_WRB_HDR_BITS(vlan, hdr,
                            BE_WRB_F_GET(wrb_params->features, VLAN));
        SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

        SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
        SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
        SET_TX_WRB_HDR_BITS(mgmt, hdr,
                            BE_WRB_F_GET(wrb_params->features, OS2BMC));
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                          bool unmap_single)
{
        dma_addr_t dma;
        u32 frag_len = le32_to_cpu(wrb->frag_len);

        dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
                (u64)le32_to_cpu(wrb->frag_pa_lo);
        if (frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
        }
}

/* Grab a WRB header for xmit */
static u32 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
{
        u32 head = txo->q.head;

        queue_head_inc(&txo->q);
        return head;
}

/* Set up the WRB header for xmit */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
                                struct be_tx_obj *txo,
                                struct be_wrb_params *wrb_params,
                                struct sk_buff *skb, u16 head)
{
        u32 num_frags = skb_wrb_cnt(skb);
        struct be_queue_info *txq = &txo->q;
        struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

        wrb_fill_hdr(adapter, hdr, wrb_params, skb);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        BUG_ON(txo->sent_skb_list[head]);
        txo->sent_skb_list[head] = skb;
        txo->last_req_hdr = head;
        atomic_add(num_frags, &txq->used);
        txo->last_req_wrb_cnt = num_frags;
        txo->pend_wrb_cnt += num_frags;
}

/* Setup a WRB fragment (buffer descriptor) for xmit */
static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
                                 int len)
{
        struct be_eth_wrb *wrb;
        struct be_queue_info *txq = &txo->q;

        wrb = queue_head_node(txq);
        wrb_fill(wrb, busaddr, len);
        queue_head_inc(txq);
}

/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
                            struct be_tx_obj *txo, u32 head, bool map_single,
                            u32 copied)
{
        struct device *dev;
        struct be_eth_wrb *wrb;
        struct be_queue_info *txq = &txo->q;

        dev = &adapter->pdev->dev;
        txq->head = head;

        /* skip the first wrb (hdr); it's not mapped */
        queue_head_inc(txq);
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= le32_to_cpu(wrb->frag_len);
                queue_head_inc(txq);
        }

        txq->head = head;
}

/* Enqueue the given packet for transmit. This routine allocates WRBs for the
 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
 * of WRBs used up by the packet.
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
                           struct sk_buff *skb,
                           struct be_wrb_params *wrb_params)
{
        u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
        struct device *dev = &adapter->pdev->dev;
        struct be_queue_info *txq = &txo->q;
        bool map_single = false;
        u32 head = txq->head;
        dma_addr_t busaddr;
        int len;

        head = be_tx_get_wrb_hdr(txo);

        if (skb->len > skb->data_len) {
                len = skb_headlen(skb);

                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                be_tx_setup_wrb_frag(txo, busaddr, len);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

                len = skb_frag_size(frag);

                busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                be_tx_setup_wrb_frag(txo, busaddr, len);
                copied += len;
        }

        be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);

        be_tx_stats_update(txo, skb);
        return wrb_cnt;

dma_err:
        adapter->drv_stats.dma_map_errors++;
        be_xmit_restore(adapter, txo, head, map_single, copied);
        return 0;
}

static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
        return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb,
                                             struct be_wrb_params *wrb_params)
{
        u16 vlan_tag = 0;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return skb;

        if (skb_vlan_tag_present(skb))
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);

        if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
                if (!vlan_tag)
                        vlan_tag = adapter->pvid;
                /* f/w workaround: setting skip_hw_vlan = 1 informs the F/W to
                 * skip VLAN insertion
                 */
                if (wrb_params)
                        BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
        }

        if (vlan_tag) {
                skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
                                                vlan_tag);
                if (unlikely(!skb))
                        return skb;
                skb->vlan_tci = 0;
        }

        /* Insert the outer VLAN, if any */
        if (adapter->qnq_vid) {
                vlan_tag = adapter->qnq_vid;
                skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
                                                vlan_tag);
                if (unlikely(!skb))
                        return skb;
                if (wrb_params)
                        BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
        }

        return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
        struct ethhdr *eh = (struct ethhdr *)skb->data;
        u16 offset = ETH_HLEN;

        if (eh->h_proto == htons(ETH_P_IPV6)) {
                struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

                offset += sizeof(struct ipv6hdr);
                if (ip6h->nexthdr != NEXTHDR_TCP &&
                    ip6h->nexthdr != NEXTHDR_UDP) {
                        struct ipv6_opt_hdr *ehdr =
                                (struct ipv6_opt_hdr *)(skb->data + offset);

                        /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
                        if (ehdr->hdrlen == 0xff)
                                return true;
                }
        }
        return false;
}

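/* Note (illustrative): hdrlen is the 2nd byte after the fixed IPv6 header and
 * is expressed in 8-octet units excluding the first 8 octets, so 0xff would
 * describe a (255 + 1) * 8 = 2048-byte extension header; this is the pattern
 * the HW workaround below appears to key on.
 */
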
static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
                                                  struct sk_buff *skb,
                                                  struct be_wrb_params
                                                  *wrb_params)
{
        struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
        unsigned int eth_hdr_len;
        struct iphdr *ip;

        /* For padded packets, BE HW modifies tot_len field in IP header
         * incorrectly when VLAN tag is inserted by HW.
         * For padded packets, Lancer computes incorrect checksum.
         */
        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                                                VLAN_ETH_HLEN : ETH_HLEN;
        if (skb->len <= 60 &&
            (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
            is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }

        /* If vlan tag is already inlined in the packet, skip HW VLAN
         * tagging in pvid-tagging mode
         */
        if (be_pvid_tagging_enabled(adapter) &&
            veh->h_vlan_proto == htons(ETH_P_8021Q))
                BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
         * Manually insert VLAN in pkt.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
            skb_vlan_tag_present(skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
                if (unlikely(!skb))
                        goto err;
        }

        /* HW may lockup when VLAN HW tagging is requested on
         * certain ipv6 packets. Drop such pkts if the HW workaround to
         * skip HW tagging is not enabled by FW.
         */
        if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
                     (adapter->pvid || adapter->qnq_vid) &&
                     !qnq_async_evt_rcvd(adapter)))
                goto tx_drop;

        /* Manual VLAN tag insertion to prevent:
         * ASIC lockup when the ASIC inserts VLAN tag into
         * certain ipv6 packets. Insert VLAN tags in driver,
         * and set event, completion, vlan bits accordingly
         * in the Tx WRB.
         */
        if (be_ipv6_tx_stall_chk(adapter, skb) &&
            be_vlan_tag_tx_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
                if (unlikely(!skb))
                        goto err;
        }

        return skb;
tx_drop:
        dev_kfree_skb_any(skb);
err:
        return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
                                           struct sk_buff *skb,
                                           struct be_wrb_params *wrb_params)
{
        int err;

        /* Lancer, SH and BE3 in SRIOV mode have a bug wherein
         * packets that are 32 bytes or less may cause a transmit stall
         * on that port. The workaround is to pad such packets
         * (len <= 32 bytes) to a minimum length of 36 bytes.
         */
        if (skb->len <= 32) {
                if (skb_put_padto(skb, 36))
                        return NULL;
        }

        if (BEx_chip(adapter) || lancer_chip(adapter)) {
                skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
                if (!skb)
                        return NULL;
        }

        /* The stack can send us skbs with length greater than
         * what the HW can handle. Trim the extra bytes.
         */
        WARN_ON_ONCE(skb->len > BE_MAX_GSO_SIZE);
        err = pskb_trim(skb, BE_MAX_GSO_SIZE);
        WARN_ON(err);

        return skb;
}

static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
        struct be_queue_info *txq = &txo->q;
        struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

        /* Mark the last request eventable if it hasn't been marked already */
        if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
                hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

        /* compose a dummy wrb if there is an odd number of wrbs to notify */
        if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
                wrb_fill_dummy(queue_head_node(txq));
                queue_head_inc(txq);
                atomic_inc(&txq->used);
                txo->pend_wrb_cnt++;
                hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
                                           TX_HDR_WRB_NUM_SHIFT);
                hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
                                          TX_HDR_WRB_NUM_SHIFT);
        }
        be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
        txo->pend_wrb_cnt = 0;
}

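/* Example (illustrative): if the last packet used 3 WRBs on a non-Lancer
 * queue, the dummy WRB above makes the doorbell count even (4), and num_wrb
 * in that packet's header WRB is bumped from 3 to 4 so the HW consumes the
 * dummy as part of the same request.
 */
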
/* OS2BMC related */

#define DHCP_CLIENT_PORT        68
#define DHCP_SERVER_PORT        67
#define NET_BIOS_PORT1          137
#define NET_BIOS_PORT2          138
#define DHCPV6_RAS_PORT         547

#define is_mc_allowed_on_bmc(adapter, eh)       \
        (!is_multicast_filt_enabled(adapter) && \
         is_multicast_ether_addr(eh->h_dest) && \
         !is_broadcast_ether_addr(eh->h_dest))

#define is_bc_allowed_on_bmc(adapter, eh)       \
        (!is_broadcast_filt_enabled(adapter) && \
         is_broadcast_ether_addr(eh->h_dest))

#define is_arp_allowed_on_bmc(adapter, skb)     \
        (is_arp(skb) && is_arp_filt_enabled(adapter))

#define is_broadcast_packet(eh, adapter)        \
        (is_multicast_ether_addr(eh->h_dest) && \
         !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))

#define is_arp(skb)     (skb->protocol == htons(ETH_P_ARP))

#define is_arp_filt_enabled(adapter)    \
        (adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))

#define is_dhcp_client_filt_enabled(adapter)    \
        (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)

#define is_dhcp_srvr_filt_enabled(adapter)      \
        (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)

#define is_nbios_filt_enabled(adapter)  \
        (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)

#define is_ipv6_na_filt_enabled(adapter)        \
        (adapter->bmc_filt_mask &       \
         BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)

#define is_ipv6_ra_filt_enabled(adapter)        \
        (adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)

#define is_ipv6_ras_filt_enabled(adapter)       \
        (adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)

#define is_broadcast_filt_enabled(adapter)      \
        (adapter->bmc_filt_mask & BMC_FILT_BROADCAST)

#define is_multicast_filt_enabled(adapter)      \
        (adapter->bmc_filt_mask & BMC_FILT_MULTICAST)

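/* Example (illustrative): a DHCP-client broadcast (UDP dport 68) is mirrored
 * to the BMC by be_send_pkt_to_bmc() below only when the FW-reported
 * bmc_filt_mask has BMC_FILT_BROADCAST_DHCP_CLIENT set; likewise an ARP
 * broadcast is mirrored only when BMC_FILT_BROADCAST_ARP is set.
 */
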
static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
                               struct sk_buff **skb)
{
        struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
        bool os2bmc = false;

        if (!be_is_os2bmc_enabled(adapter))
                goto done;

        if (!is_multicast_ether_addr(eh->h_dest))
                goto done;

        if (is_mc_allowed_on_bmc(adapter, eh) ||
            is_bc_allowed_on_bmc(adapter, eh) ||
            is_arp_allowed_on_bmc(adapter, (*skb))) {
                os2bmc = true;
                goto done;
        }

        if ((*skb)->protocol == htons(ETH_P_IPV6)) {
                struct ipv6hdr *hdr = ipv6_hdr((*skb));
                u8 nexthdr = hdr->nexthdr;

                if (nexthdr == IPPROTO_ICMPV6) {
                        struct icmp6hdr *icmp6 = icmp6_hdr((*skb));

                        switch (icmp6->icmp6_type) {
                        case NDISC_ROUTER_ADVERTISEMENT:
                                os2bmc = is_ipv6_ra_filt_enabled(adapter);
                                goto done;
                        case NDISC_NEIGHBOUR_ADVERTISEMENT:
                                os2bmc = is_ipv6_na_filt_enabled(adapter);
                                goto done;
                        default:
                                break;
                        }
                }
        }

        if (is_udp_pkt((*skb))) {
                struct udphdr *udp = udp_hdr((*skb));

                switch (ntohs(udp->dest)) {
                case DHCP_CLIENT_PORT:
                        os2bmc = is_dhcp_client_filt_enabled(adapter);
                        goto done;
                case DHCP_SERVER_PORT:
                        os2bmc = is_dhcp_srvr_filt_enabled(adapter);
                        goto done;
                case NET_BIOS_PORT1:
                case NET_BIOS_PORT2:
                        os2bmc = is_nbios_filt_enabled(adapter);
                        goto done;
                case DHCPV6_RAS_PORT:
                        os2bmc = is_ipv6_ras_filt_enabled(adapter);
                        goto done;
                default:
                        break;
                }
        }
done:
        /* For packets destined to the BMC over a VLAN, the ASIC expects
         * the VLAN tag to be inline in the packet.
         */
        if (os2bmc)
                *skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);

        return os2bmc;
}

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        u16 q_idx = skb_get_queue_mapping(skb);
        struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
        struct be_wrb_params wrb_params = { 0 };
        bool flush = !skb->xmit_more;
        u16 wrb_cnt;

        skb = be_xmit_workarounds(adapter, skb, &wrb_params);
        if (unlikely(!skb))
                goto drop;

        be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

        wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
        if (unlikely(!wrb_cnt)) {
                dev_kfree_skb_any(skb);
                goto drop;
        }

        /* if os2bmc is enabled and if the pkt is destined to bmc,
         * enqueue the pkt a 2nd time with mgmt bit set.
         */
        if (be_send_pkt_to_bmc(adapter, &skb)) {
                BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
                wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
                if (unlikely(!wrb_cnt))
                        goto drop;
                else
                        skb_get(skb);
        }

        if (be_is_txq_full(txo)) {
                netif_stop_subqueue(netdev, q_idx);
                tx_stats(txo)->tx_stops++;
        }

        if (flush || __netif_subqueue_stopped(netdev, q_idx))
                be_xmit_flush(adapter, txo);

        return NETDEV_TX_OK;
drop:
        tx_stats(txo)->tx_drv_drops++;
        /* Flush the already enqueued tx requests */
        if (flush && txo->pend_wrb_cnt)
                be_xmit_flush(adapter, txo);

        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->pdev->dev;

        if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
                dev_info(dev, "MTU must be between %d and %d bytes\n",
                         BE_MIN_MTU, BE_MAX_MTU);
                return -EINVAL;
        }

        dev_info(dev, "MTU changed from %d to %d bytes\n",
                 netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
        return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
                BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static int be_set_vlan_promisc(struct be_adapter *adapter)
{
        struct device *dev = &adapter->pdev->dev;
        int status;

        if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
                return 0;

        status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
        if (!status) {
                dev_info(dev, "Enabled VLAN promiscuous mode\n");
                adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
        } else {
                dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
        }
        return status;
}

static int be_clear_vlan_promisc(struct be_adapter *adapter)
{
        struct device *dev = &adapter->pdev->dev;
        int status;

        status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
        if (!status) {
                dev_info(dev, "Disabling VLAN promiscuous mode\n");
                adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
        }
        return status;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        struct device *dev = &adapter->pdev->dev;
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        u16 num = 0, i = 0;
        int status = 0;

        /* No need to change the VLAN state if the I/F is in promiscuous mode */
        if (adapter->netdev->flags & IFF_PROMISC)
                return 0;

        if (adapter->vlans_added > be_max_vlans(adapter))
                return be_set_vlan_promisc(adapter);

        if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
                status = be_clear_vlan_promisc(adapter);
                if (status)
                        return status;
        }
        /* Construct VLAN Table to give to HW */
        for_each_set_bit(i, adapter->vids, VLAN_N_VID)
                vids[num++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
        if (status) {
                dev_err(dev, "Setting HW VLAN filtering failed\n");
                /* Set to VLAN promisc mode as setting VLAN filter failed */
                if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
                    addl_status(status) ==
                                MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
                        return be_set_vlan_promisc(adapter);
        }
        return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                return status;

        if (test_bit(vid, adapter->vids))
                return status;

        set_bit(vid, adapter->vids);
        adapter->vlans_added++;

        return be_vid_config(adapter);
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                return 0;

        if (!test_bit(vid, adapter->vids))
                return 0;

        clear_bit(vid, adapter->vids);
        adapter->vlans_added--;

        return be_vid_config(adapter);
}

static void be_set_all_promisc(struct be_adapter *adapter)
{
        be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
        adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_mc_promisc(struct be_adapter *adapter)
{
        int status;

        if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
                return;

        status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
        if (!status)
                adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
}

static void be_set_uc_promisc(struct be_adapter *adapter)
{
        int status;

        if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS)
                return;

        status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, ON);
        if (!status)
                adapter->if_flags |= BE_IF_FLAGS_PROMISCUOUS;
}

static void be_clear_uc_promisc(struct be_adapter *adapter)
{
        int status;

        if (!(adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS))
                return;

        status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, OFF);
        if (!status)
                adapter->if_flags &= ~BE_IF_FLAGS_PROMISCUOUS;
}

/* The below 2 functions are the callback args for __dev_mc_sync/dev_uc_sync().
 * We use a single callback function for both sync and unsync. We really don't
 * add/remove addresses through this callback. But, we use it to detect changes
 * to the uc/mc lists. The entire uc/mc list is programmed in be_set_rx_mode().
 */
static int be_uc_list_update(struct net_device *netdev,
                             const unsigned char *addr)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->update_uc_list = true;
        return 0;
}

static int be_mc_list_update(struct net_device *netdev,
                             const unsigned char *addr)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->update_mc_list = true;
        return 0;
}

static void be_set_mc_list(struct be_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        bool mc_promisc = false;
        int status;

        __dev_mc_sync(netdev, be_mc_list_update, be_mc_list_update);

        if (netdev->flags & IFF_PROMISC) {
                adapter->update_mc_list = false;
        } else if (netdev->flags & IFF_ALLMULTI ||
                   netdev_mc_count(netdev) > be_max_mc(adapter)) {
                /* Enable multicast promisc if num configured exceeds
                 * what we support
                 */
                mc_promisc = true;
                adapter->update_mc_list = false;
        } else if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS) {
                /* Update mc-list unconditionally if the iface was previously
                 * in mc-promisc mode and now is out of that mode.
                 */
                adapter->update_mc_list = true;
        }

        if (mc_promisc) {
                be_set_mc_promisc(adapter);
        } else if (adapter->update_mc_list) {
                status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
                if (!status)
                        adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
                else
                        be_set_mc_promisc(adapter);

                adapter->update_mc_list = false;
        }
}

static void be_clear_mc_list(struct be_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;

        __dev_mc_unsync(netdev, NULL);
        be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, OFF);
}

static void be_set_uc_list(struct be_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct netdev_hw_addr *ha;
        bool uc_promisc = false;
        int i = 1; /* First slot is claimed by the Primary MAC */

        __dev_uc_sync(netdev, be_uc_list_update, be_uc_list_update);

        if (netdev->flags & IFF_PROMISC) {
                adapter->update_uc_list = false;
        } else if (netdev_uc_count(netdev) > (be_max_uc(adapter) - 1)) {
                uc_promisc = true;
                adapter->update_uc_list = false;
        } else if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS) {
                /* Update uc-list unconditionally if the iface was previously
                 * in uc-promisc mode and now is out of that mode.
                 */
                adapter->update_uc_list = true;
        }

        if (uc_promisc) {
                be_set_uc_promisc(adapter);
        } else if (adapter->update_uc_list) {
                be_clear_uc_promisc(adapter);

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter,
                                        (u8 *)ha->addr, adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
                adapter->update_uc_list = false;
        }
}

static void be_clear_uc_list(struct be_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int i;

        __dev_uc_unsync(netdev, NULL);
        for (i = 1; i < (adapter->uc_macs + 1); i++)
                be_cmd_pmac_del(adapter, adapter->if_handle,
                                adapter->pmac_id[i], 0);
        adapter->uc_macs = 0;
}

static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                if (!be_in_all_promisc(adapter))
                        be_set_all_promisc(adapter);
        } else if (be_in_all_promisc(adapter)) {
                /* We need to re-program the vlan-list or clear
                 * vlan-promisc mode (if needed) when the interface
                 * comes out of promisc mode.
                 */
                be_vid_config(adapter);
        }

        be_set_uc_list(adapter);
        be_set_mc_list(adapter);
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        /* Proceed further only if the user-provided MAC is different
         * from the active MAC
         */
        if (ether_addr_equal(mac, vf_cfg->mac_addr))
                return 0;

        if (BEx_chip(adapter)) {
                be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
                                vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        } else {
                status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
                                        vf + 1);
        }

        if (status) {
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
                        mac, vf, status);
                return be_cmd_status(status);
        }

        ether_addr_copy(vf_cfg->mac_addr, mac);

        return 0;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                            struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->max_tx_rate = vf_cfg->tx_rate;
        vi->min_tx_rate = 0;
        vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
        vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
        vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
        vi->spoofchk = adapter->vf_cfg[vf].spoofchk;

        return 0;
}

static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        int vf_if_id = vf_cfg->if_handle;
        int status;

        /* Enable Transparent VLAN Tagging */
        status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
        if (status)
                return status;

        /* Clear pre-programmed VLAN filters on the VF, if any, once TVT is enabled */
        vids[0] = 0;
        status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
        if (!status)
                dev_info(&adapter->pdev->dev,
                         "Cleared guest VLANs on VF%d", vf);

        /* After TVT is enabled, disallow VFs to program VLAN filters */
        if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
                status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
                                                  ~BE_PRIV_FILTMGMT, vf + 1);
                if (!status)
                        vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
        }
        return 0;
}

static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        struct device *dev = &adapter->pdev->dev;
        int status;

        /* Reset Transparent VLAN Tagging. */
        status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
                                       vf_cfg->if_handle, 0, 0);
        if (status)
                return status;

        /* Allow VFs to program VLAN filtering */
        if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
                status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
                                                  BE_PRIV_FILTMGMT, vf + 1);
                if (!status) {
                        vf_cfg->privileges |= BE_PRIV_FILTMGMT;
                        dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
                }
        }

        dev_info(dev,
                 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
                return -EINVAL;

        if (vlan || qos) {
                vlan |= qos << VLAN_PRIO_SHIFT;
                status = be_set_vf_tvt(adapter, vf, vlan);
        } else {
                status = be_clear_vf_tvt(adapter, vf);
        }

        if (status) {
                dev_err(&adapter->pdev->dev,
                        "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
                        status);
                return be_cmd_status(status);
        }

        vf_cfg->vlan_tag = vlan;
        return 0;
}

static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
                             int min_tx_rate, int max_tx_rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->pdev->dev;
        int percent_rate, status = 0;
        u16 link_speed = 0;
        u8 link_status;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (min_tx_rate)
                return -EINVAL;

        if (!max_tx_rate)
                goto config_qos;

        status = be_cmd_link_status_query(adapter, &link_speed,
                                          &link_status, 0);
        if (status)
                goto err;

        if (!link_status) {
                dev_err(dev, "TX-rate setting not allowed when link is down\n");
                status = -ENETDOWN;
                goto err;
        }

        if (max_tx_rate < 100 || max_tx_rate > link_speed) {
                dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
                        link_speed);
                status = -EINVAL;
                goto err;
        }

        /* On Skyhawk the QOS setting must be done only as a % value */
        percent_rate = link_speed / 100;
        if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
                dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
                        percent_rate);
                status = -EINVAL;
                goto err;
        }

config_qos:
        status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
        if (status)
                goto err;

        adapter->vf_cfg[vf].tx_rate = max_tx_rate;
        return 0;

err:
        dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
                max_tx_rate, vf);
        return be_cmd_status(status);
}

static int be_set_vf_link_state(struct net_device *netdev, int vf,
                                int link_state)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
        if (status) {
                dev_err(&adapter->pdev->dev,
                        "Link state change on VF %d failed: %#x\n", vf, status);
                return be_cmd_status(status);
        }

        adapter->vf_cfg[vf].plink_tracking = link_state;

        return 0;
}

static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        u8 spoofchk;
        int status;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (BEx_chip(adapter))
                return -EOPNOTSUPP;

        if (enable == vf_cfg->spoofchk)
                return 0;

        spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;

        status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
                                       0, spoofchk);
        if (status) {
                dev_err(&adapter->pdev->dev,
                        "Spoofchk change on VF %d failed: %#x\n", vf, status);
                return be_cmd_status(status);
        }

        vf_cfg->spoofchk = enable;
        return 0;
}

static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
                          ulong now)
{
        aic->rx_pkts_prev = rx_pkts;
        aic->tx_reqs_prev = tx_pkts;
        aic->jiffies = now;
}

static int be_get_new_eqd(struct be_eq_obj *eqo)
{
        struct be_adapter *adapter = eqo->adapter;
        int eqd, start;
        struct be_aic_obj *aic;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 rx_pkts = 0, tx_pkts = 0;
        ulong now;
        u32 pps, delta;
        int i;

        aic = &adapter->aic_obj[eqo->idx];
        if (!aic->enable) {
                if (aic->jiffies)
                        aic->jiffies = 0;
                eqd = aic->et_eqd;
                return eqd;
        }

        for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
                do {
                        start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
                        rx_pkts += rxo->stats.rx_pkts;
                } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
        }

        for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
                do {
                        start = u64_stats_fetch_begin_irq(&txo->stats.sync);
                        tx_pkts += txo->stats.tx_reqs;
                } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
        }

        /* Skip if wrapped around, or on the first calculation */
        now = jiffies;
        if (!aic->jiffies || time_before(now, aic->jiffies) ||
            rx_pkts < aic->rx_pkts_prev ||
            tx_pkts < aic->tx_reqs_prev) {
                be_aic_update(aic, rx_pkts, tx_pkts, now);
                return aic->prev_eqd;
        }

        delta = jiffies_to_msecs(now - aic->jiffies);
        if (!delta)
                return aic->prev_eqd;

        pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
                (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
        eqd = (pps / 15000) << 2;

        if (eqd < 8)
                eqd = 0;
        eqd = min_t(u32, eqd, aic->max_eqd);
        eqd = max_t(u32, eqd, aic->min_eqd);

        be_aic_update(aic, rx_pkts, tx_pkts, now);

        return eqd;
}

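/* Worked example (illustrative): at 300000 pkts/s combined rx+tx over a
 * 1000 ms window, pps = 300000 and eqd = (300000 / 15000) << 2 = 80 us.
 * The result is then clamped to [aic->min_eqd, aic->max_eqd]; rates below
 * 30000 pkts/s compute eqd < 8 and fall back to 0 (no interrupt delay).
 */
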
/* For Skyhawk-R only */
static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
{
        struct be_adapter *adapter = eqo->adapter;
        struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
        ulong now = jiffies;
        int eqd;
        u32 mult_enc;

        if (!aic->enable)
                return 0;

        if (jiffies_to_msecs(now - aic->jiffies) < 1)
                eqd = aic->prev_eqd;
        else
                eqd = be_get_new_eqd(eqo);

        if (eqd > 100)
                mult_enc = R2I_DLY_ENC_1;
        else if (eqd > 60)
                mult_enc = R2I_DLY_ENC_2;
        else if (eqd > 20)
                mult_enc = R2I_DLY_ENC_3;
        else
                mult_enc = R2I_DLY_ENC_0;

        aic->prev_eqd = eqd;

        return mult_enc;
}

void be_eqd_update(struct be_adapter *adapter, bool force_update)
{
        struct be_set_eqd set_eqd[MAX_EVT_QS];
        struct be_aic_obj *aic;
        struct be_eq_obj *eqo;
        int i, num = 0, eqd;

        for_all_evt_queues(adapter, eqo, i) {
                aic = &adapter->aic_obj[eqo->idx];
                eqd = be_get_new_eqd(eqo);
                if (force_update || eqd != aic->prev_eqd) {
                        set_eqd[num].delay_multiplier = (eqd * 65) / 100;
                        set_eqd[num].eq_id = eqo->q.id;
                        aic->prev_eqd = eqd;
                        num++;
                }
        }

        if (num)
                be_cmd_modify_eqd(adapter, set_eqd, num);
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                               struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->tunneled)
                stats->rx_vxlan_offload_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non-TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts
         */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
}

static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;
        u32 frag_idx = rxq->tail;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_frag) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_frag = false;
        } else {
                dma_sync_single_for_cpu(&adapter->pdev->dev,
                                        dma_unmap_addr(rx_page_info, bus),
                                        rx_frag_size, DMA_FROM_DEVICE);
        }

        queue_tail_inc(rxq);
        atomic_dec(&rxq->used);
        return rx_page_info;
}

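/* RX buffer pages are shared by several frags: only the descriptor that owns
 * the last frag of a page (last_frag) unmaps the whole big_page_size mapping;
 * earlier frags just sync their rx_frag_size window back to the CPU.
 */
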
/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
                                struct be_rx_compl_info *rxcp)
{
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(rxo);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
                             struct be_rx_compl_info *rxcp)
{
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(rxo);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                memcpy(skb->data, start, curr_frag_len);
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                hdr_len = ETH_HLEN;
                memcpy(skb->data, start, hdr_len);
                skb_shinfo(skb)->nr_frags = 1;
                skb_frag_set_page(skb, 0, page_info->page);
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_frag_size_set(&skb_shinfo(skb)->frags[0],
                                  curr_frag_len - hdr_len);
                skb->data_len = curr_frag_len - hdr_len;
                skb->truesize += rx_frag_size;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(rxo);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}

2200 /* Process the RX completion indicated by rxcp when GRO is disabled */
2201 static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
2202 struct be_rx_compl_info *rxcp)
2204 struct be_adapter *adapter = rxo->adapter;
2205 struct net_device *netdev = adapter->netdev;
2206 struct sk_buff *skb;
2208 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
2209 if (unlikely(!skb)) {
2210 rx_stats(rxo)->rx_drops_no_skbs++;
2211 be_rx_compl_discard(rxo, rxcp);
2215 skb_fill_rx_data(rxo, skb, rxcp);
2217 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
2218 skb->ip_summed = CHECKSUM_UNNECESSARY;
2220 skb_checksum_none_assert(skb);
2222 skb->protocol = eth_type_trans(skb, netdev);
2223 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
2224 if (netdev->features & NETIF_F_RXHASH)
2225 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
2227 skb->csum_level = rxcp->tunneled;
2228 skb_mark_napi_id(skb, napi);
2231 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
2233 netif_receive_skb(skb);
2236 /* Process the RX completion indicated by rxcp when GRO is enabled */
2237 static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
2238 struct napi_struct *napi,
2239 struct be_rx_compl_info *rxcp)
2241 struct be_adapter *adapter = rxo->adapter;
2242 struct be_rx_page_info *page_info;
2243 struct sk_buff *skb = NULL;
2244 u16 remaining, curr_frag_len;
2247 skb = napi_get_frags(napi);
2249 be_rx_compl_discard(rxo, rxcp);
2253 remaining = rxcp->pkt_size;
2254 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
2255 page_info = get_rx_page_info(rxo);
2257 curr_frag_len = min(remaining, rx_frag_size);
2259 /* Coalesce all frags from the same physical page in one slot */
2260 if (i == 0 || page_info->page_offset == 0) {
2261 /* First frag or Fresh page */
2263 skb_frag_set_page(skb, j, page_info->page);
2264 skb_shinfo(skb)->frags[j].page_offset =
2265 page_info->page_offset;
2266 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
2268 put_page(page_info->page);
2270 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
2271 skb->truesize += rx_frag_size;
2272 remaining -= curr_frag_len;
2273 memset(page_info, 0, sizeof(*page_info));
2275 BUG_ON(j > MAX_SKB_FRAGS);
2277 skb_shinfo(skb)->nr_frags = j + 1;
2278 skb->len = rxcp->pkt_size;
2279 skb->data_len = rxcp->pkt_size;
2280 skb->ip_summed = CHECKSUM_UNNECESSARY;
2281 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
2282 if (adapter->netdev->features & NETIF_F_RXHASH)
2283 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
2285 skb->csum_level = rxcp->tunneled;
2288 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
2290 napi_gro_frags(napi);
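/* Decode a v1 RX completion (BE3 native mode) into the chip-agnostic
 * be_rx_compl_info layout consumed by the RX path.
 */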
2293 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
2294 struct be_rx_compl_info *rxcp)
2296 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
2297 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
2298 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
2299 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
2300 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
2301 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
2302 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
2303 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
2304 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
2305 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
2306 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
2308 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
2309 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
2311 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
2313 GET_RX_COMPL_V1_BITS(tunneled, compl);
2316 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
2317 struct be_rx_compl_info *rxcp)
2319 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
2320 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
2321 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
2322 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
2323 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
2324 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
2325 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
2326 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
2327 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
2328 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
2329 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
2331 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
2332 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
2334 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
2335 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
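/* Returns the RX completion at the CQ tail if its valid bit is set;
 * converts it to CPU endianness, parses it into rxo->rxcp (v0 or v1
 * format) and applies the qnq/pvid vlan-tag fixups before returning.
 */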
2338 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
2340 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
2341 struct be_rx_compl_info *rxcp = &rxo->rxcp;
2342 struct be_adapter *adapter = rxo->adapter;
2344 /* For checking the valid bit it is OK to use either definition as the
2345 * valid bit is at the same position in both v0 and v1 Rx compl */
2346 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
2350 be_dws_le_to_cpu(compl, sizeof(*compl));
2352 if (adapter->be3_native)
2353 be_parse_rx_compl_v1(compl, rxcp);
2355 be_parse_rx_compl_v0(compl, rxcp);
2361 /* In QNQ modes, if qnq bit is not set, then the packet was
2362 * tagged only with the transparent outer vlan-tag and must
2363 * not be treated as a vlan packet by host
2365 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
2368 if (!lancer_chip(adapter))
2369 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
2371 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
2372 !test_bit(rxcp->vlan_tag, adapter->vids))
2376 /* As the compl has been parsed, reset it; we won't touch it again */
2377 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
2379 queue_tail_inc(&rxo->cq);
2383 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
2385 u32 order = get_order(size);
2389 return alloc_pages(gfp, order);
2393 * Allocate a page, split it into fragments of size rx_frag_size and post as
2394 * receive buffers to BE
2396 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
2398 struct be_adapter *adapter = rxo->adapter;
2399 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
2400 struct be_queue_info *rxq = &rxo->q;
2401 struct page *pagep = NULL;
2402 struct device *dev = &adapter->pdev->dev;
2403 struct be_eth_rx_d *rxd;
2404 u64 page_dmaaddr = 0, frag_dmaaddr;
2405 u32 posted, page_offset = 0, notify = 0;
2407 page_info = &rxo->page_info_tbl[rxq->head];
2408 for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
2410 pagep = be_alloc_pages(adapter->big_page_size, gfp);
2411 if (unlikely(!pagep)) {
2412 rx_stats(rxo)->rx_post_fail++;
2415 page_dmaaddr = dma_map_page(dev, pagep, 0,
2416 adapter->big_page_size,
2418 if (dma_mapping_error(dev, page_dmaaddr)) {
2421 adapter->drv_stats.dma_map_errors++;
2427 page_offset += rx_frag_size;
2429 page_info->page_offset = page_offset;
2430 page_info->page = pagep;
2432 rxd = queue_head_node(rxq);
2433 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
2434 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
2435 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
2437 /* Any space left in the current big page for another frag? */
2438 if ((page_offset + rx_frag_size + rx_frag_size) >
2439 adapter->big_page_size) {
2441 page_info->last_frag = true;
2442 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
2444 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
2447 prev_page_info = page_info;
2448 queue_head_inc(rxq);
2449 page_info = &rxo->page_info_tbl[rxq->head];
2452 /* Mark the last frag of a page when we break out of the above loop
2453 * with no more slots available in the RXQ
2456 prev_page_info->last_frag = true;
2457 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
2461 atomic_add(posted, &rxq->used);
2462 if (rxo->rx_post_starved)
2463 rxo->rx_post_starved = false;
2465 notify = min(MAX_NUM_POST_ERX_DB, posted);
2466 be_rxq_notify(adapter, rxq->id, notify);
2469 } else if (atomic_read(&rxq->used) == 0) {
2470 /* Let be_worker replenish when memory is available */
2471 rxo->rx_post_starved = true;
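/* Returns the TX completion at the CQ tail, if one is pending (valid
 * bit set). The entry is consumed: its valid bit is cleared and the
 * CQ tail is advanced before returning.
 */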
2475 static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
2477 struct be_queue_info *tx_cq = &txo->cq;
2478 struct be_tx_compl_info *txcp = &txo->txcp;
2479 struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);
2481 if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
2484 /* Ensure load ordering of valid bit dword and other dwords below */
2486 be_dws_le_to_cpu(compl, sizeof(*compl));
2488 txcp->status = GET_TX_COMPL_BITS(status, compl);
2489 txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);
2491 compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
2492 queue_tail_inc(tx_cq);
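/* Unmaps the wrbs of the skb that starts at the TXQ tail and ends at
 * last_index, frees the skb, and returns the number of wrbs processed
 * so the caller can decrement txq->used.
 */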
2496 static u16 be_tx_compl_process(struct be_adapter *adapter,
2497 struct be_tx_obj *txo, u16 last_index)
2499 struct sk_buff **sent_skbs = txo->sent_skb_list;
2500 struct be_queue_info *txq = &txo->q;
2501 struct sk_buff *skb = NULL;
2502 bool unmap_skb_hdr = false;
2503 struct be_eth_wrb *wrb;
2508 if (sent_skbs[txq->tail]) {
2509 /* Free skb from prev req */
2511 dev_consume_skb_any(skb);
2512 skb = sent_skbs[txq->tail];
2513 sent_skbs[txq->tail] = NULL;
2514 queue_tail_inc(txq); /* skip hdr wrb */
2516 unmap_skb_hdr = true;
2518 wrb = queue_tail_node(txq);
2519 frag_index = txq->tail;
2520 unmap_tx_frag(&adapter->pdev->dev, wrb,
2521 (unmap_skb_hdr && skb_headlen(skb)));
2522 unmap_skb_hdr = false;
2523 queue_tail_inc(txq);
2525 } while (frag_index != last_index);
2526 dev_consume_skb_any(skb);
2531 /* Return the number of events in the event queue */
2532 static inline int events_get(struct be_eq_obj *eqo)
2534 struct be_eq_entry *eqe;
2538 eqe = queue_tail_node(&eqo->q);
2545 queue_tail_inc(&eqo->q);
2551 /* Leaves the EQ in disarmed state */
2552 static void be_eq_clean(struct be_eq_obj *eqo)
2554 int num = events_get(eqo);
2556 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
2559 /* Free posted rx buffers that were not used */
2560 static void be_rxq_clean(struct be_rx_obj *rxo)
2562 struct be_queue_info *rxq = &rxo->q;
2563 struct be_rx_page_info *page_info;
2565 while (atomic_read(&rxq->used) > 0) {
2566 page_info = get_rx_page_info(rxo);
2567 put_page(page_info->page);
2568 memset(page_info, 0, sizeof(*page_info));
2570 BUG_ON(atomic_read(&rxq->used));
2575 static void be_rx_cq_clean(struct be_rx_obj *rxo)
2577 struct be_queue_info *rx_cq = &rxo->cq;
2578 struct be_rx_compl_info *rxcp;
2579 struct be_adapter *adapter = rxo->adapter;
2582 /* Consume pending rx completions.
2583 * Wait for the flush completion (identified by zero num_rcvd)
2584 * to arrive. Notify CQ even when there are no more CQ entries
2585 * for HW to flush partially coalesced CQ entries.
2586 * In Lancer, there is no need to wait for flush compl.
2589 rxcp = be_rx_compl_get(rxo);
2591 if (lancer_chip(adapter))
2594 if (flush_wait++ > 50 ||
2595 be_check_error(adapter,
2597 dev_warn(&adapter->pdev->dev,
2598 "did not receive flush compl\n");
2601 be_cq_notify(adapter, rx_cq->id, true, 0);
2604 be_rx_compl_discard(rxo, rxcp);
2605 be_cq_notify(adapter, rx_cq->id, false, 1);
2606 if (rxcp->num_rcvd == 0)
2611 /* After cleanup, leave the CQ in unarmed state */
2612 be_cq_notify(adapter, rx_cq->id, false, 0);
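/* Drain TX completions during teardown: poll every TX CQ until HW has
 * been silent for 10ms (or an HW error is detected), then free any
 * wrbs that were enqueued but never notified to HW.
 */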
2615 static void be_tx_compl_clean(struct be_adapter *adapter)
2617 struct device *dev = &adapter->pdev->dev;
2618 u16 cmpl = 0, timeo = 0, num_wrbs = 0;
2619 struct be_tx_compl_info *txcp;
2620 struct be_queue_info *txq;
2621 u32 end_idx, notified_idx;
2622 struct be_tx_obj *txo;
2623 int i, pending_txqs;
2625 /* Stop polling for compls when HW has been silent for 10ms */
2627 pending_txqs = adapter->num_tx_qs;
2629 for_all_tx_queues(adapter, txo, i) {
2633 while ((txcp = be_tx_compl_get(txo))) {
2635 be_tx_compl_process(adapter, txo,
2640 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2641 atomic_sub(num_wrbs, &txq->used);
2644 if (!be_is_tx_compl_pending(txo))
2648 if (pending_txqs == 0 || ++timeo > 10 ||
2649 be_check_error(adapter, BE_ERROR_HW))
2655 /* Free enqueued TX that was never notified to HW */
2656 for_all_tx_queues(adapter, txo, i) {
2659 if (atomic_read(&txq->used)) {
2660 dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2661 i, atomic_read(&txq->used));
2662 notified_idx = txq->tail;
2663 end_idx = txq->tail;
2664 index_adv(&end_idx, atomic_read(&txq->used) - 1,
2666 /* Use the tx-compl process logic to handle requests
2667 * that were not sent to the HW.
2669 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2670 atomic_sub(num_wrbs, &txq->used);
2671 BUG_ON(atomic_read(&txq->used));
2672 txo->pend_wrb_cnt = 0;
2673 /* Since hw was never notified of these requests,
2674 * reset TXQ indices
2675 */
2676 txq->head = notified_idx;
2677 txq->tail = notified_idx;
2682 static void be_evt_queues_destroy(struct be_adapter *adapter)
2684 struct be_eq_obj *eqo;
2687 for_all_evt_queues(adapter, eqo, i) {
2688 if (eqo->q.created) {
2690 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
2691 napi_hash_del(&eqo->napi);
2692 netif_napi_del(&eqo->napi);
2693 free_cpumask_var(eqo->affinity_mask);
2695 be_queue_free(adapter, &eqo->q);
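/* Allocate and create the EQs, add a NAPI context for each one and
 * spread the EQ affinity hints across the CPUs of the local NUMA node.
 */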
2699 static int be_evt_queues_create(struct be_adapter *adapter)
2701 struct be_queue_info *eq;
2702 struct be_eq_obj *eqo;
2703 struct be_aic_obj *aic;
2706 /* need enough EQs to service both RX and TX queues */
2707 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2708 max(adapter->cfg_num_rx_irqs,
2709 adapter->cfg_num_tx_irqs));
2711 for_all_evt_queues(adapter, eqo, i) {
2712 int numa_node = dev_to_node(&adapter->pdev->dev);
2714 aic = &adapter->aic_obj[i];
2715 eqo->adapter = adapter;
2717 aic->max_eqd = BE_MAX_EQD;
2721 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2722 sizeof(struct be_eq_entry));
2726 rc = be_cmd_eq_create(adapter, eqo);
2730 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
2732 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
2733 eqo->affinity_mask);
2734 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2740 static void be_mcc_queues_destroy(struct be_adapter *adapter)
2742 struct be_queue_info *q;
2744 q = &adapter->mcc_obj.q;
2746 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
2747 be_queue_free(adapter, q);
2749 q = &adapter->mcc_obj.cq;
2751 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2752 be_queue_free(adapter, q);
2755 /* Must be called only after TX qs are created as MCC shares TX EQ */
2756 static int be_mcc_queues_create(struct be_adapter *adapter)
2758 struct be_queue_info *q, *cq;
2760 cq = &adapter->mcc_obj.cq;
2761 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
2762 sizeof(struct be_mcc_compl)))
2765 /* Use the default EQ for MCC completions */
2766 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
2769 q = &adapter->mcc_obj.q;
2770 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2771 goto mcc_cq_destroy;
2773 if (be_cmd_mccq_create(adapter, q, cq))
2779 be_queue_free(adapter, q);
2781 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
2783 be_queue_free(adapter, cq);
2788 static void be_tx_queues_destroy(struct be_adapter *adapter)
2790 struct be_queue_info *q;
2791 struct be_tx_obj *txo;
2794 for_all_tx_queues(adapter, txo, i) {
2797 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2798 be_queue_free(adapter, q);
2802 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2803 be_queue_free(adapter, q);
2807 static int be_tx_qs_create(struct be_adapter *adapter)
2809 struct be_queue_info *cq;
2810 struct be_tx_obj *txo;
2811 struct be_eq_obj *eqo;
2814 adapter->num_tx_qs = min(adapter->num_evt_qs, adapter->cfg_num_tx_irqs);
2816 for_all_tx_queues(adapter, txo, i) {
2818 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2819 sizeof(struct be_eth_tx_compl));
2823 u64_stats_init(&txo->stats.sync);
2824 u64_stats_init(&txo->stats.sync_compl);
2826 /* If num_evt_qs is less than num_tx_qs, then more than
2827 * one txq shares an eq
2829 eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
2830 status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
2834 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2835 sizeof(struct be_eth_wrb));
2839 status = be_cmd_txq_create(adapter, txo);
2843 netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
2847 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2848 adapter->num_tx_qs);
2852 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2854 struct be_queue_info *q;
2855 struct be_rx_obj *rxo;
2858 for_all_rx_queues(adapter, rxo, i) {
2861 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2862 be_queue_free(adapter, q);
2866 static int be_rx_cqs_create(struct be_adapter *adapter)
2868 struct be_queue_info *eq, *cq;
2869 struct be_rx_obj *rxo;
2872 adapter->num_rss_qs =
2873 min(adapter->num_evt_qs, adapter->cfg_num_rx_irqs);
2875 /* We'll use RSS only if at least 2 RSS rings are supported. */
2876 if (adapter->num_rss_qs < 2)
2877 adapter->num_rss_qs = 0;
2879 adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
2881 /* When the interface is not capable of RSS rings (and there is no
2882 * need to create a default RXQ) we'll still need one RXQ
2884 if (adapter->num_rx_qs == 0)
2885 adapter->num_rx_qs = 1;
2887 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2888 for_all_rx_queues(adapter, rxo, i) {
2889 rxo->adapter = adapter;
2891 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2892 sizeof(struct be_eth_rx_compl));
2896 u64_stats_init(&rxo->stats.sync);
2897 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2898 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2903 dev_info(&adapter->pdev->dev,
2904 "created %d RX queue(s)\n", adapter->num_rx_qs);
2908 static irqreturn_t be_intx(int irq, void *dev)
2910 struct be_eq_obj *eqo = dev;
2911 struct be_adapter *adapter = eqo->adapter;
2914 /* IRQ is not expected when NAPI is scheduled as the EQ
2915 * will not be armed.
2916 * But, this can happen on Lancer INTx where it takes
2917 * a while to de-assert INTx or in BE2 where occasionally
2918 * an interrupt may be raised even when EQ is unarmed.
2919 * If NAPI is already scheduled, then counting & notifying
2920 * events will orphan them.
2922 if (napi_schedule_prep(&eqo->napi)) {
2923 num_evts = events_get(eqo);
2924 __napi_schedule(&eqo->napi);
2926 eqo->spurious_intr = 0;
2928 be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
2930 /* Return IRQ_HANDLED only for the first spurious intr
2931 * after a valid intr to stop the kernel from branding
2932 * this irq as a bad one!
2934 if (num_evts || eqo->spurious_intr++ == 0)
2940 static irqreturn_t be_msix(int irq, void *dev)
2942 struct be_eq_obj *eqo = dev;
2944 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
2945 napi_schedule(&eqo->napi);
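/* GRO is attempted only for error-free TCP frames whose L4 checksum
 * was validated by HW.
 */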
2949 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2951 return rxcp->tcpf && !rxcp->err && rxcp->l4_csum;
2954 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2955 int budget, int polling)
2957 struct be_adapter *adapter = rxo->adapter;
2958 struct be_queue_info *rx_cq = &rxo->cq;
2959 struct be_rx_compl_info *rxcp;
2961 u32 frags_consumed = 0;
2963 for (work_done = 0; work_done < budget; work_done++) {
2964 rxcp = be_rx_compl_get(rxo);
2968 /* Is it a flush compl that has no data? */
2969 if (unlikely(rxcp->num_rcvd == 0))
2972 /* Discard compl with partial DMA Lancer B0 */
2973 if (unlikely(!rxcp->pkt_size)) {
2974 be_rx_compl_discard(rxo, rxcp);
2978 /* On BE drop pkts that arrive due to imperfect filtering in
2979 * promiscuous mode on some SKUs
2981 if (unlikely(rxcp->port != adapter->port_num &&
2982 !lancer_chip(adapter))) {
2983 be_rx_compl_discard(rxo, rxcp);
2987 /* Don't do gro when we're busy_polling */
2988 if (do_gro(rxcp) && polling != BUSY_POLLING)
2989 be_rx_compl_process_gro(rxo, napi, rxcp);
2991 be_rx_compl_process(rxo, napi, rxcp);
2994 frags_consumed += rxcp->num_rcvd;
2995 be_rx_stats_update(rxo, rxcp);
2999 be_cq_notify(adapter, rx_cq->id, true, work_done);
3001 /* When an rx-obj gets into post_starved state, just
3002 * let be_worker do the posting.
3004 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
3005 !rxo->rx_post_starved)
3006 be_post_rx_frags(rxo, GFP_ATOMIC,
3007 max_t(u32, MAX_RX_POST,
3014 static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
3017 case BE_TX_COMP_HDR_PARSE_ERR:
3018 tx_stats(txo)->tx_hdr_parse_err++;
3020 case BE_TX_COMP_NDMA_ERR:
3021 tx_stats(txo)->tx_dma_err++;
3023 case BE_TX_COMP_ACL_ERR:
3024 tx_stats(txo)->tx_spoof_check_err++;
3029 static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
3032 case LANCER_TX_COMP_LSO_ERR:
3033 tx_stats(txo)->tx_tso_err++;
3035 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
3036 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
3037 tx_stats(txo)->tx_spoof_check_err++;
3039 case LANCER_TX_COMP_QINQ_ERR:
3040 tx_stats(txo)->tx_qinq_err++;
3042 case LANCER_TX_COMP_PARITY_ERR:
3043 tx_stats(txo)->tx_internal_parity_err++;
3045 case LANCER_TX_COMP_DMA_ERR:
3046 tx_stats(txo)->tx_dma_err++;
3051 static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
3054 int num_wrbs = 0, work_done = 0;
3055 struct be_tx_compl_info *txcp;
3057 while ((txcp = be_tx_compl_get(txo))) {
3058 num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
3062 if (lancer_chip(adapter))
3063 lancer_update_tx_err(txo, txcp->status);
3065 be_update_tx_err(txo, txcp->status);
3070 be_cq_notify(adapter, txo->cq.id, true, work_done);
3071 atomic_sub(num_wrbs, &txo->q.used);
3073 /* As Tx wrbs have been freed up, wake up netdev queue
3074 * if it was stopped due to lack of tx wrbs. */
3075 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
3076 be_can_txq_wake(txo)) {
3077 netif_wake_subqueue(adapter->netdev, idx);
3080 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
3081 tx_stats(txo)->tx_compl += work_done;
3082 u64_stats_update_end(&tx_stats(txo)->sync_compl);
3086 #ifdef CONFIG_NET_RX_BUSY_POLL
3087 static inline bool be_lock_napi(struct be_eq_obj *eqo)
3091 spin_lock(&eqo->lock); /* BH is already disabled */
3092 if (eqo->state & BE_EQ_LOCKED) {
3093 WARN_ON(eqo->state & BE_EQ_NAPI);
3094 eqo->state |= BE_EQ_NAPI_YIELD;
3097 eqo->state = BE_EQ_NAPI;
3099 spin_unlock(&eqo->lock);
3103 static inline void be_unlock_napi(struct be_eq_obj *eqo)
3105 spin_lock(&eqo->lock); /* BH is already disabled */
3107 WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
3108 eqo->state = BE_EQ_IDLE;
3110 spin_unlock(&eqo->lock);
3113 static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
3117 spin_lock_bh(&eqo->lock);
3118 if (eqo->state & BE_EQ_LOCKED) {
3119 eqo->state |= BE_EQ_POLL_YIELD;
3122 eqo->state |= BE_EQ_POLL;
3124 spin_unlock_bh(&eqo->lock);
3128 static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
3130 spin_lock_bh(&eqo->lock);
3132 WARN_ON(eqo->state & (BE_EQ_NAPI));
3133 eqo->state = BE_EQ_IDLE;
3135 spin_unlock_bh(&eqo->lock);
3138 static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
3140 spin_lock_init(&eqo->lock);
3141 eqo->state = BE_EQ_IDLE;
3144 static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
3148 /* It's enough to just acquire napi lock on the eqo to stop
3149 * be_busy_poll() from processing any queues.
3151 while (!be_lock_napi(eqo))
3157 #else /* CONFIG_NET_RX_BUSY_POLL */
3159 static inline bool be_lock_napi(struct be_eq_obj *eqo)
3164 static inline void be_unlock_napi(struct be_eq_obj *eqo)
3168 static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
3173 static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
3177 static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
3181 static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
3184 #endif /* CONFIG_NET_RX_BUSY_POLL */
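/* NAPI poll handler: reaps TX completions for all TXQs on this EQ,
 * processes up to the RX budget per RXQ, services the MCC queue on
 * the MCC EQ, and re-arms the EQ only when under budget.
 */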
3186 int be_poll(struct napi_struct *napi, int budget)
3188 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3189 struct be_adapter *adapter = eqo->adapter;
3190 int max_work = 0, work, i, num_evts;
3191 struct be_rx_obj *rxo;
3192 struct be_tx_obj *txo;
3195 num_evts = events_get(eqo);
3197 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
3198 be_process_tx(adapter, txo, i);
3200 if (be_lock_napi(eqo)) {
3201 /* This loop will iterate twice for EQ0 in which
3202 * completions of the last RXQ (default one) are also processed.
3203 * For other EQs the loop iterates only once.
3205 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3206 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
3207 max_work = max(work, max_work);
3209 be_unlock_napi(eqo);
3214 if (is_mcc_eqo(eqo))
3215 be_process_mcc(adapter);
3217 if (max_work < budget) {
3218 napi_complete(napi);
3220 /* Skyhawk EQ_DB has a provision to set the rearm to interrupt
3221 * delay via a delay multiplier encoding value
3223 if (skyhawk_chip(adapter))
3224 mult_enc = be_get_eq_delay_mult_enc(eqo);
3226 be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
3229 /* As we'll continue in polling mode, count and clear events */
3230 be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
3235 #ifdef CONFIG_NET_RX_BUSY_POLL
3236 static int be_busy_poll(struct napi_struct *napi)
3238 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3239 struct be_adapter *adapter = eqo->adapter;
3240 struct be_rx_obj *rxo;
3243 if (!be_lock_busy_poll(eqo))
3244 return LL_FLUSH_BUSY;
3246 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3247 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
3252 be_unlock_busy_poll(eqo);
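/* Check for fatal HW errors: the SLIPORT status registers on Lancer,
 * or unmasked UE (unrecoverable error) status bits on other chips.
 * Spurious UEs are possible, so BE_ERROR_UE is latched only on Skyhawk.
 */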
3257 void be_detect_error(struct be_adapter *adapter)
3259 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
3260 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
3262 struct device *dev = &adapter->pdev->dev;
3264 if (be_check_error(adapter, BE_ERROR_HW))
3267 if (lancer_chip(adapter)) {
3268 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3269 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
3270 be_set_error(adapter, BE_ERROR_UE);
3271 sliport_err1 = ioread32(adapter->db +
3272 SLIPORT_ERROR1_OFFSET);
3273 sliport_err2 = ioread32(adapter->db +
3274 SLIPORT_ERROR2_OFFSET);
3275 /* Do not log error messages if it's a FW reset */
3276 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
3277 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
3278 dev_info(dev, "Firmware update in progress\n");
3280 dev_err(dev, "Error detected in the card\n");
3281 dev_err(dev, "ERR: sliport status 0x%x\n",
3283 dev_err(dev, "ERR: sliport error1 0x%x\n",
3285 dev_err(dev, "ERR: sliport error2 0x%x\n",
3290 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
3291 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
3292 ue_lo_mask = ioread32(adapter->pcicfg +
3293 PCICFG_UE_STATUS_LOW_MASK);
3294 ue_hi_mask = ioread32(adapter->pcicfg +
3295 PCICFG_UE_STATUS_HI_MASK);
3297 ue_lo = (ue_lo & ~ue_lo_mask);
3298 ue_hi = (ue_hi & ~ue_hi_mask);
3300 /* On certain platforms BE hardware can indicate spurious UEs.
3301 * Allow HW to stop working completely in case of a real UE.
3302 * Hence not setting the hw_error for UE detection.
3305 if (ue_lo || ue_hi) {
3307 "Unrecoverable Error detected in the adapter");
3308 dev_err(dev, "Please reboot server to recover");
3309 if (skyhawk_chip(adapter))
3310 be_set_error(adapter, BE_ERROR_UE);
3312 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
3314 dev_err(dev, "UE: %s bit set\n",
3315 ue_status_low_desc[i]);
3317 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
3319 dev_err(dev, "UE: %s bit set\n",
3320 ue_status_hi_desc[i]);
3326 static void be_msix_disable(struct be_adapter *adapter)
3328 if (msix_enabled(adapter)) {
3329 pci_disable_msix(adapter->pdev);
3330 adapter->num_msix_vec = 0;
3331 adapter->num_msix_roce_vec = 0;
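/* Enable MSI-X with enough vectors for the configured RX/TX IRQs,
 * plus the RoCE EQs when RoCE is supported. On failure the driver
 * falls back to INTx, except on VFs where MSI-X is mandatory.
 */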
3335 static int be_msix_enable(struct be_adapter *adapter)
3337 unsigned int i, max_roce_eqs;
3338 struct device *dev = &adapter->pdev->dev;
3341 /* If RoCE is supported, program the max number of vectors that
3342 * could be used for NIC and RoCE, else, just program the number
3343 * we'll use initially.
3345 if (be_roce_supported(adapter)) {
3347 be_max_func_eqs(adapter) - be_max_nic_eqs(adapter);
3348 max_roce_eqs = min(max_roce_eqs, num_online_cpus());
3349 num_vec = be_max_any_irqs(adapter) + max_roce_eqs;
3351 num_vec = max(adapter->cfg_num_rx_irqs,
3352 adapter->cfg_num_tx_irqs);
3355 for (i = 0; i < num_vec; i++)
3356 adapter->msix_entries[i].entry = i;
3358 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
3359 MIN_MSIX_VECTORS, num_vec);
3363 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
3364 adapter->num_msix_roce_vec = num_vec / 2;
3365 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
3366 adapter->num_msix_roce_vec);
3369 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
3371 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
3372 adapter->num_msix_vec);
3376 dev_warn(dev, "MSIx enable failed\n");
3378 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
3379 if (be_virtfn(adapter))
3384 static inline int be_msix_vec_get(struct be_adapter *adapter,
3385 struct be_eq_obj *eqo)
3387 return adapter->msix_entries[eqo->msix_idx].vector;
3390 static int be_msix_register(struct be_adapter *adapter)
3392 struct net_device *netdev = adapter->netdev;
3393 struct be_eq_obj *eqo;
3396 for_all_evt_queues(adapter, eqo, i) {
3397 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
3398 vec = be_msix_vec_get(adapter, eqo);
3399 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3403 irq_set_affinity_hint(vec, eqo->affinity_mask);
3408 for (i--; i >= 0; i--) {
3409 eqo = &adapter->eq_obj[i];
3410 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3412 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
3414 be_msix_disable(adapter);
3418 static int be_irq_register(struct be_adapter *adapter)
3420 struct net_device *netdev = adapter->netdev;
3423 if (msix_enabled(adapter)) {
3424 status = be_msix_register(adapter);
3427 /* INTx is not supported for VF */
3428 if (be_virtfn(adapter))
3432 /* INTx: only the first EQ is used */
3433 netdev->irq = adapter->pdev->irq;
3434 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
3435 &adapter->eq_obj[0]);
3437 dev_err(&adapter->pdev->dev,
3438 "INTx request IRQ failed - err %d\n", status);
3442 adapter->isr_registered = true;
3446 static void be_irq_unregister(struct be_adapter *adapter)
3448 struct net_device *netdev = adapter->netdev;
3449 struct be_eq_obj *eqo;
3452 if (!adapter->isr_registered)
3456 if (!msix_enabled(adapter)) {
3457 free_irq(netdev->irq, &adapter->eq_obj[0]);
3462 for_all_evt_queues(adapter, eqo, i) {
3463 vec = be_msix_vec_get(adapter, eqo);
3464 irq_set_affinity_hint(vec, NULL);
3469 adapter->isr_registered = false;
3472 static void be_rx_qs_destroy(struct be_adapter *adapter)
3474 struct rss_info *rss = &adapter->rss_info;
3475 struct be_queue_info *q;
3476 struct be_rx_obj *rxo;
3479 for_all_rx_queues(adapter, rxo, i) {
3482 /* If RXQs are destroyed while in an "out of buffer"
3483 * state, there is a possibility of an HW stall on
3484 * Lancer. So, post 64 buffers to each queue to relieve
3485 * the "out of buffer" condition.
3486 * Make sure there's space in the RXQ before posting.
3488 if (lancer_chip(adapter)) {
3489 be_rx_cq_clean(rxo);
3490 if (atomic_read(&q->used) == 0)
3491 be_post_rx_frags(rxo, GFP_KERNEL,
3495 be_cmd_rxq_destroy(adapter, q);
3496 be_rx_cq_clean(rxo);
3499 be_queue_free(adapter, q);
3502 if (rss->rss_flags) {
3503 rss->rss_flags = RSS_ENABLE_NONE;
3504 be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3505 128, rss->rss_hkey);
3509 static void be_disable_if_filters(struct be_adapter *adapter)
3511 be_cmd_pmac_del(adapter, adapter->if_handle,
3512 adapter->pmac_id[0], 0);
3514 be_clear_uc_list(adapter);
3515 be_clear_mc_list(adapter);
3517 /* The IFACE flags are enabled in the open path and cleared
3518 * in the close path. When a VF gets detached from the host and
3519 * assigned to a VM the following happens:
3520 * - VF's IFACE flags get cleared in the detach path
3521 * - IFACE create is issued by the VF in the attach path
3522 * Due to a bug in the BE3/Skyhawk-R FW
3523 * (Lancer FW doesn't have the bug), the IFACE capability flags
3524 * specified along with the IFACE create cmd issued by a VF are not
3525 * honoured by FW. As a consequence, if a *new* driver
3526 * (that enables/disables IFACE flags in open/close)
3527 * is loaded in the host and an *old* driver is used by a VM/VF,
3528 * the IFACE gets created *without* the needed flags.
3529 * To avoid this, disable RX-filter flags only for Lancer.
3531 if (lancer_chip(adapter)) {
3532 be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
3533 adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
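/* ndo_stop: disable filters, NAPI and MCC processing, drain pending
 * TX completions, destroy the RX queues and release the IRQs.
 */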
3537 static int be_close(struct net_device *netdev)
3539 struct be_adapter *adapter = netdev_priv(netdev);
3540 struct be_eq_obj *eqo;
3543 /* This protection is needed as be_close() may be called even when the
3544 * adapter is in cleared state (after eeh perm failure)
3546 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3549 be_disable_if_filters(adapter);
3551 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3552 for_all_evt_queues(adapter, eqo, i) {
3553 napi_disable(&eqo->napi);
3554 be_disable_busy_poll(eqo);
3556 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
3559 be_async_mcc_disable(adapter);
3561 /* Wait for all pending tx completions to arrive so that
3562 * all tx skbs are freed.
3564 netif_tx_disable(netdev);
3565 be_tx_compl_clean(adapter);
3567 be_rx_qs_destroy(adapter);
3569 for_all_evt_queues(adapter, eqo, i) {
3570 if (msix_enabled(adapter))
3571 synchronize_irq(be_msix_vec_get(adapter, eqo));
3573 synchronize_irq(netdev->irq);
3577 be_irq_unregister(adapter);
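/* Create the RXQs (a default RXQ if needed plus the RSS rings),
 * program the RSS indirection table and hash key, and post receive
 * buffers to all queues.
 */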
3582 static int be_rx_qs_create(struct be_adapter *adapter)
3584 struct rss_info *rss = &adapter->rss_info;
3585 u8 rss_key[RSS_HASH_KEY_LEN];
3586 struct be_rx_obj *rxo;
3589 for_all_rx_queues(adapter, rxo, i) {
3590 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3591 sizeof(struct be_eth_rx_d));
3596 if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3597 rxo = default_rxo(adapter);
3598 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3599 rx_frag_size, adapter->if_handle,
3600 false, &rxo->rss_id);
3605 for_all_rss_queues(adapter, rxo, i) {
3606 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3607 rx_frag_size, adapter->if_handle,
3608 true, &rxo->rss_id);
3613 if (be_multi_rxq(adapter)) {
3614 for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
3615 for_all_rss_queues(adapter, rxo, i) {
3616 if ((j + i) >= RSS_INDIR_TABLE_LEN)
3618 rss->rsstable[j + i] = rxo->rss_id;
3619 rss->rss_queue[j + i] = i;
3622 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3623 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
3625 if (!BEx_chip(adapter))
3626 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3627 RSS_ENABLE_UDP_IPV6;
3629 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
3630 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3631 RSS_INDIR_TABLE_LEN, rss_key);
3633 rss->rss_flags = RSS_ENABLE_NONE;
3637 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
3639 /* Disable RSS, if only default RX Q is created */
3640 rss->rss_flags = RSS_ENABLE_NONE;
3644 /* Post 1 less than RXQ-len to avoid head being equal to tail,
3645 * which is a queue empty condition
3647 for_all_rx_queues(adapter, rxo, i)
3648 be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
3653 static int be_enable_if_filters(struct be_adapter *adapter)
3657 status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON);
3661 /* For BE3 VFs, the PF programs the initial MAC address */
3662 if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
3663 status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
3665 &adapter->pmac_id[0], 0);
3670 if (adapter->vlans_added)
3671 be_vid_config(adapter);
3673 be_set_rx_mode(adapter->netdev);
3678 static int be_open(struct net_device *netdev)
3680 struct be_adapter *adapter = netdev_priv(netdev);
3681 struct be_eq_obj *eqo;
3682 struct be_rx_obj *rxo;
3683 struct be_tx_obj *txo;
3687 status = be_rx_qs_create(adapter);
3691 status = be_enable_if_filters(adapter);
3695 status = be_irq_register(adapter);
3699 for_all_rx_queues(adapter, rxo, i)
3700 be_cq_notify(adapter, rxo->cq.id, true, 0);
3702 for_all_tx_queues(adapter, txo, i)
3703 be_cq_notify(adapter, txo->cq.id, true, 0);
3705 be_async_mcc_enable(adapter);
3707 for_all_evt_queues(adapter, eqo, i) {
3708 napi_enable(&eqo->napi);
3709 be_enable_busy_poll(eqo);
3710 be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
3712 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
3714 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
3716 be_link_status_update(adapter, link_status);
3718 netif_tx_start_all_queues(netdev);
3719 if (skyhawk_chip(adapter))
3720 udp_tunnel_get_rx_info(netdev);
3724 be_close(adapter->netdev);
3728 static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3732 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3734 mac[5] = (u8)(addr & 0xFF);
3735 mac[4] = (u8)((addr >> 8) & 0xFF);
3736 mac[3] = (u8)((addr >> 16) & 0xFF);
3737 /* Use the OUI from the current MAC address */
3738 memcpy(mac, adapter->netdev->dev_addr, 3);
3742 * Generate a seed MAC address from the PF MAC Address using jhash.
3743 * MAC addresses for VFs are assigned incrementally starting from the seed.
3744 * These addresses are programmed in the ASIC by the PF and the VF driver
3745 * queries for the MAC address during its probe.
3747 static int be_vf_eth_addr_config(struct be_adapter *adapter)
3752 struct be_vf_cfg *vf_cfg;
3754 be_vf_eth_addr_generate(adapter, mac);
3756 for_all_vfs(adapter, vf_cfg, vf) {
3757 if (BEx_chip(adapter))
3758 status = be_cmd_pmac_add(adapter, mac,
3760 &vf_cfg->pmac_id, vf + 1);
3762 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3766 dev_err(&adapter->pdev->dev,
3767 "Mac address assignment failed for VF %d\n",
3770 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3777 static int be_vfs_mac_query(struct be_adapter *adapter)
3781 struct be_vf_cfg *vf_cfg;
3783 for_all_vfs(adapter, vf_cfg, vf) {
3784 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3785 mac, vf_cfg->if_handle,
3789 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3794 static void be_vf_clear(struct be_adapter *adapter)
3796 struct be_vf_cfg *vf_cfg;
3799 if (pci_vfs_assigned(adapter->pdev)) {
3800 dev_warn(&adapter->pdev->dev,
3801 "VFs are assigned to VMs: not disabling VFs\n");
3805 pci_disable_sriov(adapter->pdev);
3807 for_all_vfs(adapter, vf_cfg, vf) {
3808 if (BEx_chip(adapter))
3809 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3810 vf_cfg->pmac_id, vf + 1);
3812 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3815 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3818 if (BE3_chip(adapter))
3819 be_cmd_set_hsw_config(adapter, 0, 0,
3821 PORT_FWD_TYPE_PASSTHRU, 0);
3823 kfree(adapter->vf_cfg);
3824 adapter->num_vfs = 0;
3825 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
3828 static void be_clear_queues(struct be_adapter *adapter)
3830 be_mcc_queues_destroy(adapter);
3831 be_rx_cqs_destroy(adapter);
3832 be_tx_queues_destroy(adapter);
3833 be_evt_queues_destroy(adapter);
3836 static void be_cancel_worker(struct be_adapter *adapter)
3838 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3839 cancel_delayed_work_sync(&adapter->work);
3840 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3844 static void be_cancel_err_detection(struct be_adapter *adapter)
3846 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3847 cancel_delayed_work_sync(&adapter->be_err_detection_work);
3848 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3852 static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3854 struct net_device *netdev = adapter->netdev;
3856 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3857 be_cmd_manage_iface(adapter, adapter->if_handle,
3858 OP_CONVERT_TUNNEL_TO_NORMAL);
3860 if (adapter->vxlan_port)
3861 be_cmd_set_vxlan_port(adapter, 0);
3863 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3864 adapter->vxlan_port = 0;
3866 netdev->hw_enc_features = 0;
3867 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
3868 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
3871 static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
3872 struct be_resources *vft_res)
3874 struct be_resources res = adapter->pool_res;
3875 u32 vf_if_cap_flags = res.vf_if_cap_flags;
3876 struct be_resources res_mod = {0};
3879 /* Distribute the queue resources among the PF and its VFs */
3881 /* Divide the rx queues evenly among the VFs and the PF, capped
3882 * at VF-EQ-count. Any remainder queues belong to the PF.
3884 num_vf_qs = min(SH_VF_MAX_NIC_EQS,
3885 res.max_rss_qs / (num_vfs + 1));
3887 /* Skyhawk-R chip supports only MAX_PORT_RSS_TABLES
3888 * RSS Tables per port. Provide RSS on VFs, only if number of
3889 * VFs requested is less than its PF Pool's RSS Tables limit.
3891 if (num_vfs >= be_max_pf_pool_rss_tables(adapter))
3895 /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
3896 * which are modifiable using SET_PROFILE_CONFIG cmd.
3898 be_cmd_get_profile_config(adapter, &res_mod, NULL, ACTIVE_PROFILE_TYPE,
3899 RESOURCE_MODIFIABLE, 0);
3901 /* If RSS IFACE capability flags are modifiable for a VF, set the
3902 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
3903 * more than 1 RSSQ is available for a VF.
3904 * Otherwise, provision only 1 queue pair for VF.
3906 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
3907 vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
3908 if (num_vf_qs > 1) {
3909 vf_if_cap_flags |= BE_IF_FLAGS_RSS;
3910 if (res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
3911 vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
3913 vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
3914 BE_IF_FLAGS_DEFQ_RSS);
3920 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
3921 vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
3922 vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
3925 vft_res->vf_if_cap_flags = vf_if_cap_flags;
3926 vft_res->max_rx_qs = num_vf_qs;
3927 vft_res->max_rss_qs = num_vf_qs;
3928 vft_res->max_tx_qs = res.max_tx_qs / (num_vfs + 1);
3929 vft_res->max_cq_count = res.max_cq_count / (num_vfs + 1);
3931 /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
3932 * among the PF and its VFs, if the fields are changeable
3934 if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
3935 vft_res->max_uc_mac = res.max_uc_mac / (num_vfs + 1);
3937 if (res_mod.max_vlans == FIELD_MODIFIABLE)
3938 vft_res->max_vlans = res.max_vlans / (num_vfs + 1);
3940 if (res_mod.max_iface_count == FIELD_MODIFIABLE)
3941 vft_res->max_iface_count = res.max_iface_count / (num_vfs + 1);
3943 if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
3944 vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1);
3947 static int be_clear(struct be_adapter *adapter)
3949 struct pci_dev *pdev = adapter->pdev;
3950 struct be_resources vft_res = {0};
3952 be_cancel_worker(adapter);
3954 if (sriov_enabled(adapter))
3955 be_vf_clear(adapter);
3957 /* Re-configure FW to distribute resources evenly across max-supported
3958 * number of VFs, only when VFs are not already enabled.
3960 if (skyhawk_chip(adapter) && be_physfn(adapter) &&
3961 !pci_vfs_assigned(pdev)) {
3962 be_calculate_vf_res(adapter,
3963 pci_sriov_get_totalvfs(pdev),
3965 be_cmd_set_sriov_config(adapter, adapter->pool_res,
3966 pci_sriov_get_totalvfs(pdev),
3970 be_disable_vxlan_offloads(adapter);
3971 kfree(adapter->pmac_id);
3972 adapter->pmac_id = NULL;
3974 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
3976 be_clear_queues(adapter);
3978 be_msix_disable(adapter);
3979 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
3983 static int be_vfs_if_create(struct be_adapter *adapter)
3985 struct be_resources res = {0};
3986 u32 cap_flags, en_flags, vf;
3987 struct be_vf_cfg *vf_cfg;
3990 /* If a FW profile exists, then cap_flags are updated */
3991 cap_flags = BE_VF_IF_EN_FLAGS;
3993 for_all_vfs(adapter, vf_cfg, vf) {
3994 if (!BE3_chip(adapter)) {
3995 status = be_cmd_get_profile_config(adapter, &res, NULL,
3996 ACTIVE_PROFILE_TYPE,
4000 cap_flags = res.if_cap_flags;
4002 /* Prevent VFs from enabling VLAN promiscuous
4003 * mode
4004 */
4004 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
4008 /* PF should enable IF flags during proxy if_create call */
4009 en_flags = cap_flags & BE_VF_IF_EN_FLAGS;
4010 status = be_cmd_if_create(adapter, cap_flags, en_flags,
4011 &vf_cfg->if_handle, vf + 1);
4019 static int be_vf_setup_init(struct be_adapter *adapter)
4021 struct be_vf_cfg *vf_cfg;
4024 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
4026 if (!adapter->vf_cfg)
4029 for_all_vfs(adapter, vf_cfg, vf) {
4030 vf_cfg->if_handle = -1;
4031 vf_cfg->pmac_id = -1;
4036 static int be_vf_setup(struct be_adapter *adapter)
4038 struct device *dev = &adapter->pdev->dev;
4039 struct be_vf_cfg *vf_cfg;
4040 int status, old_vfs, vf;
4043 old_vfs = pci_num_vf(adapter->pdev);
4045 status = be_vf_setup_init(adapter);
4050 for_all_vfs(adapter, vf_cfg, vf) {
4051 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
4056 status = be_vfs_mac_query(adapter);
4060 status = be_vfs_if_create(adapter);
4064 status = be_vf_eth_addr_config(adapter);
4069 for_all_vfs(adapter, vf_cfg, vf) {
4070 /* Allow VFs to program MAC/VLAN filters */
4071 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
4073 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
4074 status = be_cmd_set_fn_privileges(adapter,
4075 vf_cfg->privileges |
4079 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
4080 dev_info(dev, "VF%d has FILTMGMT privilege\n",
4085 /* Allow full available bandwidth */
4087 be_cmd_config_qos(adapter, 0, 0, vf + 1);
4089 status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
4090 vf_cfg->if_handle, NULL,
4093 vf_cfg->spoofchk = spoofchk;
4096 be_cmd_enable_vf(adapter, vf + 1);
4097 be_cmd_set_logical_link_config(adapter,
4098 IFLA_VF_LINK_STATE_AUTO,
4104 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
4106 dev_err(dev, "SRIOV enable failed\n");
4107 adapter->num_vfs = 0;
4112 if (BE3_chip(adapter)) {
4113 /* On BE3, enable VEB only when SRIOV is enabled */
4114 status = be_cmd_set_hsw_config(adapter, 0, 0,
4116 PORT_FWD_TYPE_VEB, 0);
4121 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
4124 dev_err(dev, "VF setup failed\n");
4125 be_vf_clear(adapter);
4129 /* Converting function_mode bits on BE3 to SH mc_type enums */
4131 static u8 be_convert_mc_type(u32 function_mode)
4133 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
4135 else if (function_mode & QNQ_MODE)
4137 else if (function_mode & VNIC_MODE)
4139 else if (function_mode & UMC_ENABLED)
4145 /* On BE2/BE3 FW does not suggest the supported limits */
4146 static void BEx_get_resources(struct be_adapter *adapter,
4147 struct be_resources *res)
4149 bool use_sriov = adapter->num_vfs ? 1 : 0;
4151 if (be_physfn(adapter))
4152 res->max_uc_mac = BE_UC_PMAC_COUNT;
4154 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
4156 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
4158 if (be_is_mc(adapter)) {
4159 /* Assuming that there are 4 channels per port,
4160 * when multi-channel is enabled
4162 if (be_is_qnq_mode(adapter))
4163 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
4165 /* In a non-qnq multichannel mode, the pvid
4166 * takes up one vlan entry
4168 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
4170 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
4173 res->max_mcast_mac = BE_MAX_MC;
4175 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
4176 * 2) Create multiple TX rings on a BE3-R multi-channel interface
4177 * *only* if it is RSS-capable.
4179 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
4180 be_virtfn(adapter) ||
4181 (be_is_mc(adapter) &&
4182 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
4184 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
4185 struct be_resources super_nic_res = {0};
4187 /* On a SuperNIC profile, the driver needs to use the
4188 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
4190 be_cmd_get_profile_config(adapter, &super_nic_res, NULL,
4191 ACTIVE_PROFILE_TYPE, RESOURCE_LIMITS,
4193 /* Some old versions of BE3 FW don't report max_tx_qs value */
4194 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
4196 res->max_tx_qs = BE3_MAX_TX_QS;
4199 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
4200 !use_sriov && be_physfn(adapter))
4201 res->max_rss_qs = (adapter->be3_native) ?
4202 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
4203 res->max_rx_qs = res->max_rss_qs + 1;
4205 if (be_physfn(adapter))
4206 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
4207 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
4209 res->max_evt_qs = 1;
4211 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
4212 res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
4213 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
4214 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
4217 static void be_setup_init(struct be_adapter *adapter)
4219 adapter->vlan_prio_bmap = 0xff;
4220 adapter->phy.link_speed = -1;
4221 adapter->if_handle = -1;
4222 adapter->be3_native = false;
4223 adapter->if_flags = 0;
4224 adapter->phy_state = BE_UNKNOWN_PHY_STATE;
4225 if (be_physfn(adapter))
4226 adapter->cmd_privileges = MAX_PRIVILEGES;
4228 adapter->cmd_privileges = MIN_PRIVILEGES;
4231 /* HW supports only MAX_PORT_RSS_TABLES RSS Policy Tables per port.
4232 * However, this HW limitation is not exposed to the host via any SLI cmd.
4233 * As a result, in the case of SRIOV and in particular multi-partition configs
4234 * the driver needs to calculate a proportional share of RSS Tables per PF-pool
4235 * for distribution between the VFs. This self-imposed limit will determine the
4236 * number of VFs for which RSS can be enabled.
4238 void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
4240 struct be_port_resources port_res = {0};
4241 u8 rss_tables_on_port;
4242 u16 max_vfs = be_max_vfs(adapter);
4244 be_cmd_get_profile_config(adapter, NULL, &port_res, SAVED_PROFILE_TYPE,
4245 RESOURCE_LIMITS, 0);
4247 rss_tables_on_port = MAX_PORT_RSS_TABLES - port_res.nic_pfs;
4249 /* Each PF Pool's RSS Tables limit =
4250 * PF's Max VFs / Total_Max_VFs on Port * RSS Tables on Port
4252 adapter->pool_res.max_rss_tables =
4253 max_vfs * rss_tables_on_port / port_res.max_vfs;
4256 static int be_get_sriov_config(struct be_adapter *adapter)
4258 struct be_resources res = {0};
4259 int max_vfs, old_vfs;
4261 be_cmd_get_profile_config(adapter, &res, NULL, ACTIVE_PROFILE_TYPE,
4262 RESOURCE_LIMITS, 0);
4264 /* Some old versions of BE3 FW don't report max_vfs value */
4265 if (BE3_chip(adapter) && !res.max_vfs) {
4266 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
4267 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
4270 adapter->pool_res = res;
4272 /* If during previous unload of the driver, the VFs were not disabled,
4273 * then we cannot rely on the PF POOL limits for the TotalVFs value.
4274 * Instead use the TotalVFs value stored in the pci-dev struct.
4276 old_vfs = pci_num_vf(adapter->pdev);
4278 dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
4281 adapter->pool_res.max_vfs =
4282 pci_sriov_get_totalvfs(adapter->pdev);
4283 adapter->num_vfs = old_vfs;
4286 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
4287 be_calculate_pf_pool_rss_tables(adapter);
4288 dev_info(&adapter->pdev->dev,
4289 "RSS can be enabled for all VFs if num_vfs <= %d\n",
4290 be_max_pf_pool_rss_tables(adapter));
4295 static void be_alloc_sriov_res(struct be_adapter *adapter)
4297 int old_vfs = pci_num_vf(adapter->pdev);
4298 struct be_resources vft_res = {0};
4301 be_get_sriov_config(adapter);
4304 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
4306 /* When the HW is in SRIOV capable configuration, the PF-pool
4307 * resources are given to PF during driver load, if there are no
4308 * old VFs. This facility is not available in BE3 FW.
4309 * Also, this is done by FW in Lancer chip.
4311 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
4312 be_calculate_vf_res(adapter, 0, &vft_res);
4313 status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
4316 dev_err(&adapter->pdev->dev,
4317 "Failed to optimize SRIOV resources\n");
4321 static int be_get_resources(struct be_adapter *adapter)
4323 struct device *dev = &adapter->pdev->dev;
4324 struct be_resources res = {0};
4327 /* For Lancer, SH etc read per-function resource limits from FW.
4328 * GET_FUNC_CONFIG returns per function guaranteed limits.
4329 * GET_PROFILE_CONFIG returns PCI-E related PF-pool limits
4331 if (BEx_chip(adapter)) {
4332 BEx_get_resources(adapter, &res);
4334 status = be_cmd_get_func_config(adapter, &res);
4338 /* If a default RXQ must be created, we'll use up one RSSQ */
4339 if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
4340 !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
4341 res.max_rss_qs -= 1;
4344 /* If RoCE is supported, stash away half the EQs for RoCE */
4345 res.max_nic_evt_qs = be_roce_supported(adapter) ?
4346 res.max_evt_qs / 2 : res.max_evt_qs;
4349 /* If FW supports RSS default queue, then skip creating non-RSS
4350 * queue for non-IP traffic.
4352 adapter->need_def_rxq = (be_if_cap_flags(adapter) &
4353 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;
4355 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
4356 be_max_txqs(adapter), be_max_rxqs(adapter),
4357 be_max_rss(adapter), be_max_nic_eqs(adapter),
4358 be_max_vfs(adapter));
4359 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
4360 be_max_uc(adapter), be_max_mc(adapter),
4361 be_max_vlans(adapter));
4363 /* Ensure RX and TX queues are created in pairs at init time */
4364 adapter->cfg_num_rx_irqs =
4365 min_t(u16, netif_get_num_default_rss_queues(),
4366 be_max_qp_irqs(adapter));
4367 adapter->cfg_num_tx_irqs = adapter->cfg_num_rx_irqs;
4371 static int be_get_config(struct be_adapter *adapter)
4376 status = be_cmd_get_cntl_attributes(adapter);
4380 status = be_cmd_query_fw_cfg(adapter);
4384 if (!lancer_chip(adapter) && be_physfn(adapter))
4385 be_cmd_get_fat_dump_len(adapter, &adapter->fat_dump_len);
4387 if (BEx_chip(adapter)) {
4388 level = be_cmd_get_fw_log_level(adapter);
4389 adapter->msg_enable =
4390 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4393 be_cmd_get_acpi_wol_cap(adapter);
4394 pci_enable_wake(adapter->pdev, PCI_D3hot, adapter->wol_en);
4395 pci_enable_wake(adapter->pdev, PCI_D3cold, adapter->wol_en);
4397 be_cmd_query_port_name(adapter);
4399 if (be_physfn(adapter)) {
4400 status = be_cmd_get_active_profile(adapter, &profile_id);
4402 dev_info(&adapter->pdev->dev,
4403 "Using profile 0x%x\n", profile_id);
4409 static int be_mac_setup(struct be_adapter *adapter)
4414 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4415 status = be_cmd_get_perm_mac(adapter, mac);
4419 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4420 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
4426 static void be_schedule_worker(struct be_adapter *adapter)
4428 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4429 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
4432 static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
4434 schedule_delayed_work(&adapter->be_err_detection_work,
4435 msecs_to_jiffies(delay));
4436 adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
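/* Create all queues (EQs, TX queues, RX CQs, MCC queues) and sync the
 * real number of TX/RX queues with the stack; the caller must hold
 * rtnl for the netif_set_real_num_*_queues() updates.
 */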
4439 static int be_setup_queues(struct be_adapter *adapter)
4441 struct net_device *netdev = adapter->netdev;
4444 status = be_evt_queues_create(adapter);
4448 status = be_tx_qs_create(adapter);
4452 status = be_rx_cqs_create(adapter);
4456 status = be_mcc_queues_create(adapter);
4460 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
4464 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
4470 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4474 static int be_if_create(struct be_adapter *adapter)
4476 u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
4477 u32 cap_flags = be_if_cap_flags(adapter);
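/* With a single RX queue RSS buys nothing; drop the RSS capability
 * flags below before creating the interface.
 */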
4480 if (adapter->cfg_num_rx_irqs == 1)
4481 cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);
4483 en_flags &= cap_flags;
4484 /* will enable all the needed filter flags in be_open() */
4485 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
4486 &adapter->if_handle, 0);
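/* Tear down and re-create the interface and queues with the current
 * configuration; used when the queue count changes, e.g. from
 * be_pci_sriov_configure() below.
 */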
4491 int be_update_queues(struct be_adapter *adapter)
4493 struct net_device *netdev = adapter->netdev;
4496 if (netif_running(netdev))
4499 be_cancel_worker(adapter);
4501 /* If any vectors have been shared with RoCE we cannot re-program the MSIx table */
4504 if (!adapter->num_msix_roce_vec)
4505 be_msix_disable(adapter);
4507 be_clear_queues(adapter);
4508 status = be_cmd_if_destroy(adapter, adapter->if_handle, 0);
4512 if (!msix_enabled(adapter)) {
4513 status = be_msix_enable(adapter);
4518 status = be_if_create(adapter);
4522 status = be_setup_queues(adapter);
4526 be_schedule_worker(adapter);
4528 if (netif_running(netdev))
4529 status = be_open(netdev);
4534 static inline int fw_major_num(const char *fw_ver)
4536 int fw_major = 0, i;
4538 i = sscanf(fw_ver, "%d.", &fw_major);
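/* e.g. fw_major_num("4.6.62.0") yields 4; an unparsable string yields 0 */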
4545 /* If any VFs are already enabled, don't FLR the PF */
4546 static bool be_reset_required(struct be_adapter *adapter)
4548 return pci_num_vf(adapter->pdev) == 0;
4551 /* Wait for the FW to be ready and perform the required initialization */
4552 static int be_func_init(struct be_adapter *adapter)
4556 status = be_fw_wait_ready(adapter);
4560 if (be_reset_required(adapter)) {
4561 status = be_cmd_reset_function(adapter);
4565 /* Wait for interrupts to quiesce after an FLR */
4568 /* We can clear all errors when function reset succeeds */
4569 be_clear_error(adapter, BE_CLEAR_ALL);
4572 /* Tell FW we're ready to fire cmds */
4573 status = be_cmd_fw_init(adapter);
4577 /* Allow interrupts for other ULPs running on NIC function */
4578 be_intr_set(adapter, true);
4583 static int be_setup(struct be_adapter *adapter)
4585 struct device *dev = &adapter->pdev->dev;
4588 status = be_func_init(adapter);
4592 be_setup_init(adapter);
4594 if (!lancer_chip(adapter))
4595 be_cmd_req_native_mode(adapter);
4597 /* invoke this cmd first to get pf_num and vf_num, which are needed
4598 * for issuing profile-related cmds
4600 if (!BEx_chip(adapter)) {
4601 status = be_cmd_get_func_config(adapter, NULL);
4606 status = be_get_config(adapter);
4610 if (!BE2_chip(adapter) && be_physfn(adapter))
4611 be_alloc_sriov_res(adapter);
4613 status = be_get_resources(adapter);
4617 adapter->pmac_id = kcalloc(be_max_uc(adapter),
4618 sizeof(*adapter->pmac_id), GFP_KERNEL);
4619 if (!adapter->pmac_id)
4622 status = be_msix_enable(adapter);
4626 /* will enable all the needed filter flags in be_open() */
4627 status = be_if_create(adapter);
4631 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
4633 status = be_setup_queues(adapter);
4638 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
4640 status = be_mac_setup(adapter);
4644 be_cmd_get_fw_ver(adapter);
4645 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
4647 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
4648 dev_err(dev, "Firmware on card is old (%s), IRQs may not work\n",
4650 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4653 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4656 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
4659 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
4660 adapter->tx_fc, adapter->rx_fc);
4662 if (be_physfn(adapter))
4663 be_cmd_set_logical_link_config(adapter,
4664 IFLA_VF_LINK_STATE_AUTO, 0);
4666 /* BE3 EVB echoes broadcast/multicast packets back to the PF's vport,
4667 * confusing a Linux bridge or OVS that it might be connected to.
4668 * Set the EVB to PASSTHRU mode, which effectively disables the EVB,
4669 * when SRIOV is not enabled.
4671 if (BE3_chip(adapter))
4672 be_cmd_set_hsw_config(adapter, 0, 0, adapter->if_handle,
4673 PORT_FWD_TYPE_PASSTHRU, 0);
4675 if (adapter->num_vfs)
4676 be_vf_setup(adapter);
4678 status = be_cmd_get_phy_info(adapter);
4679 if (!status && be_pause_supported(adapter))
4680 adapter->phy.fc_autoneg = 1;
4682 be_schedule_worker(adapter);
4683 adapter->flags |= BE_FLAGS_SETUP_DONE;
4690 #ifdef CONFIG_NET_POLL_CONTROLLER
4691 static void be_netpoll(struct net_device *netdev)
4693 struct be_adapter *adapter = netdev_priv(netdev);
4694 struct be_eq_obj *eqo;
4697 for_all_evt_queues(adapter, eqo, i) {
4698 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
4699 napi_schedule(&eqo->napi);
4704 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4706 const struct firmware *fw;
4709 if (!netif_running(adapter->netdev)) {
4710 dev_err(&adapter->pdev->dev,
4711 "Firmware load not allowed (interface is down)\n");
4715 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4719 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4721 if (lancer_chip(adapter))
4722 status = lancer_fw_download(adapter, fw);
4724 status = be_fw_download(adapter, fw);
4727 be_cmd_get_fw_ver(adapter);
4730 release_firmware(fw);
4734 static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4737 struct be_adapter *adapter = netdev_priv(dev);
4738 struct nlattr *attr, *br_spec;
4743 if (!sriov_enabled(adapter))
4746 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4750 nla_for_each_nested(attr, br_spec, rem) {
4751 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4754 if (nla_len(attr) < sizeof(mode))
4757 mode = nla_get_u16(attr);
4758 if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
4761 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4764 status = be_cmd_set_hsw_config(adapter, 0, 0,
4766 mode == BRIDGE_MODE_VEPA ?
4767 PORT_FWD_TYPE_VEPA :
4768 PORT_FWD_TYPE_VEB, 0);
4772 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4773 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4778 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4779 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4784 static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4785 struct net_device *dev, u32 filter_mask,
4788 struct be_adapter *adapter = netdev_priv(dev);
4792 /* BE and Lancer chips support VEB mode only */
4793 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4794 /* VEB is disabled in non-SR-IOV profiles on BE3/Lancer */
4795 if (!pci_sriov_get_totalvfs(adapter->pdev))
4797 hsw_mode = PORT_FWD_TYPE_VEB;
4799 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4800 adapter->if_handle, &hsw_mode,
4805 if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
4809 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4810 hsw_mode == PORT_FWD_TYPE_VEPA ?
4811 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
4812 0, 0, nlflags, filter_mask, NULL);
4815 /* VxLAN offload Notes:
4817 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4818 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4819 * is expected to work across all types of IP tunnels once exported. Skyhawk
4820 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
4821 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
4822 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
4823 * those other tunnels are unexported on the fly through ndo_features_check().
4825 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4826 * adds more than one port, disable offloads and don't re-enable them again
4827 * until after all the tunnels are removed.
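/* Illustrative sequence (hypothetical port numbers): adding UDP port 4789
 * enables offloads; adding a second port, say 8472, disables them while
 * both ports stay tracked; offloads are not re-enabled until every VxLAN
 * port has been removed and a port is added again.
 */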
4829 static void be_add_vxlan_port(struct net_device *netdev,
4830 struct udp_tunnel_info *ti)
4832 struct be_adapter *adapter = netdev_priv(netdev);
4833 struct device *dev = &adapter->pdev->dev;
4834 __be16 port = ti->port;
4837 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
4840 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
4843 if (adapter->vxlan_port == port && adapter->vxlan_port_count) {
4844 adapter->vxlan_port_aliases++;
4848 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
4850 "Only one UDP port supported for VxLAN offloads\n");
4851 dev_info(dev, "Disabling VxLAN offloads\n");
4852 adapter->vxlan_port_count++;
4856 if (adapter->vxlan_port_count++ >= 1)
4859 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4860 OP_CONVERT_NORMAL_TO_TUNNEL);
4862 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4866 status = be_cmd_set_vxlan_port(adapter, port);
4868 dev_warn(dev, "Failed to add VxLAN port\n");
4871 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4872 adapter->vxlan_port = port;
4874 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4875 NETIF_F_TSO | NETIF_F_TSO6 |
4876 NETIF_F_GSO_UDP_TUNNEL;
4877 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
4878 netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
4880 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4884 be_disable_vxlan_offloads(adapter);
4887 static void be_del_vxlan_port(struct net_device *netdev,
4888 struct udp_tunnel_info *ti)
4890 struct be_adapter *adapter = netdev_priv(netdev);
4891 __be16 port = ti->port;
4893 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
4896 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
4899 if (adapter->vxlan_port != port)
4902 if (adapter->vxlan_port_aliases) {
4903 adapter->vxlan_port_aliases--;
4907 be_disable_vxlan_offloads(adapter);
4909 dev_info(&adapter->pdev->dev,
4910 "Disabled VxLAN offloads for UDP port %d\n",
4913 adapter->vxlan_port_count--;
4916 static netdev_features_t be_features_check(struct sk_buff *skb,
4917 struct net_device *dev,
4918 netdev_features_t features)
4920 struct be_adapter *adapter = netdev_priv(dev);
4923 /* The code below restricts offload features for some tunneled packets.
4924 * Offload features for normal (non-tunnel) packets are unchanged.
4926 if (!skb->encapsulation ||
4927 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
4930 /* It's an encapsulated packet and VxLAN offloads are enabled. We
4931 * should disable tunnel offload features if it's not a VxLAN packet,
4932 * as tunnel offloads have been enabled only for VxLAN. This is done to
4933 * allow other tunneled traffic like GRE work fine while VxLAN
4934 * offloads are configured in Skyhawk-R.
4936 switch (vlan_get_protocol(skb)) {
4937 case htons(ETH_P_IP):
4938 l4_hdr = ip_hdr(skb)->protocol;
4940 case htons(ETH_P_IPV6):
4941 l4_hdr = ipv6_hdr(skb)->nexthdr;
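/* Offload only genuine VxLAN frames: UDP transport, an inner Ethernet
 * (TEB) payload, and exactly one UDP header plus one VxLAN header
 * between the outer transport header and the inner MAC header.
 */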
4947 if (l4_hdr != IPPROTO_UDP ||
4948 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
4949 skb->inner_protocol != htons(ETH_P_TEB) ||
4950 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
4951 sizeof(struct udphdr) + sizeof(struct vxlanhdr))
4952 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
4957 static int be_get_phys_port_id(struct net_device *dev,
4958 struct netdev_phys_item_id *ppid)
4960 int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
4961 struct be_adapter *adapter = netdev_priv(dev);
4964 if (MAX_PHYS_ITEM_ID_LEN < id_len)
4967 ppid->id[0] = adapter->hba_port_num + 1;
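/* The id is the one-based HBA port number followed by the controller
 * serial-number words copied in reverse order.
 */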
4969 for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
4970 i--, id += CNTL_SERIAL_NUM_WORD_SZ)
4971 memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);
4973 ppid->id_len = id_len;
4978 static const struct net_device_ops be_netdev_ops = {
4979 .ndo_open = be_open,
4980 .ndo_stop = be_close,
4981 .ndo_start_xmit = be_xmit,
4982 .ndo_set_rx_mode = be_set_rx_mode,
4983 .ndo_set_mac_address = be_mac_addr_set,
4984 .ndo_change_mtu = be_change_mtu,
4985 .ndo_get_stats64 = be_get_stats64,
4986 .ndo_validate_addr = eth_validate_addr,
4987 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
4988 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
4989 .ndo_set_vf_mac = be_set_vf_mac,
4990 .ndo_set_vf_vlan = be_set_vf_vlan,
4991 .ndo_set_vf_rate = be_set_vf_tx_rate,
4992 .ndo_get_vf_config = be_get_vf_config,
4993 .ndo_set_vf_link_state = be_set_vf_link_state,
4994 .ndo_set_vf_spoofchk = be_set_vf_spoofchk,
4995 #ifdef CONFIG_NET_POLL_CONTROLLER
4996 .ndo_poll_controller = be_netpoll,
4998 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4999 .ndo_bridge_getlink = be_ndo_bridge_getlink,
5000 #ifdef CONFIG_NET_RX_BUSY_POLL
5001 .ndo_busy_poll = be_busy_poll,
5003 .ndo_udp_tunnel_add = be_add_vxlan_port,
5004 .ndo_udp_tunnel_del = be_del_vxlan_port,
5005 .ndo_features_check = be_features_check,
5006 .ndo_get_phys_port_id = be_get_phys_port_id,
5009 static void be_netdev_init(struct net_device *netdev)
5011 struct be_adapter *adapter = netdev_priv(netdev);
5013 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
5014 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
5015 NETIF_F_HW_VLAN_CTAG_TX;
5016 if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
5017 netdev->hw_features |= NETIF_F_RXHASH;
5019 netdev->features |= netdev->hw_features |
5020 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
5022 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
5023 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
5025 netdev->priv_flags |= IFF_UNICAST_FLT;
5027 netdev->flags |= IFF_MULTICAST;
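/* Leave room for the Ethernet header within the GSO limit (assumes the
 * device's max GSO size, BE_MAX_GSO_SIZE, covers the whole frame).
 */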
5029 netif_set_gso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);
5031 netdev->netdev_ops = &be_netdev_ops;
5033 netdev->ethtool_ops = &be_ethtool_ops;
5036 static void be_cleanup(struct be_adapter *adapter)
5038 struct net_device *netdev = adapter->netdev;
5041 netif_device_detach(netdev);
5042 if (netif_running(netdev))
5049 static int be_resume(struct be_adapter *adapter)
5051 struct net_device *netdev = adapter->netdev;
5054 status = be_setup(adapter);
5059 if (netif_running(netdev))
5060 status = be_open(netdev);
5066 netif_device_attach(netdev);
5071 static int be_err_recover(struct be_adapter *adapter)
5075 /* Error recovery is supported only on Lancer as of now */
5076 if (!lancer_chip(adapter))
5079 /* Wait for the adapter to reach a quiescent state before destroying queues */
5082 status = be_fw_wait_ready(adapter);
5086 be_cleanup(adapter);
5088 status = be_resume(adapter);
5097 static void be_err_detection_task(struct work_struct *work)
5099 struct be_adapter *adapter =
5100 container_of(work, struct be_adapter,
5101 be_err_detection_work.work);
5102 struct device *dev = &adapter->pdev->dev;
5103 int recovery_status;
5104 int delay = ERR_DETECTION_DELAY;
5106 be_detect_error(adapter);
5108 if (be_check_error(adapter, BE_ERROR_HW))
5109 recovery_status = be_err_recover(adapter);
5111 goto reschedule_task;
5113 if (!recovery_status) {
5114 adapter->recovery_retries = 0;
5115 dev_info(dev, "Adapter recovery successful\n");
5116 goto reschedule_task;
5117 } else if (be_virtfn(adapter)) {
5118 /* For VFs, check if the PF has allocated resources
5121 dev_err(dev, "Re-trying adapter recovery\n");
5122 goto reschedule_task;
5123 } else if (adapter->recovery_retries++ <
5124 MAX_ERR_RECOVERY_RETRY_COUNT) {
5125 /* In case of another error during recovery, it takes 30 sec
5126 * for the adapter to come out of error. Retry error recovery after
5127 * this time interval.
5129 dev_err(dev, "Re-trying adapter recovery\n");
5130 delay = ERR_RECOVERY_RETRY_DELAY;
5131 goto reschedule_task;
5133 dev_err(dev, "Adapter recovery failed\n");
5138 be_schedule_err_detection(adapter, delay);
5141 static void be_log_sfp_info(struct be_adapter *adapter)
5145 status = be_cmd_query_sfp_info(adapter);
5147 dev_err(&adapter->pdev->dev,
5148 "Port %c: %s Vendor: %s part no: %s",
5150 be_misconfig_evt_port_state[adapter->phy_state],
5151 adapter->phy.vendor_name,
5152 adapter->phy.vendor_pn);
5154 adapter->flags &= ~BE_FLAGS_PHY_MISCONFIGURED;
5157 static void be_worker(struct work_struct *work)
5159 struct be_adapter *adapter =
5160 container_of(work, struct be_adapter, work.work);
5161 struct be_rx_obj *rxo;
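/* The die temperature is sampled only on the PF and only every
 * be_get_temp_freq (64) iterations of this 1-second worker.
 */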
5164 if (be_physfn(adapter) &&
5165 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
5166 be_cmd_get_die_temperature(adapter);
5168 /* When interrupts are not yet enabled, just reap any pending MCC completions */
5171 if (!netif_running(adapter->netdev)) {
5173 be_process_mcc(adapter);
5178 if (!adapter->stats_cmd_sent) {
5179 if (lancer_chip(adapter))
5180 lancer_cmd_get_pport_stats(adapter,
5181 &adapter->stats_cmd);
5183 be_cmd_get_stats(adapter, &adapter->stats_cmd);
5186 for_all_rx_queues(adapter, rxo, i) {
5187 /* Replenish RX-queues starved due to memory
5188 * allocation failures.
5190 if (rxo->rx_post_starved)
5191 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
5194 /* EQ-delay update for Skyhawk is done while notifying EQ */
5195 if (!skyhawk_chip(adapter))
5196 be_eqd_update(adapter, false);
5198 if (adapter->flags & BE_FLAGS_PHY_MISCONFIGURED)
5199 be_log_sfp_info(adapter);
5202 adapter->work_counter++;
5203 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
5206 static void be_unmap_pci_bars(struct be_adapter *adapter)
5209 pci_iounmap(adapter->pdev, adapter->csr);
5211 pci_iounmap(adapter->pdev, adapter->db);
5212 if (adapter->pcicfg && adapter->pcicfg_mapped)
5213 pci_iounmap(adapter->pdev, adapter->pcicfg);
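/* Doorbell BAR: Lancer chips and VFs expose the doorbell in BAR 0;
 * BE-x/Skyhawk PFs use a separate doorbell BAR.
 */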
5216 static int db_bar(struct be_adapter *adapter)
5218 if (lancer_chip(adapter) || be_virtfn(adapter))
5224 static int be_roce_map_pci_bars(struct be_adapter *adapter)
5226 if (skyhawk_chip(adapter)) {
5227 adapter->roce_db.size = 4096;
5228 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5230 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5236 static int be_map_pci_bars(struct be_adapter *adapter)
5238 struct pci_dev *pdev = adapter->pdev;
5242 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
5243 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
5244 SLI_INTF_FAMILY_SHIFT;
5245 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
5247 if (BEx_chip(adapter) && be_physfn(adapter)) {
5248 adapter->csr = pci_iomap(pdev, 2, 0);
5253 addr = pci_iomap(pdev, db_bar(adapter), 0);
5258 if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
5259 if (be_physfn(adapter)) {
5260 /* PCICFG is the 2nd BAR in BE2 */
5261 addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
5264 adapter->pcicfg = addr;
5265 adapter->pcicfg_mapped = true;
5267 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
5268 adapter->pcicfg_mapped = false;
5272 be_roce_map_pci_bars(adapter);
5276 dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
5277 be_unmap_pci_bars(adapter);
5281 static void be_drv_cleanup(struct be_adapter *adapter)
5283 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
5284 struct device *dev = &adapter->pdev->dev;
5287 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5289 mem = &adapter->rx_filter;
5291 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5293 mem = &adapter->stats_cmd;
5295 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5298 /* Allocate and initialize various fields in be_adapter struct */
5299 static int be_drv_init(struct be_adapter *adapter)
5301 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
5302 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5303 struct be_dma_mem *rx_filter = &adapter->rx_filter;
5304 struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
5305 struct device *dev = &adapter->pdev->dev;
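/* The FW mailbox must be 16-byte aligned: over-allocate by 16 bytes and
 * align both the virtual and DMA addresses below.
 */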
5308 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
5309 mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
5310 &mbox_mem_alloc->dma,
5312 if (!mbox_mem_alloc->va)
5315 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
5316 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
5317 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
5319 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
5320 rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
5321 &rx_filter->dma, GFP_KERNEL);
5322 if (!rx_filter->va) {
5327 if (lancer_chip(adapter))
5328 stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
5329 else if (BE2_chip(adapter))
5330 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
5331 else if (BE3_chip(adapter))
5332 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
5334 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
5335 stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
5336 &stats_cmd->dma, GFP_KERNEL);
5337 if (!stats_cmd->va) {
5339 goto free_rx_filter;
5342 mutex_init(&adapter->mbox_lock);
5343 spin_lock_init(&adapter->mcc_lock);
5344 spin_lock_init(&adapter->mcc_cq_lock);
5345 init_completion(&adapter->et_cmd_compl);
5347 pci_save_state(adapter->pdev);
5349 INIT_DELAYED_WORK(&adapter->work, be_worker);
5350 INIT_DELAYED_WORK(&adapter->be_err_detection_work,
5351 be_err_detection_task);
5353 adapter->rx_fc = true;
5354 adapter->tx_fc = true;
5356 /* Must be a power of 2 or else MODULO will BUG_ON */
5357 adapter->be_get_temp_freq = 64;
5362 dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
5364 dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
5365 mbox_mem_alloc->dma);
5369 static void be_remove(struct pci_dev *pdev)
5371 struct be_adapter *adapter = pci_get_drvdata(pdev);
5376 be_roce_dev_remove(adapter);
5377 be_intr_set(adapter, false);
5379 be_cancel_err_detection(adapter);
5381 unregister_netdev(adapter->netdev);
5385 /* tell fw we're done with firing cmds */
5386 be_cmd_fw_clean(adapter);
5388 be_unmap_pci_bars(adapter);
5389 be_drv_cleanup(adapter);
5391 pci_disable_pcie_error_reporting(pdev);
5393 pci_release_regions(pdev);
5394 pci_disable_device(pdev);
5396 free_netdev(adapter->netdev);
5399 static ssize_t be_hwmon_show_temp(struct device *dev,
5400 struct device_attribute *dev_attr,
5403 struct be_adapter *adapter = dev_get_drvdata(dev);
5405 /* Unit: millidegree Celsius */
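/* e.g. a die temperature of 45 degC is reported as 45000 */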
5406 if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
5409 return sprintf(buf, "%u\n",
5410 adapter->hwmon_info.be_on_die_temp * 1000);
5413 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
5414 be_hwmon_show_temp, NULL, 1);
5416 static struct attribute *be_hwmon_attrs[] = {
5417 &sensor_dev_attr_temp1_input.dev_attr.attr,
5421 ATTRIBUTE_GROUPS(be_hwmon);
5423 static char *mc_name(struct be_adapter *adapter)
5425 char *str = ""; /* default */
5427 switch (adapter->mc_type) {
5453 static inline char *func_name(struct be_adapter *adapter)
5455 return be_physfn(adapter) ? "PF" : "VF";
5458 static inline char *nic_name(struct pci_dev *pdev)
5460 switch (pdev->device) {
5467 return OC_NAME_LANCER;
5478 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
5480 struct be_adapter *adapter;
5481 struct net_device *netdev;
5484 dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);
5486 status = pci_enable_device(pdev);
5490 status = pci_request_regions(pdev, DRV_NAME);
5493 pci_set_master(pdev);
5495 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
5500 adapter = netdev_priv(netdev);
5501 adapter->pdev = pdev;
5502 pci_set_drvdata(pdev, adapter);
5503 adapter->netdev = netdev;
5504 SET_NETDEV_DEV(netdev, &pdev->dev);
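/* Prefer a 64-bit DMA mask; fall back to 32-bit if the platform
 * cannot provide it.
 */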
5506 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
5508 netdev->features |= NETIF_F_HIGHDMA;
5510 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
5512 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
5517 status = pci_enable_pcie_error_reporting(pdev);
5519 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
5521 status = be_map_pci_bars(adapter);
5525 status = be_drv_init(adapter);
5529 status = be_setup(adapter);
5533 be_netdev_init(netdev);
5534 status = register_netdev(netdev);
5538 be_roce_dev_add(adapter);
5540 be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
5542 /* On-die temperature is not supported on VFs */
5543 if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
5544 adapter->hwmon_info.hwmon_dev =
5545 devm_hwmon_device_register_with_groups(&pdev->dev,
5549 adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
5552 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
5553 func_name(adapter), mc_name(adapter), adapter->port_name);
5560 be_drv_cleanup(adapter);
5562 be_unmap_pci_bars(adapter);
5564 free_netdev(netdev);
5566 pci_release_regions(pdev);
5568 pci_disable_device(pdev);
5570 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
5574 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
5576 struct be_adapter *adapter = pci_get_drvdata(pdev);
5578 be_intr_set(adapter, false);
5579 be_cancel_err_detection(adapter);
5581 be_cleanup(adapter);
5583 pci_save_state(pdev);
5584 pci_disable_device(pdev);
5585 pci_set_power_state(pdev, pci_choose_state(pdev, state));
5589 static int be_pci_resume(struct pci_dev *pdev)
5591 struct be_adapter *adapter = pci_get_drvdata(pdev);
5594 status = pci_enable_device(pdev);
5598 pci_restore_state(pdev);
5600 status = be_resume(adapter);
5604 be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
5610 * An FLR will stop BE from DMAing any data.
5612 static void be_shutdown(struct pci_dev *pdev)
5614 struct be_adapter *adapter = pci_get_drvdata(pdev);
5619 be_roce_dev_shutdown(adapter);
5620 cancel_delayed_work_sync(&adapter->work);
5621 be_cancel_err_detection(adapter);
5623 netif_device_detach(adapter->netdev);
5625 be_cmd_reset_function(adapter);
5627 pci_disable_device(pdev);
5630 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
5631 pci_channel_state_t state)
5633 struct be_adapter *adapter = pci_get_drvdata(pdev);
5635 dev_err(&adapter->pdev->dev, "EEH error detected\n");
5637 be_roce_dev_remove(adapter);
5639 if (!be_check_error(adapter, BE_ERROR_EEH)) {
5640 be_set_error(adapter, BE_ERROR_EEH);
5642 be_cancel_err_detection(adapter);
5644 be_cleanup(adapter);
5647 if (state == pci_channel_io_perm_failure)
5648 return PCI_ERS_RESULT_DISCONNECT;
5650 pci_disable_device(pdev);
5652 /* The error could cause the FW to trigger a flash debug dump.
5653 * Resetting the card while flash dump is in progress
5654 * can cause it not to recover; wait for it to finish.
5655 * Wait only for the first function, as it is needed only once per adapter.
5658 if (pdev->devfn == 0)
5661 return PCI_ERS_RESULT_NEED_RESET;
5664 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
5666 struct be_adapter *adapter = pci_get_drvdata(pdev);
5669 dev_info(&adapter->pdev->dev, "EEH reset\n");
5671 status = pci_enable_device(pdev);
5673 return PCI_ERS_RESULT_DISCONNECT;
5675 pci_set_master(pdev);
5676 pci_restore_state(pdev);
5678 /* Check if card is ok and fw is ready */
5679 dev_info(&adapter->pdev->dev,
5680 "Waiting for FW to be ready after EEH reset\n");
5681 status = be_fw_wait_ready(adapter);
5683 return PCI_ERS_RESULT_DISCONNECT;
5685 pci_cleanup_aer_uncorrect_error_status(pdev);
5686 be_clear_error(adapter, BE_CLEAR_ALL);
5687 return PCI_ERS_RESULT_RECOVERED;
5690 static void be_eeh_resume(struct pci_dev *pdev)
5693 struct be_adapter *adapter = pci_get_drvdata(pdev);
5695 dev_info(&adapter->pdev->dev, "EEH resume\n");
5697 pci_save_state(pdev);
5699 status = be_resume(adapter);
5703 be_roce_dev_add(adapter);
5705 be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
5708 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
5711 static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
5713 struct be_adapter *adapter = pci_get_drvdata(pdev);
5714 struct be_resources vft_res = {0};
5718 be_vf_clear(adapter);
5720 adapter->num_vfs = num_vfs;
5722 if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
5723 dev_warn(&pdev->dev,
5724 "Cannot disable VFs while they are assigned\n");
5728 /* When the HW is in SRIOV capable configuration, the PF-pool resources
5729 * are equally distributed across the max-number of VFs. The user may
5730 * request only a subset of the max-vfs to be enabled.
5731 * Based on num_vfs, redistribute the resources across num_vfs so that
5732 * each VF will have access to a greater number of resources.
5733 * This facility is not available in BE3 FW.
5734 * Also, this is done by FW in Lancer chip.
5736 if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
5737 be_calculate_vf_res(adapter, adapter->num_vfs,
5739 status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
5740 adapter->num_vfs, &vft_res);
5743 "Failed to optimize SR-IOV resources\n");
5746 status = be_get_resources(adapter);
5748 return be_cmd_status(status);
5750 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
5752 status = be_update_queues(adapter);
5755 return be_cmd_status(status);
5757 if (adapter->num_vfs)
5758 status = be_vf_setup(adapter);
5761 return adapter->num_vfs;
5766 static const struct pci_error_handlers be_eeh_handlers = {
5767 .error_detected = be_eeh_err_detected,
5768 .slot_reset = be_eeh_reset,
5769 .resume = be_eeh_resume,
5772 static struct pci_driver be_driver = {
5774 .id_table = be_dev_ids,
5776 .remove = be_remove,
5777 .suspend = be_suspend,
5778 .resume = be_pci_resume,
5779 .shutdown = be_shutdown,
5780 .sriov_configure = be_pci_sriov_configure,
5781 .err_handler = &be_eeh_handlers
5784 static int __init be_init_module(void)
5786 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5787 rx_frag_size != 2048) {
5788 printk(KERN_WARNING DRV_NAME
5789 " : Module param rx_frag_size must be 2048/4096/8192."
5791 rx_frag_size = 2048;
5795 pr_info(DRV_NAME " : Module param num_vfs is obsolete.\n");
5796 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
5799 return pci_register_driver(&be_driver);
5801 module_init(be_init_module);
5803 static void __exit be_exit_module(void)
5805 pci_unregister_driver(&be_driver);
5807 module_exit(be_exit_module);