/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
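
/* Free the DMA-coherent memory backing a queue, if it was allocated */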
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}
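
/* Allocate zeroed DMA-coherent memory for a ring of 'len' entries */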
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}
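
/* Enable/disable host interrupt delivery via the HOSTINTR bit in PCI config space */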
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
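
/* Enable/disable interrupts via a FW cmd; falls back to the PCI
 * config-space HOSTINTR bit if the cmd fails
 */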
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On Lancer, interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}
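
/* Ring the RQ doorbell with the number of rx buffers posted */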
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
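
/* ndo_set_mac_address handler: adds the new MAC as a PMAC entry and
 * verifies with the FW that it is active before committing it to netdev
 */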
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}
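
/* Accumulate a 16-bit HW counter into a 32-bit SW counter, handling
 * wrap-around of the HW counter's low 16 bits
 */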
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)	(x & 0xFFFF)
#define hi(x)	(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* this erx HW counter wraps around after 65535; the driver
		 * accumulates it into a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}
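
/* Parse the version-specific FW stats response into adapter->drv_stats */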
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}
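
/* ndo_get_stats64 handler: aggregate the per-queue SW counters (read under
 * u64_stats sync) and the FW error counters into rtnl_link_stats64
 */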
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
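
/* Propagate a link-status change to the net stack via netif_carrier */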
void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len;
	stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
	u64_stats_update_end(&stats->sync);
}

/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
	/* +1 for the header wrb */
	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
	wrb->rsvd0 = 0;
}

/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
 */
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
{
	wrb->rsvd0 = 0;
	wrb->frag_pa_hi = 0;
	wrb->frag_pa_lo = 0;
	wrb->frag_len = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = skb_vlan_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio;

	return vlan_tag;
}

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

static inline bool be_is_txq_full(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
}

static inline bool be_can_txq_wake(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) < txo->q.len / 2;
}

static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
}

static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}

static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;
	u32 frag_len = le32_to_cpu(wrb->frag_len);

	dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
		(u64)le32_to_cpu(wrb->frag_pa_lo);
	if (frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
	}
}

/* Grab a WRB header for xmit */
static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
{
	u16 head = txo->q.head;

	queue_head_inc(&txo->q);
	return head;
}

/* Set up the WRB header for xmit */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}

/* Setup a WRB fragment (buffer descriptor) for xmit */
static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
				 int len)
{
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	wrb = queue_head_node(txq);
	wrb_fill(wrb, busaddr, len);
	queue_head_inc(txq);
}

/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u16 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	txq->head = head;
}

/* Enqueue the given packet for transmit. This routine allocates WRBs for the
 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
 * of WRBs used up by the packet.
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb,
			   struct be_wrb_params *wrb_params)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	bool map_single = false;
	u16 head = txq->head;
	dma_addr_t busaddr;
	int len;

	head = be_tx_get_wrb_hdr(txo);

	if (skb->len > skb->data_len) {
		len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(frag);

		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	adapter->drv_stats.dma_map_errors++;
	be_xmit_restore(adapter, txo, head, map_single, copied);
	return 0;
}

static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}
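
/* Insert the VLAN tag (and the outer QnQ tag, if configured) into the packet
 * data itself instead of relying on HW VLAN tag insertion
 */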
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params
					     *wrb_params)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}
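
/* Returns true for the ipv6 extension-header layout that is known to
 * trigger the HW bug worked around in be_lancer_xmit_workarounds()
 */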
static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
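
/* Apply chip-specific TX workarounds; may modify, pad or drop the skb */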
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   struct be_wrb_params *wrb_params)
{
	/* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
	 * less may cause a transmit stall on that port. So the work-around is
	 * to pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
		if (!skb)
			return NULL;
	}

	return skb;
}
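
/* Notify HW of all pending WRBs on this queue; non-Lancer chips require an
 * even number of WRBs, so pad with a dummy WRB when needed
 */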
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
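
/* ndo_start_xmit handler */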
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;

	if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
		dev_info(dev, "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU, BE_MAX_MTU);
		return -EINVAL;
	}

	dev_info(dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
	return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
			BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static int be_set_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
	if (!status) {
		dev_info(dev, "Enabled VLAN promiscuous mode\n");
		adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
	} else {
		dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
	}
	return status;
}

static int be_clear_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
	if (!status) {
		dev_info(dev, "Disabling VLAN promiscuous mode\n");
		adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}
	return status;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return 0;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	return be_vid_config(adapter);
}

static void be_clear_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_mc_promisc(struct be_adapter *adapter)
{
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
	if (!status)
		adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
}

static void be_set_mc_list(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
	if (!status)
		adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
	else
		be_set_mc_promisc(adapter);
}

static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}

static void be_clear_uc_list(struct be_adapter *adapter)
{
	int i;

	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->max_tx_rate = vf_cfg->tx_rate;
	vi->min_tx_rate = 0;
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
	vi->linkstate = adapter->vf_cfg[vf].plink_tracking;

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		if (vf_cfg->vlan_tag != vlan)
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
	} else {
		/* Reset Transparent Vlan Tagging. */
		status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
					       vf + 1, vf_cfg->if_handle, 0);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan,
			vf, status);
		return be_cmd_status(status);
	}

	vf_cfg->vlan_tag = vlan;

	return 0;
}

static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}

static int be_set_vf_link_state(struct net_device *netdev, int vf,
				int link_state)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Link state change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	adapter->vf_cfg[vf].plink_tracking = link_state;

	return 0;
}

static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
			  ulong now)
{
	aic->rx_pkts_prev = rx_pkts;
	aic->tx_reqs_prev = tx_pkts;
	aic->jiffies = now;
}

static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));

		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65) / 100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts
	 */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
		(rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
}

static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
						page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}

static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
	rxcp->tunneled =
		GET_RX_COMPL_V1_BITS(tunneled, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
	rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
}
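
/* Return the next valid RX completion (v0 or v1 format), or NULL if none is
 * pending; also applies the VLAN-tag quirks for QnQ and pvid modes
 */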
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		do {
			notify = min(256u, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
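
/* Return the next valid TX completion, or NULL if none is pending */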
static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_tx_compl_info *txcp = &txo->txcp;
	struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);

	if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure load ordering of valid bit dword and other dwords below */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	txcp->status = GET_TX_COMPL_BITS(status, compl);
	txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);

	compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
	queue_tail_inc(tx_cq);
	return txcp;
}
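
/* Unmap the frags of the TX request ending at last_index, free its skb,
 * and return the number of WRBs (including the header WRB) processed
 */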
2102 static u16 be_tx_compl_process(struct be_adapter *adapter,
2103 struct be_tx_obj *txo, u16 last_index)
2105 struct sk_buff **sent_skbs = txo->sent_skb_list;
2106 struct be_queue_info *txq = &txo->q;
2107 u16 frag_index, num_wrbs = 0;
2108 struct sk_buff *skb = NULL;
2109 bool unmap_skb_hdr = false;
2110 struct be_eth_wrb *wrb;
2113 if (sent_skbs[txq->tail]) {
2114 /* Free skb from prev req */
2115 if (skb)
2116 dev_consume_skb_any(skb);
2117 skb = sent_skbs[txq->tail];
2118 sent_skbs[txq->tail] = NULL;
2119 queue_tail_inc(txq); /* skip hdr wrb */
2121 unmap_skb_hdr = true;
2123 wrb = queue_tail_node(txq);
2124 frag_index = txq->tail;
2125 unmap_tx_frag(&adapter->pdev->dev, wrb,
2126 (unmap_skb_hdr && skb_headlen(skb)));
2127 unmap_skb_hdr = false;
2128 queue_tail_inc(txq);
2130 } while (frag_index != last_index);
2131 dev_consume_skb_any(skb);
2136 /* Return the number of events in the event queue */
2137 static inline int events_get(struct be_eq_obj *eqo)
2139 struct be_eq_entry *eqe;
2143 eqe = queue_tail_node(&eqo->q);
2150 queue_tail_inc(&eqo->q);
2156 /* Leaves the EQ in disarmed state */
2157 static void be_eq_clean(struct be_eq_obj *eqo)
2159 int num = events_get(eqo);
2161 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
2164 static void be_rx_cq_clean(struct be_rx_obj *rxo)
2166 struct be_rx_page_info *page_info;
2167 struct be_queue_info *rxq = &rxo->q;
2168 struct be_queue_info *rx_cq = &rxo->cq;
2169 struct be_rx_compl_info *rxcp;
2170 struct be_adapter *adapter = rxo->adapter;
2173 /* Consume pending rx completions.
2174 * Wait for the flush completion (identified by zero num_rcvd)
2175 * to arrive. Notify CQ even when there are no more CQ entries
2176 * for HW to flush partially coalesced CQ entries.
2177 * In Lancer, there is no need to wait for flush compl.
2180 rxcp = be_rx_compl_get(rxo);
2182 if (lancer_chip(adapter))
2185 if (flush_wait++ > 10 || be_hw_error(adapter)) {
2186 dev_warn(&adapter->pdev->dev,
2187 "did not receive flush compl\n");
2190 be_cq_notify(adapter, rx_cq->id, true, 0);
2193 be_rx_compl_discard(rxo, rxcp);
2194 be_cq_notify(adapter, rx_cq->id, false, 1);
2195 if (rxcp->num_rcvd == 0)
2200 /* After cleanup, leave the CQ in unarmed state */
2201 be_cq_notify(adapter, rx_cq->id, false, 0);
2203 /* Then free posted rx buffers that were not used */
2204 while (atomic_read(&rxq->used) > 0) {
2205 page_info = get_rx_page_info(rxo);
2206 put_page(page_info->page);
2207 memset(page_info, 0, sizeof(*page_info));
2209 BUG_ON(atomic_read(&rxq->used));
2214 static void be_tx_compl_clean(struct be_adapter *adapter)
2216 u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
2217 struct device *dev = &adapter->pdev->dev;
2218 struct be_tx_compl_info *txcp;
2219 struct be_queue_info *txq;
2220 struct be_tx_obj *txo;
2221 int i, pending_txqs;
2223 /* Stop polling for compls when HW has been silent for 10ms */
2225 pending_txqs = adapter->num_tx_qs;
2227 for_all_tx_queues(adapter, txo, i) {
2231 while ((txcp = be_tx_compl_get(txo))) {
2233 be_tx_compl_process(adapter, txo,
2238 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2239 atomic_sub(num_wrbs, &txq->used);
2242 if (!be_is_tx_compl_pending(txo))
2246 if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
2252 /* Free enqueued TX that was never notified to HW */
2253 for_all_tx_queues(adapter, txo, i) {
2256 if (atomic_read(&txq->used)) {
2257 dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2258 i, atomic_read(&txq->used));
2259 notified_idx = txq->tail;
2260 end_idx = txq->tail;
2261 index_adv(&end_idx, atomic_read(&txq->used) - 1,
2263 /* Use the tx-compl process logic to handle requests
2264 * that were not sent to the HW.
2266 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2267 atomic_sub(num_wrbs, &txq->used);
2268 BUG_ON(atomic_read(&txq->used));
2269 txo->pend_wrb_cnt = 0;
2270 /* Since hw was never notified of these requests,
2273 txq->head = notified_idx;
2274 txq->tail = notified_idx;
2279 static void be_evt_queues_destroy(struct be_adapter *adapter)
2281 struct be_eq_obj *eqo;
2284 for_all_evt_queues(adapter, eqo, i) {
2285 if (eqo->q.created) {
2287 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
2288 napi_hash_del(&eqo->napi);
2289 netif_napi_del(&eqo->napi);
2291 be_queue_free(adapter, &eqo->q);
2295 static int be_evt_queues_create(struct be_adapter *adapter)
2297 struct be_queue_info *eq;
2298 struct be_eq_obj *eqo;
2299 struct be_aic_obj *aic;
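/* Create one EQ (with its own NAPI context) per IRQ vector, capped by
 * the configured number of queues.
 */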
2302 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2303 adapter->cfg_num_qs);
2305 for_all_evt_queues(adapter, eqo, i) {
2306 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2308 napi_hash_add(&eqo->napi);
2309 aic = &adapter->aic_obj[i];
2310 eqo->adapter = adapter;
2312 aic->max_eqd = BE_MAX_EQD;
2316 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2317 sizeof(struct be_eq_entry));
2321 rc = be_cmd_eq_create(adapter, eqo);
2328 static void be_mcc_queues_destroy(struct be_adapter *adapter)
2330 struct be_queue_info *q;
2332 q = &adapter->mcc_obj.q;
2334 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
2335 be_queue_free(adapter, q);
2337 q = &adapter->mcc_obj.cq;
2339 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2340 be_queue_free(adapter, q);
2343 /* Must be called only after TX qs are created as MCC shares TX EQ */
2344 static int be_mcc_queues_create(struct be_adapter *adapter)
2346 struct be_queue_info *q, *cq;
2348 cq = &adapter->mcc_obj.cq;
2349 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
2350 sizeof(struct be_mcc_compl)))
2353 /* Use the default EQ for MCC completions */
2354 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
2357 q = &adapter->mcc_obj.q;
2358 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2359 goto mcc_cq_destroy;
2361 if (be_cmd_mccq_create(adapter, q, cq))
2367 be_queue_free(adapter, q);
2369 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
2371 be_queue_free(adapter, cq);
2376 static void be_tx_queues_destroy(struct be_adapter *adapter)
2378 struct be_queue_info *q;
2379 struct be_tx_obj *txo;
2382 for_all_tx_queues(adapter, txo, i) {
2385 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2386 be_queue_free(adapter, q);
2390 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2391 be_queue_free(adapter, q);
2395 static int be_tx_qs_create(struct be_adapter *adapter)
2397 struct be_queue_info *cq, *eq;
2398 struct be_tx_obj *txo;
2401 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
2403 for_all_tx_queues(adapter, txo, i) {
2405 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2406 sizeof(struct be_eth_tx_compl));
2410 u64_stats_init(&txo->stats.sync);
2411 u64_stats_init(&txo->stats.sync_compl);
2413 /* If num_evt_qs is less than num_tx_qs, then more than
2414 * one txq shares an eq
2416 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2417 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2421 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2422 sizeof(struct be_eth_wrb));
2426 status = be_cmd_txq_create(adapter, txo);
2431 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2432 adapter->num_tx_qs);
2436 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2438 struct be_queue_info *q;
2439 struct be_rx_obj *rxo;
2442 for_all_rx_queues(adapter, rxo, i) {
2445 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2446 be_queue_free(adapter, q);
2450 static int be_rx_cqs_create(struct be_adapter *adapter)
2452 struct be_queue_info *eq, *cq;
2453 struct be_rx_obj *rxo;
2456 /* We can create as many RSS rings as there are EQs. */
2457 adapter->num_rx_qs = adapter->num_evt_qs;
2459 /* We'll use RSS only if at least 2 RSS rings are supported.
2460 * When RSS is used, we'll need a default RXQ for non-IP traffic.
2462 if (adapter->num_rx_qs > 1)
2463 adapter->num_rx_qs++;
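/* big_page_size is the smallest page allocation that can hold an
 * rx_frag_size fragment; e.g. with rx_frag_size = 2048 and 4K pages,
 * get_order(2048) = 0, so big_page_size = 4096 and each page is split
 * into two RX fragments.
 */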
2465 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2466 for_all_rx_queues(adapter, rxo, i) {
2467 rxo->adapter = adapter;
2469 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2470 sizeof(struct be_eth_rx_compl));
2474 u64_stats_init(&rxo->stats.sync);
2475 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2476 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2481 dev_info(&adapter->pdev->dev,
2482 "created %d RSS queue(s) and 1 default RX queue\n",
2483 adapter->num_rx_qs - 1);
2487 static irqreturn_t be_intx(int irq, void *dev)
2489 struct be_eq_obj *eqo = dev;
2490 struct be_adapter *adapter = eqo->adapter;
2493 /* IRQ is not expected when NAPI is scheduled as the EQ
2494 * will not be armed.
2495 * But, this can happen on Lancer INTx where it takes
2496 * a while to de-assert INTx or in BE2 where occasionally
2497 * an interrupt may be raised even when EQ is unarmed.
2498 * If NAPI is already scheduled, then counting & notifying
2499 * events will orphan them.
2501 if (napi_schedule_prep(&eqo->napi)) {
2502 num_evts = events_get(eqo);
2503 __napi_schedule(&eqo->napi);
2505 eqo->spurious_intr = 0;
2507 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2509 /* Return IRQ_HANDLED only for the first spurious intr
2510 * after a valid intr to stop the kernel from branding
2511 * this irq as a bad one!
2513 if (num_evts || eqo->spurious_intr++ == 0)
2519 static irqreturn_t be_msix(int irq, void *dev)
2521 struct be_eq_obj *eqo = dev;
2523 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2524 napi_schedule(&eqo->napi);
2528 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2530 return rxcp->tcpf && !rxcp->err && rxcp->l4_csum;
2533 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2534 int budget, int polling)
2536 struct be_adapter *adapter = rxo->adapter;
2537 struct be_queue_info *rx_cq = &rxo->cq;
2538 struct be_rx_compl_info *rxcp;
2540 u32 frags_consumed = 0;
2542 for (work_done = 0; work_done < budget; work_done++) {
2543 rxcp = be_rx_compl_get(rxo);
2547 /* Is it a flush compl that has no data */
2548 if (unlikely(rxcp->num_rcvd == 0))
2551 /* Discard compl with partial DMA (Lancer B0) */
2552 if (unlikely(!rxcp->pkt_size)) {
2553 be_rx_compl_discard(rxo, rxcp);
2557 /* On BE drop pkts that arrive due to imperfect filtering in
2558 * promiscuous mode on some SKUs
2560 if (unlikely(rxcp->port != adapter->port_num &&
2561 !lancer_chip(adapter))) {
2562 be_rx_compl_discard(rxo, rxcp);
2566 /* Don't do GRO when we're busy-polling */
2567 if (do_gro(rxcp) && polling != BUSY_POLLING)
2568 be_rx_compl_process_gro(rxo, napi, rxcp);
2570 be_rx_compl_process(rxo, napi, rxcp);
2573 frags_consumed += rxcp->num_rcvd;
2574 be_rx_stats_update(rxo, rxcp);
2578 be_cq_notify(adapter, rx_cq->id, true, work_done);
2580 /* When an rx-obj gets into post_starved state, just
2581 * let be_worker do the posting.
2583 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2584 !rxo->rx_post_starved)
2585 be_post_rx_frags(rxo, GFP_ATOMIC,
2586 max_t(u32, MAX_RX_POST,
2593 static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
2596 case BE_TX_COMP_HDR_PARSE_ERR:
2597 tx_stats(txo)->tx_hdr_parse_err++;
2599 case BE_TX_COMP_NDMA_ERR:
2600 tx_stats(txo)->tx_dma_err++;
2602 case BE_TX_COMP_ACL_ERR:
2603 tx_stats(txo)->tx_spoof_check_err++;
2608 static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
2611 case LANCER_TX_COMP_LSO_ERR:
2612 tx_stats(txo)->tx_tso_err++;
2614 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2615 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2616 tx_stats(txo)->tx_spoof_check_err++;
2618 case LANCER_TX_COMP_QINQ_ERR:
2619 tx_stats(txo)->tx_qinq_err++;
2621 case LANCER_TX_COMP_PARITY_ERR:
2622 tx_stats(txo)->tx_internal_parity_err++;
2624 case LANCER_TX_COMP_DMA_ERR:
2625 tx_stats(txo)->tx_dma_err++;
2630 static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2633 int num_wrbs = 0, work_done = 0;
2634 struct be_tx_compl_info *txcp;
2636 while ((txcp = be_tx_compl_get(txo))) {
2637 num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
2641 if (lancer_chip(adapter))
2642 lancer_update_tx_err(txo, txcp->status);
2644 be_update_tx_err(txo, txcp->status);
2649 be_cq_notify(adapter, txo->cq.id, true, work_done);
2650 atomic_sub(num_wrbs, &txo->q.used);
2652 /* As Tx wrbs have been freed up, wake up netdev queue
2653 * if it was stopped due to lack of tx wrbs. */
2654 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2655 be_can_txq_wake(txo)) {
2656 netif_wake_subqueue(adapter->netdev, idx);
2659 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2660 tx_stats(txo)->tx_compl += work_done;
2661 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2665 #ifdef CONFIG_NET_RX_BUSY_POLL
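/* NAPI and busy-poll contexts must not process an EQ's queues
 * concurrently; the eqo->state word arbitrates between them, and a
 * contender that loses records a *_YIELD flag instead of spinning.
 */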
2666 static inline bool be_lock_napi(struct be_eq_obj *eqo)
2670 spin_lock(&eqo->lock); /* BH is already disabled */
2671 if (eqo->state & BE_EQ_LOCKED) {
2672 WARN_ON(eqo->state & BE_EQ_NAPI);
2673 eqo->state |= BE_EQ_NAPI_YIELD;
2676 eqo->state = BE_EQ_NAPI;
2678 spin_unlock(&eqo->lock);
2682 static inline void be_unlock_napi(struct be_eq_obj *eqo)
2684 spin_lock(&eqo->lock); /* BH is already disabled */
2686 WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
2687 eqo->state = BE_EQ_IDLE;
2689 spin_unlock(&eqo->lock);
2692 static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2696 spin_lock_bh(&eqo->lock);
2697 if (eqo->state & BE_EQ_LOCKED) {
2698 eqo->state |= BE_EQ_POLL_YIELD;
2701 eqo->state |= BE_EQ_POLL;
2703 spin_unlock_bh(&eqo->lock);
2707 static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
2709 spin_lock_bh(&eqo->lock);
2711 WARN_ON(eqo->state & (BE_EQ_NAPI));
2712 eqo->state = BE_EQ_IDLE;
2714 spin_unlock_bh(&eqo->lock);
2717 static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
2719 spin_lock_init(&eqo->lock);
2720 eqo->state = BE_EQ_IDLE;
2723 static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
2727 /* It's enough to just acquire napi lock on the eqo to stop
2728 * be_busy_poll() from processing any queues.
2730 while (!be_lock_napi(eqo))
2736 #else /* CONFIG_NET_RX_BUSY_POLL */
2738 static inline bool be_lock_napi(struct be_eq_obj *eqo)
2743 static inline void be_unlock_napi(struct be_eq_obj *eqo)
2747 static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2752 static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
2756 static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
2760 static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
2763 #endif /* CONFIG_NET_RX_BUSY_POLL */
2765 int be_poll(struct napi_struct *napi, int budget)
2767 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2768 struct be_adapter *adapter = eqo->adapter;
2769 int max_work = 0, work, i, num_evts;
2770 struct be_rx_obj *rxo;
2771 struct be_tx_obj *txo;
2773 num_evts = events_get(eqo);
2775 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
2776 be_process_tx(adapter, txo, i);
2778 if (be_lock_napi(eqo)) {
2779 /* This loop will iterate twice for EQ0 in which
2780 * completions of the last RXQ (default one) are also processed.
2781 * For other EQs the loop iterates only once
2783 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2784 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2785 max_work = max(work, max_work);
2787 be_unlock_napi(eqo);
2792 if (is_mcc_eqo(eqo))
2793 be_process_mcc(adapter);
2795 if (max_work < budget) {
2796 napi_complete(napi);
2797 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2799 /* As we'll continue in polling mode, count and clear events */
2800 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2805 #ifdef CONFIG_NET_RX_BUSY_POLL
2806 static int be_busy_poll(struct napi_struct *napi)
2808 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2809 struct be_adapter *adapter = eqo->adapter;
2810 struct be_rx_obj *rxo;
2813 if (!be_lock_busy_poll(eqo))
2814 return LL_FLUSH_BUSY;
2816 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2817 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2822 be_unlock_busy_poll(eqo);
2827 void be_detect_error(struct be_adapter *adapter)
2829 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2830 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2832 bool error_detected = false;
2833 struct device *dev = &adapter->pdev->dev;
2834 struct net_device *netdev = adapter->netdev;
2836 if (be_hw_error(adapter))
2839 if (lancer_chip(adapter)) {
2840 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2841 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2842 sliport_err1 = ioread32(adapter->db +
2843 SLIPORT_ERROR1_OFFSET);
2844 sliport_err2 = ioread32(adapter->db +
2845 SLIPORT_ERROR2_OFFSET);
2846 adapter->hw_error = true;
2847 /* Do not log error messages if it's a FW reset */
2848 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2849 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2850 dev_info(dev, "Firmware update in progress\n");
2852 error_detected = true;
2853 dev_err(dev, "Error detected in the card\n");
2854 dev_err(dev, "ERR: sliport status 0x%x\n",
2856 dev_err(dev, "ERR: sliport error1 0x%x\n",
2858 dev_err(dev, "ERR: sliport error2 0x%x\n",
2863 pci_read_config_dword(adapter->pdev,
2864 PCICFG_UE_STATUS_LOW, &ue_lo);
2865 pci_read_config_dword(adapter->pdev,
2866 PCICFG_UE_STATUS_HIGH, &ue_hi);
2867 pci_read_config_dword(adapter->pdev,
2868 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2869 pci_read_config_dword(adapter->pdev,
2870 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
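/* A bit set in the mask registers means the corresponding UE status
 * bit should be ignored.
 */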
2872 ue_lo = (ue_lo & ~ue_lo_mask);
2873 ue_hi = (ue_hi & ~ue_hi_mask);
2875 /* On certain platforms BE hardware can indicate spurious UEs.
2876 * Allow HW to stop working completely in case of a real UE.
2877 * Hence hw_error is not set for UE detection.
2880 if (ue_lo || ue_hi) {
2881 error_detected = true;
2883 "Unrecoverable Error detected in the adapter");
2884 dev_err(dev, "Please reboot server to recover");
2885 if (skyhawk_chip(adapter))
2886 adapter->hw_error = true;
2887 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2889 dev_err(dev, "UE: %s bit set\n",
2890 ue_status_low_desc[i]);
2892 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2894 dev_err(dev, "UE: %s bit set\n",
2895 ue_status_hi_desc[i]);
2900 netif_carrier_off(netdev);
2903 static void be_msix_disable(struct be_adapter *adapter)
2905 if (msix_enabled(adapter)) {
2906 pci_disable_msix(adapter->pdev);
2907 adapter->num_msix_vec = 0;
2908 adapter->num_msix_roce_vec = 0;
2912 static int be_msix_enable(struct be_adapter *adapter)
2915 struct device *dev = &adapter->pdev->dev;
2917 /* If RoCE is supported, program the max number of NIC vectors that
2918 * may be configured via set-channels, along with vectors needed for
2919 * RoCE. Else, just program the number we'll use initially.
2921 if (be_roce_supported(adapter))
2922 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2923 2 * num_online_cpus());
2925 num_vec = adapter->cfg_num_qs;
2927 for (i = 0; i < num_vec; i++)
2928 adapter->msix_entries[i].entry = i;
2930 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2931 MIN_MSIX_VECTORS, num_vec);
2935 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2936 adapter->num_msix_roce_vec = num_vec / 2;
2937 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2938 adapter->num_msix_roce_vec);
2941 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2943 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2944 adapter->num_msix_vec);
2948 dev_warn(dev, "MSIx enable failed\n");
2950 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2951 if (!be_physfn(adapter))
2956 static inline int be_msix_vec_get(struct be_adapter *adapter,
2957 struct be_eq_obj *eqo)
2959 return adapter->msix_entries[eqo->msix_idx].vector;
2962 static int be_msix_register(struct be_adapter *adapter)
2964 struct net_device *netdev = adapter->netdev;
2965 struct be_eq_obj *eqo;
2968 for_all_evt_queues(adapter, eqo, i) {
2969 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2970 vec = be_msix_vec_get(adapter, eqo);
2971 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
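/* On request_irq() failure, unwind the vectors registered so far */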
2978 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2979 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2980 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2982 be_msix_disable(adapter);
2986 static int be_irq_register(struct be_adapter *adapter)
2988 struct net_device *netdev = adapter->netdev;
2991 if (msix_enabled(adapter)) {
2992 status = be_msix_register(adapter);
2995 /* INTx is not supported for VF */
2996 if (!be_physfn(adapter))
3000 /* INTx: only the first EQ is used */
3001 netdev->irq = adapter->pdev->irq;
3002 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
3003 &adapter->eq_obj[0]);
3005 dev_err(&adapter->pdev->dev,
3006 "INTx request IRQ failed - err %d\n", status);
3010 adapter->isr_registered = true;
3014 static void be_irq_unregister(struct be_adapter *adapter)
3016 struct net_device *netdev = adapter->netdev;
3017 struct be_eq_obj *eqo;
3020 if (!adapter->isr_registered)
3024 if (!msix_enabled(adapter)) {
3025 free_irq(netdev->irq, &adapter->eq_obj[0]);
3030 for_all_evt_queues(adapter, eqo, i)
3031 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3034 adapter->isr_registered = false;
3037 static void be_rx_qs_destroy(struct be_adapter *adapter)
3039 struct be_queue_info *q;
3040 struct be_rx_obj *rxo;
3043 for_all_rx_queues(adapter, rxo, i) {
3046 be_cmd_rxq_destroy(adapter, q);
3047 be_rx_cq_clean(rxo);
3049 be_queue_free(adapter, q);
3053 static int be_close(struct net_device *netdev)
3055 struct be_adapter *adapter = netdev_priv(netdev);
3056 struct be_eq_obj *eqo;
3059 /* This protection is needed as be_close() may be called even when the
3060 * adapter is in cleared state (after eeh perm failure)
3062 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3065 be_roce_dev_close(adapter);
3067 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3068 for_all_evt_queues(adapter, eqo, i) {
3069 napi_disable(&eqo->napi);
3070 be_disable_busy_poll(eqo);
3072 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
3075 be_async_mcc_disable(adapter);
3077 /* Wait for all pending tx completions to arrive so that
3078 * all tx skbs are freed.
3080 netif_tx_disable(netdev);
3081 be_tx_compl_clean(adapter);
3083 be_rx_qs_destroy(adapter);
3084 be_clear_uc_list(adapter);
3086 for_all_evt_queues(adapter, eqo, i) {
3087 if (msix_enabled(adapter))
3088 synchronize_irq(be_msix_vec_get(adapter, eqo));
3090 synchronize_irq(netdev->irq);
3094 be_irq_unregister(adapter);
3099 static int be_rx_qs_create(struct be_adapter *adapter)
3101 struct rss_info *rss = &adapter->rss_info;
3102 u8 rss_key[RSS_HASH_KEY_LEN];
3103 struct be_rx_obj *rxo;
3106 for_all_rx_queues(adapter, rxo, i) {
3107 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3108 sizeof(struct be_eth_rx_d));
3113 /* The FW would like the default RXQ to be created first */
3114 rxo = default_rxo(adapter);
3115 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
3116 adapter->if_handle, false, &rxo->rss_id);
3120 for_all_rss_queues(adapter, rxo, i) {
3121 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3122 rx_frag_size, adapter->if_handle,
3123 true, &rxo->rss_id);
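/* Stripe the RSS queue ids round-robin across all RSS_INDIR_TABLE_LEN
 * slots of the indirection table.
 */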
3128 if (be_multi_rxq(adapter)) {
3129 for (j = 0; j < RSS_INDIR_TABLE_LEN;
3130 j += adapter->num_rx_qs - 1) {
3131 for_all_rss_queues(adapter, rxo, i) {
3132 if ((j + i) >= RSS_INDIR_TABLE_LEN)
3134 rss->rsstable[j + i] = rxo->rss_id;
3135 rss->rss_queue[j + i] = i;
3138 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3139 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
3141 if (!BEx_chip(adapter))
3142 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3143 RSS_ENABLE_UDP_IPV6;
3145 /* Disable RSS if only the default RXQ is created */
3146 rss->rss_flags = RSS_ENABLE_NONE;
3149 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
3150 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3153 rss->rss_flags = RSS_ENABLE_NONE;
3157 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
3159 /* First time posting */
3160 for_all_rx_queues(adapter, rxo, i)
3161 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
3165 static int be_open(struct net_device *netdev)
3167 struct be_adapter *adapter = netdev_priv(netdev);
3168 struct be_eq_obj *eqo;
3169 struct be_rx_obj *rxo;
3170 struct be_tx_obj *txo;
3174 status = be_rx_qs_create(adapter);
3178 status = be_irq_register(adapter);
3182 for_all_rx_queues(adapter, rxo, i)
3183 be_cq_notify(adapter, rxo->cq.id, true, 0);
3185 for_all_tx_queues(adapter, txo, i)
3186 be_cq_notify(adapter, txo->cq.id, true, 0);
3188 be_async_mcc_enable(adapter);
3190 for_all_evt_queues(adapter, eqo, i) {
3191 napi_enable(&eqo->napi);
3192 be_enable_busy_poll(eqo);
3193 be_eq_notify(adapter, eqo->q.id, true, true, 0);
3195 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
3197 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
3199 be_link_status_update(adapter, link_status);
3201 netif_tx_start_all_queues(netdev);
3202 be_roce_dev_open(adapter);
3204 #ifdef CONFIG_BE2NET_VXLAN
3205 if (skyhawk_chip(adapter))
3206 vxlan_get_rx_port(netdev);
3211 be_close(adapter->netdev);
3215 static int be_setup_wol(struct be_adapter *adapter, bool enable)
3217 struct be_dma_mem cmd;
3221 memset(mac, 0, ETH_ALEN);
3223 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
3224 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3230 status = pci_write_config_dword(adapter->pdev,
3231 PCICFG_PM_CONTROL_OFFSET,
3232 PCICFG_PM_CONTROL_MASK);
3234 dev_err(&adapter->pdev->dev,
3235 "Could not enable Wake-on-lan\n");
3236 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3240 status = be_cmd_enable_magic_wol(adapter,
3241 adapter->netdev->dev_addr,
3243 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
3244 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
3246 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3247 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
3248 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
3251 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
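/* Build a seed MAC for the VFs: keep the PF's OUI in the top three
 * bytes and fill the remaining bytes from a jhash of the PF MAC.
 */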
3255 static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3259 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3261 mac[5] = (u8)(addr & 0xFF);
3262 mac[4] = (u8)((addr >> 8) & 0xFF);
3263 mac[3] = (u8)((addr >> 16) & 0xFF);
3264 /* Use the OUI from the current MAC address */
3265 memcpy(mac, adapter->netdev->dev_addr, 3);
3269 * Generate a seed MAC address from the PF MAC Address using jhash.
3270 * MAC addresses for VFs are assigned incrementally starting from the seed.
3271 * These addresses are programmed in the ASIC by the PF and the VF driver
3272 * queries for the MAC address during its probe.
3274 static int be_vf_eth_addr_config(struct be_adapter *adapter)
3279 struct be_vf_cfg *vf_cfg;
3281 be_vf_eth_addr_generate(adapter, mac);
3283 for_all_vfs(adapter, vf_cfg, vf) {
3284 if (BEx_chip(adapter))
3285 status = be_cmd_pmac_add(adapter, mac,
3287 &vf_cfg->pmac_id, vf + 1);
3289 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3293 dev_err(&adapter->pdev->dev,
3294 "Mac address assignment failed for VF %d\n",
3297 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3304 static int be_vfs_mac_query(struct be_adapter *adapter)
3308 struct be_vf_cfg *vf_cfg;
3310 for_all_vfs(adapter, vf_cfg, vf) {
3311 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3312 mac, vf_cfg->if_handle,
3316 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3321 static void be_vf_clear(struct be_adapter *adapter)
3323 struct be_vf_cfg *vf_cfg;
3326 if (pci_vfs_assigned(adapter->pdev)) {
3327 dev_warn(&adapter->pdev->dev,
3328 "VFs are assigned to VMs: not disabling VFs\n");
3332 pci_disable_sriov(adapter->pdev);
3334 for_all_vfs(adapter, vf_cfg, vf) {
3335 if (BEx_chip(adapter))
3336 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3337 vf_cfg->pmac_id, vf + 1);
3339 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3342 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3345 kfree(adapter->vf_cfg);
3346 adapter->num_vfs = 0;
3347 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
3350 static void be_clear_queues(struct be_adapter *adapter)
3352 be_mcc_queues_destroy(adapter);
3353 be_rx_cqs_destroy(adapter);
3354 be_tx_queues_destroy(adapter);
3355 be_evt_queues_destroy(adapter);
3358 static void be_cancel_worker(struct be_adapter *adapter)
3360 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3361 cancel_delayed_work_sync(&adapter->work);
3362 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3366 static void be_mac_clear(struct be_adapter *adapter)
3368 if (adapter->pmac_id) {
3369 be_cmd_pmac_del(adapter, adapter->if_handle,
3370 adapter->pmac_id[0], 0);
3371 kfree(adapter->pmac_id);
3372 adapter->pmac_id = NULL;
3376 #ifdef CONFIG_BE2NET_VXLAN
3377 static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3379 struct net_device *netdev = adapter->netdev;
3381 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3382 be_cmd_manage_iface(adapter, adapter->if_handle,
3383 OP_CONVERT_TUNNEL_TO_NORMAL);
3385 if (adapter->vxlan_port)
3386 be_cmd_set_vxlan_port(adapter, 0);
3388 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3389 adapter->vxlan_port = 0;
3391 netdev->hw_enc_features = 0;
3392 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
3393 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
3397 static int be_clear(struct be_adapter *adapter)
3399 be_cancel_worker(adapter);
3401 if (sriov_enabled(adapter))
3402 be_vf_clear(adapter);
3404 /* Re-configure FW to distribute resources evenly across max-supported
3405 * number of VFs, only when VFs are not already enabled.
3407 if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
3408 be_cmd_set_sriov_config(adapter, adapter->pool_res,
3409 pci_sriov_get_totalvfs(adapter->pdev));
3411 #ifdef CONFIG_BE2NET_VXLAN
3412 be_disable_vxlan_offloads(adapter);
3414 /* delete the primary mac along with the uc-mac list */
3415 be_mac_clear(adapter);
3417 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
3419 be_clear_queues(adapter);
3421 be_msix_disable(adapter);
3422 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
3426 static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3427 u32 cap_flags, u32 vf)
3432 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3433 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
3436 en_flags &= cap_flags;
3438 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3444 static int be_vfs_if_create(struct be_adapter *adapter)
3446 struct be_resources res = {0};
3447 struct be_vf_cfg *vf_cfg;
3451 /* If a FW profile exists, then cap_flags are updated */
3452 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3453 BE_IF_FLAGS_MULTICAST;
3455 for_all_vfs(adapter, vf_cfg, vf) {
3456 if (!BE3_chip(adapter)) {
3457 status = be_cmd_get_profile_config(adapter, &res,
3460 cap_flags = res.if_cap_flags;
3463 status = be_if_create(adapter, &vf_cfg->if_handle,
3472 static int be_vf_setup_init(struct be_adapter *adapter)
3474 struct be_vf_cfg *vf_cfg;
3477 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3479 if (!adapter->vf_cfg)
3482 for_all_vfs(adapter, vf_cfg, vf) {
3483 vf_cfg->if_handle = -1;
3484 vf_cfg->pmac_id = -1;
3489 static int be_vf_setup(struct be_adapter *adapter)
3491 struct device *dev = &adapter->pdev->dev;
3492 struct be_vf_cfg *vf_cfg;
3493 int status, old_vfs, vf;
3496 old_vfs = pci_num_vf(adapter->pdev);
3498 status = be_vf_setup_init(adapter);
3503 for_all_vfs(adapter, vf_cfg, vf) {
3504 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3509 status = be_vfs_mac_query(adapter);
3513 status = be_vfs_if_create(adapter);
3517 status = be_vf_eth_addr_config(adapter);
3522 for_all_vfs(adapter, vf_cfg, vf) {
3523 /* Allow VFs to program MAC/VLAN filters */
3524 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3525 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3526 status = be_cmd_set_fn_privileges(adapter,
3531 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3535 /* Allow full available bandwidth */
3537 be_cmd_config_qos(adapter, 0, 0, vf + 1);
3540 be_cmd_enable_vf(adapter, vf + 1);
3541 be_cmd_set_logical_link_config(adapter,
3542 IFLA_VF_LINK_STATE_AUTO,
3548 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3550 dev_err(dev, "SRIOV enable failed\n");
3551 adapter->num_vfs = 0;
3556 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
3559 dev_err(dev, "VF setup failed\n");
3560 be_vf_clear(adapter);
3564 /* Convert function_mode bits on BE3 to SH mc_type enums */
3566 static u8 be_convert_mc_type(u32 function_mode)
3568 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
3570 else if (function_mode & QNQ_MODE)
3572 else if (function_mode & VNIC_MODE)
3574 else if (function_mode & UMC_ENABLED)
3580 /* On BE2/BE3 the FW does not report the supported resource limits */
3581 static void BEx_get_resources(struct be_adapter *adapter,
3582 struct be_resources *res)
3584 bool use_sriov = adapter->num_vfs ? 1 : 0;
3586 if (be_physfn(adapter))
3587 res->max_uc_mac = BE_UC_PMAC_COUNT;
3589 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3591 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
3593 if (be_is_mc(adapter)) {
3594 /* Assuming that there are 4 channels per port,
3595 * when multi-channel is enabled
3597 if (be_is_qnq_mode(adapter))
3598 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3600 /* In a non-qnq multichannel mode, the pvid
3601 * takes up one vlan entry
3603 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
3605 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
3608 res->max_mcast_mac = BE_MAX_MC;
3610 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
3611 * 2) Create multiple TX rings on a BE3-R multi-channel interface
3612 * *only* if it is RSS-capable.
3614 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
3615 !be_physfn(adapter) || (be_is_mc(adapter) &&
3616 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
3618 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
3619 struct be_resources super_nic_res = {0};
3621 /* On a SuperNIC profile, the driver needs to use the
3622 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
3624 be_cmd_get_profile_config(adapter, &super_nic_res, 0);
3625 /* Some old versions of BE3 FW don't report max_tx_qs value */
3626 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
3628 res->max_tx_qs = BE3_MAX_TX_QS;
3631 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3632 !use_sriov && be_physfn(adapter))
3633 res->max_rss_qs = (adapter->be3_native) ?
3634 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3635 res->max_rx_qs = res->max_rss_qs + 1;
3637 if (be_physfn(adapter))
3638 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
3639 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3641 res->max_evt_qs = 1;
3643 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3644 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3645 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3648 static void be_setup_init(struct be_adapter *adapter)
3650 adapter->vlan_prio_bmap = 0xff;
3651 adapter->phy.link_speed = -1;
3652 adapter->if_handle = -1;
3653 adapter->be3_native = false;
3654 adapter->if_flags = 0;
3655 if (be_physfn(adapter))
3656 adapter->cmd_privileges = MAX_PRIVILEGES;
3658 adapter->cmd_privileges = MIN_PRIVILEGES;
3661 static int be_get_sriov_config(struct be_adapter *adapter)
3663 struct device *dev = &adapter->pdev->dev;
3664 struct be_resources res = {0};
3665 int max_vfs, old_vfs;
3667 /* Some old versions of BE3 FW don't report max_vfs value */
3668 be_cmd_get_profile_config(adapter, &res, 0);
3670 if (BE3_chip(adapter) && !res.max_vfs) {
3671 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
3672 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3675 adapter->pool_res = res;
3677 if (!be_max_vfs(adapter)) {
3679 dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n");
3680 adapter->num_vfs = 0;
3684 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
3686 /* validate num_vfs module param */
3687 old_vfs = pci_num_vf(adapter->pdev);
3689 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3690 if (old_vfs != num_vfs)
3691 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3692 adapter->num_vfs = old_vfs;
3694 if (num_vfs > be_max_vfs(adapter)) {
3695 dev_info(dev, "Resources unavailable to init %d VFs\n",
3697 dev_info(dev, "Limiting to %d VFs\n",
3698 be_max_vfs(adapter));
3700 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
3706 static int be_get_resources(struct be_adapter *adapter)
3708 struct device *dev = &adapter->pdev->dev;
3709 struct be_resources res = {0};
3712 if (BEx_chip(adapter)) {
3713 BEx_get_resources(adapter, &res);
3717 /* For Lancer, SH etc read per-function resource limits from FW.
3718 * GET_FUNC_CONFIG returns per function guaranteed limits.
3719 * GET_PROFILE_CONFIG returns PCI-E related PF-pool limits
3721 if (!BEx_chip(adapter)) {
3722 status = be_cmd_get_func_config(adapter, &res);
3726 /* If RoCE may be enabled, stash away half the EQs for RoCE */
3727 if (be_roce_supported(adapter))
3728 res.max_evt_qs /= 2;
3732 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3733 be_max_txqs(adapter), be_max_rxqs(adapter),
3734 be_max_rss(adapter), be_max_eqs(adapter),
3735 be_max_vfs(adapter));
3736 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3737 be_max_uc(adapter), be_max_mc(adapter),
3738 be_max_vlans(adapter));
3743 static void be_sriov_config(struct be_adapter *adapter)
3745 struct device *dev = &adapter->pdev->dev;
3748 status = be_get_sriov_config(adapter);
3750 dev_err(dev, "Failed to query SR-IOV configuration\n");
3751 dev_err(dev, "SR-IOV cannot be enabled\n");
3755 /* When the HW is in SRIOV capable configuration, the PF-pool
3756 * resources are equally distributed across the max-number of
3757 * VFs. The user may request only a subset of the max-vfs to be
3758 * enabled. Based on num_vfs, redistribute the resources across
3759 * num_vfs so that each VF will have access to more
3760 * resources. This facility is not available in BE3 FW.
3761 * Also, this is done by FW in Lancer chip.
3763 if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
3764 status = be_cmd_set_sriov_config(adapter,
3768 dev_err(dev, "Failed to optimize SR-IOV resources\n");
3772 static int be_get_config(struct be_adapter *adapter)
3777 status = be_cmd_query_fw_cfg(adapter);
3781 be_cmd_query_port_name(adapter);
3783 if (be_physfn(adapter)) {
3784 status = be_cmd_get_active_profile(adapter, &profile_id);
3786 dev_info(&adapter->pdev->dev,
3787 "Using profile 0x%x\n", profile_id);
3790 if (!BE2_chip(adapter) && be_physfn(adapter))
3791 be_sriov_config(adapter);
3793 status = be_get_resources(adapter);
3797 adapter->pmac_id = kcalloc(be_max_uc(adapter),
3798 sizeof(*adapter->pmac_id), GFP_KERNEL);
3799 if (!adapter->pmac_id)
3802 /* Sanitize cfg_num_qs based on HW and platform limits */
3803 adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3808 static int be_mac_setup(struct be_adapter *adapter)
3813 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3814 status = be_cmd_get_perm_mac(adapter, mac);
3818 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3819 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3821 /* Maybe the HW was reset; dev_addr must be re-programmed */
3822 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3825 /* For BE3-R VFs, the PF programs the initial MAC address */
3826 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3827 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3828 &adapter->pmac_id[0], 0);
3832 static void be_schedule_worker(struct be_adapter *adapter)
3834 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3835 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3838 static int be_setup_queues(struct be_adapter *adapter)
3840 struct net_device *netdev = adapter->netdev;
3843 status = be_evt_queues_create(adapter);
3847 status = be_tx_qs_create(adapter);
3851 status = be_rx_cqs_create(adapter);
3855 status = be_mcc_queues_create(adapter);
3859 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3863 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3869 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3873 int be_update_queues(struct be_adapter *adapter)
3875 struct net_device *netdev = adapter->netdev;
3878 if (netif_running(netdev))
3881 be_cancel_worker(adapter);
3883 /* If any vectors have been shared with RoCE we cannot re-program
3886 if (!adapter->num_msix_roce_vec)
3887 be_msix_disable(adapter);
3889 be_clear_queues(adapter);
3891 if (!msix_enabled(adapter)) {
3892 status = be_msix_enable(adapter);
3897 status = be_setup_queues(adapter);
3901 be_schedule_worker(adapter);
3903 if (netif_running(netdev))
3904 status = be_open(netdev);
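/* Parse the leading major number out of a "major.minor..." FW version
 * string, presumably returning 0 when no number can be parsed.
 */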
3909 static inline int fw_major_num(const char *fw_ver)
3911 int fw_major = 0, i;
3913 i = sscanf(fw_ver, "%d.", &fw_major);
3920 static int be_setup(struct be_adapter *adapter)
3922 struct device *dev = &adapter->pdev->dev;
3925 be_setup_init(adapter);
3927 if (!lancer_chip(adapter))
3928 be_cmd_req_native_mode(adapter);
3930 status = be_get_config(adapter);
3934 status = be_msix_enable(adapter);
3938 status = be_if_create(adapter, &adapter->if_handle,
3939 be_if_cap_flags(adapter), 0);
3943 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3945 status = be_setup_queues(adapter);
3950 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3952 status = be_mac_setup(adapter);
3956 be_cmd_get_fw_ver(adapter);
3957 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
3959 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
3960 dev_err(dev, "Firmware on card is old (%s), IRQs may not work",
3962 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3965 if (adapter->vlans_added)
3966 be_vid_config(adapter);
3968 be_set_rx_mode(adapter->netdev);
3970 be_cmd_get_acpi_wol_cap(adapter);
3972 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
3975 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
3978 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
3979 adapter->tx_fc, adapter->rx_fc);
3981 if (be_physfn(adapter))
3982 be_cmd_set_logical_link_config(adapter,
3983 IFLA_VF_LINK_STATE_AUTO, 0);
3985 if (adapter->num_vfs)
3986 be_vf_setup(adapter);
3988 status = be_cmd_get_phy_info(adapter);
3989 if (!status && be_pause_supported(adapter))
3990 adapter->phy.fc_autoneg = 1;
3992 be_schedule_worker(adapter);
3993 adapter->flags |= BE_FLAGS_SETUP_DONE;
4000 #ifdef CONFIG_NET_POLL_CONTROLLER
4001 static void be_netpoll(struct net_device *netdev)
4003 struct be_adapter *adapter = netdev_priv(netdev);
4004 struct be_eq_obj *eqo;
4007 for_all_evt_queues(adapter, eqo, i) {
4008 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
4009 napi_schedule(&eqo->napi);
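/* The UFI flash directory starts with this 32-byte cookie (stored as
 * two 16-byte halves); get_fsec_info() scans the image for it.
 */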
4014 static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
4016 static bool phy_flashing_required(struct be_adapter *adapter)
4018 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
4019 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
4022 static bool is_comp_in_ufi(struct be_adapter *adapter,
4023 struct flash_section_info *fsec, int type)
4025 int i = 0, img_type = 0;
4026 struct flash_section_info_g2 *fsec_g2 = NULL;
4028 if (BE2_chip(adapter))
4029 fsec_g2 = (struct flash_section_info_g2 *)fsec;
4031 for (i = 0; i < MAX_FLASH_COMP; i++) {
4033 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
4035 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4037 if (img_type == type)
4044 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
4046 const struct firmware *fw)
4048 struct flash_section_info *fsec = NULL;
4049 const u8 *p = fw->data;
4052 while (p < (fw->data + fw->size)) {
4053 fsec = (struct flash_section_info *)p;
4054 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
4061 static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
4062 u32 img_offset, u32 img_size, int hdr_size,
4063 u16 img_optype, bool *crc_match)
4069 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
4074 crc_offset = hdr_size + img_offset + img_size - 4;
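/* The 4-byte CRC sits in the last four bytes of the flashed region */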
4076 /* Skip flashing if the CRC of the flashed region matches */
4077 if (!memcmp(crc, p + crc_offset, 4))
4085 static int be_flash(struct be_adapter *adapter, const u8 *img,
4086 struct be_dma_mem *flash_cmd, int optype, int img_size,
4089 u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
4090 struct be_cmd_write_flashrom *req = flash_cmd->va;
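/* Stream the image 32KB at a time: intermediate chunks use a SAVE op
 * to stage data in FW, and the final chunk issues the op that actually
 * programs the flash region.
 */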
4093 while (total_bytes) {
4094 num_bytes = min_t(u32, 32 * 1024, total_bytes);
4096 total_bytes -= num_bytes;
4099 if (optype == OPTYPE_PHY_FW)
4100 flash_op = FLASHROM_OPER_PHY_FLASH;
4102 flash_op = FLASHROM_OPER_FLASH;
4104 if (optype == OPTYPE_PHY_FW)
4105 flash_op = FLASHROM_OPER_PHY_SAVE;
4107 flash_op = FLASHROM_OPER_SAVE;
4110 memcpy(req->data_buf, img, num_bytes);
4112 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
4113 flash_op, img_offset +
4114 bytes_sent, num_bytes);
4115 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
4116 optype == OPTYPE_PHY_FW)
4121 bytes_sent += num_bytes;
4126 /* For BE2, BE3 and BE3-R */
4127 static int be_flash_BEx(struct be_adapter *adapter,
4128 const struct firmware *fw,
4129 struct be_dma_mem *flash_cmd, int num_of_images)
4131 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
4132 struct device *dev = &adapter->pdev->dev;
4133 struct flash_section_info *fsec = NULL;
4134 int status, i, filehdr_size, num_comp;
4135 const struct flash_comp *pflashcomp;
4139 struct flash_comp gen3_flash_types[] = {
4140 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
4141 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
4142 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
4143 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
4144 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
4145 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
4146 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
4147 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
4148 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
4149 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
4150 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
4151 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
4152 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
4153 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
4154 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
4155 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
4156 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
4157 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
4158 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
4159 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
4162 struct flash_comp gen2_flash_types[] = {
4163 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
4164 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
4165 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
4166 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
4167 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
4168 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
4169 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
4170 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
4171 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
4172 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
4173 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
4174 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
4175 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
4176 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
4177 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
4178 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
4181 if (BE3_chip(adapter)) {
4182 pflashcomp = gen3_flash_types;
4183 filehdr_size = sizeof(struct flash_file_hdr_g3);
4184 num_comp = ARRAY_SIZE(gen3_flash_types);
4186 pflashcomp = gen2_flash_types;
4187 filehdr_size = sizeof(struct flash_file_hdr_g2);
4188 num_comp = ARRAY_SIZE(gen2_flash_types);
4192 /* Get flash section info */
4193 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4195 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
4198 for (i = 0; i < num_comp; i++) {
4199 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
4202 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
4203 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
4206 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
4207 !phy_flashing_required(adapter))
4210 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
4211 status = be_check_flash_crc(adapter, fw->data,
4212 pflashcomp[i].offset,
4216 OPTYPE_REDBOOT, &crc_match);
4219 "Could not get CRC for 0x%x region\n",
4220 pflashcomp[i].optype);
4228 p = fw->data + filehdr_size + pflashcomp[i].offset +
4230 if (p + pflashcomp[i].size > fw->data + fw->size)
4233 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
4234 pflashcomp[i].size, 0);
4236 dev_err(dev, "Flashing section type 0x%x failed\n",
4237 pflashcomp[i].img_type);
4244 static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
4246 u32 img_type = le32_to_cpu(fsec_entry.type);
4247 u16 img_optype = le16_to_cpu(fsec_entry.optype);
4249 if (img_optype != 0xFFFF)
4253 case IMAGE_FIRMWARE_iSCSI:
4254 img_optype = OPTYPE_ISCSI_ACTIVE;
4256 case IMAGE_BOOT_CODE:
4257 img_optype = OPTYPE_REDBOOT;
4259 case IMAGE_OPTION_ROM_ISCSI:
4260 img_optype = OPTYPE_BIOS;
4262 case IMAGE_OPTION_ROM_PXE:
4263 img_optype = OPTYPE_PXE_BIOS;
4265 case IMAGE_OPTION_ROM_FCoE:
4266 img_optype = OPTYPE_FCOE_BIOS;
4268 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4269 img_optype = OPTYPE_ISCSI_BACKUP;
4272 img_optype = OPTYPE_NCSI_FW;
4274 case IMAGE_FLASHISM_JUMPVECTOR:
4275 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4277 case IMAGE_FIRMWARE_PHY:
4278 img_optype = OPTYPE_SH_PHY_FW;
4280 case IMAGE_REDBOOT_DIR:
4281 img_optype = OPTYPE_REDBOOT_DIR;
4283 case IMAGE_REDBOOT_CONFIG:
4284 img_optype = OPTYPE_REDBOOT_CONFIG;
4287 img_optype = OPTYPE_UFI_DIR;
4296 static int be_flash_skyhawk(struct be_adapter *adapter,
4297 const struct firmware *fw,
4298 struct be_dma_mem *flash_cmd, int num_of_images)
4300 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
4301 bool crc_match, old_fw_img, flash_offset_support = true;
4302 struct device *dev = &adapter->pdev->dev;
4303 struct flash_section_info *fsec = NULL;
4304 u32 img_offset, img_size, img_type;
4305 u16 img_optype, flash_optype;
4306 int status, i, filehdr_size;
4309 filehdr_size = sizeof(struct flash_file_hdr_g3);
4310 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4312 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
4317 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4318 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4319 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
4320 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4321 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
4322 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
4324 if (img_optype == 0xFFFF)
4327 if (flash_offset_support)
4328 flash_optype = OPTYPE_OFFSET_SPECIFIED;
4330 flash_optype = img_optype;
4332 /* Don't bother verifying CRC if an old FW image is being
4338 status = be_check_flash_crc(adapter, fw->data, img_offset,
4339 img_size, filehdr_size +
4340 img_hdrs_size, flash_optype,
4342 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4343 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
4344 /* The current FW image on the card does not support
4345 * OFFSET based flashing. Retry using older mechanism
4346 * of OPTYPE based flashing
4348 if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4349 flash_offset_support = false;
4353 /* The current FW image on the card does not recognize
4354 * the new FLASH op_type. The FW download is partially
4355 * complete. Reboot the server now to enable FW image
4356 * to recognize the new FLASH op_type. To complete the
4357 * remaining process, download the same FW again after
4360 dev_err(dev, "Flash incomplete. Reset the server\n");
4361 dev_err(dev, "Download FW image again after reset\n");
4363 } else if (status) {
4364 dev_err(dev, "Could not get CRC for 0x%x region\n",
4373 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
4374 if (p + img_size > fw->data + fw->size)
4377 status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
4380 /* The current FW image on the card does not support OFFSET
4381 * based flashing. Retry using older mechanism of OPTYPE based
4384 if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
4385 flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4386 flash_offset_support = false;
4390 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4394 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4395 (img_optype == OPTYPE_UFI_DIR &&
4396 base_status(status) == MCC_STATUS_FAILED))) {
4398 } else if (status) {
4399 dev_err(dev, "Flashing section type 0x%x failed\n",
4407 static int lancer_fw_download(struct be_adapter *adapter,
4408 const struct firmware *fw)
4410 #define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
4411 #define LANCER_FW_DOWNLOAD_LOCATION "/prg"
4412 struct device *dev = &adapter->pdev->dev;
4413 struct be_dma_mem flash_cmd;
4414 const u8 *data_ptr = NULL;
4415 u8 *dest_image_ptr = NULL;
4416 size_t image_size = 0;
4418 u32 data_written = 0;
4424 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
4425 dev_err(dev, "FW image size should be multiple of 4\n");
4429 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
4430 + LANCER_FW_DOWNLOAD_CHUNK;
4431 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
4432 &flash_cmd.dma, GFP_KERNEL);
4436 dest_image_ptr = flash_cmd.va +
4437 sizeof(struct lancer_cmd_req_write_object);
4438 image_size = fw->size;
4439 data_ptr = fw->data;
4441 while (image_size) {
4442 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
4444 /* Copy the image chunk content. */
4445 memcpy(dest_image_ptr, data_ptr, chunk_size);
4447 status = lancer_cmd_write_object(adapter, &flash_cmd,
4449 LANCER_FW_DOWNLOAD_LOCATION,
4450 &data_written, &change_status,
4455 offset += data_written;
4456 data_ptr += data_written;
4457 image_size -= data_written;
4461 /* Commit the FW that was written */
4462 status = lancer_cmd_write_object(adapter, &flash_cmd,
4464 LANCER_FW_DOWNLOAD_LOCATION,
4465 &data_written, &change_status,
4469 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
4471 dev_err(dev, "Firmware load error\n");
4472 return be_cmd_status(status);
4475 dev_info(dev, "Firmware flashed successfully\n");
4477 if (change_status == LANCER_FW_RESET_NEEDED) {
4478 dev_info(dev, "Resetting adapter to activate new FW\n");
4479 status = lancer_physdev_ctrl(adapter,
4480 PHYSDEV_CONTROL_FW_RESET_MASK);
4482 dev_err(dev, "Adapter busy, could not reset FW\n");
4483 dev_err(dev, "Reboot server to activate new FW\n");
4485 } else if (change_status != LANCER_NO_RESET_NEEDED) {
4486 dev_info(dev, "Reboot server to activate new FW\n");
4496 #define SH_P2_UFI 11
4498 static int be_get_ufi_type(struct be_adapter *adapter,
4499 struct flash_file_hdr_g3 *fhdr)
4502 dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
4506 /* First letter of the build version is used to identify
4507 * which chip this image file is meant for.
4509 switch (fhdr->build[0]) {
4510 case BLD_STR_UFI_TYPE_SH:
4511 return (fhdr->asic_type_rev == ASIC_REV_P2) ? SH_P2_UFI :
4513 case BLD_STR_UFI_TYPE_BE3:
4514 return (fhdr->asic_type_rev == ASIC_REV_B0) ? BE3R_UFI :
4516 case BLD_STR_UFI_TYPE_BE2:
4523 /* Check if the flash image file is compatible with the adapter that
4525 * BE3 chips with asic-rev B0 must be flashed only with BE3R_UFI type.
4526 * Skyhawk chips with asic-rev P2 must be flashed only with SH_P2_UFI type.
4528 static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4529 struct flash_file_hdr_g3 *fhdr)
4531 int ufi_type = be_get_ufi_type(adapter, fhdr);
4535 return skyhawk_chip(adapter);
4537 return (skyhawk_chip(adapter) &&
4538 adapter->asic_rev < ASIC_REV_P2);
4540 return BE3_chip(adapter);
4542 return (BE3_chip(adapter) && adapter->asic_rev < ASIC_REV_B0);
4544 return BE2_chip(adapter);
4550 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
4552 struct device *dev = &adapter->pdev->dev;
4553 struct flash_file_hdr_g3 *fhdr3;
4554 struct image_hdr *img_hdr_ptr;
4555 int status = 0, i, num_imgs;
4556 struct be_dma_mem flash_cmd;
4558 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
4559 if (!be_check_ufi_compatibility(adapter, fhdr3)) {
4560 dev_err(dev, "Flash image is not compatible with adapter\n");
4564 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
4565 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
4570 num_imgs = le32_to_cpu(fhdr3->num_imgs);
4571 for (i = 0; i < num_imgs; i++) {
4572 img_hdr_ptr = (struct image_hdr *)(fw->data +
4573 (sizeof(struct flash_file_hdr_g3) +
4574 i * sizeof(struct image_hdr)));
4575 if (!BE2_chip(adapter) &&
4576 le32_to_cpu(img_hdr_ptr->imageid) != 1)
4579 if (skyhawk_chip(adapter))
4580 status = be_flash_skyhawk(adapter, fw, &flash_cmd,
4583 status = be_flash_BEx(adapter, fw, &flash_cmd,
4587 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
4589 dev_info(dev, "Firmware flashed successfully\n");
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -ENETDOWN;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	if (!status)
		be_cmd_get_fw_ver(adapter);

fw_exit:
	release_firmware(fw);
	return status;
}

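/* ndo_bridge_setlink: with SR-IOV enabled, switch the embedded port between
 * VEB (VF-to-VF frames are switched inside the adapter) and VEPA (frames are
 * hair-pinned through the external switch).
 */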
static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int status = 0;
	u16 mode = 0;
	int rem;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}

static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev, u32 filter_mask)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	if (!sriov_enabled(adapter))
		return 0;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode);
		if (status)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
				       0, 0);
}

#ifdef CONFIG_BE2NET_VXLAN
/* VxLAN offload Notes:
 *
 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
 * is expected to work across all types of IP tunnels once exported. Skyhawk
 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
 * those other tunnels are unexported on the fly through ndo_features_check().
 *
 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
 * adds more than one port, disable offloads and don't re-enable them again
 * until after all the tunnels are removed.
 */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		adapter->vxlan_port_count++;
		goto err;
	}

	if (adapter->vxlan_port_count++ >= 1)
		return;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}

static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->vxlan_port != port)
		goto done;

	be_disable_vxlan_offloads(adapter);

	dev_info(&adapter->pdev->dev,
		 "Disabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
done:
	adapter->vxlan_port_count--;
}

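/* A VxLAN frame carries the inner Ethernet frame behind an 8-byte UDP header
 * plus an 8-byte VxLAN header, so for a genuine VxLAN packet the inner MAC
 * header must start exactly sizeof(struct udphdr) + sizeof(struct vxlanhdr)
 * = 16 bytes past the transport header; anything else loses the checksum and
 * GSO offloads in the check below.
 */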
static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	/* The code below restricts offload features for some tunneled packets.
	 * Offload features for normal (non tunnel) packets are unchanged.
	 */
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done to
	 * allow other tunneled traffic like GRE to work fine while VxLAN
	 * offloads are configured in Skyhawk-R.
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features;
	}

	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
	    sizeof(struct udphdr) + sizeof(struct vxlanhdr))
		return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);

	return features;
}
#endif

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state	= be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port	= be_add_vxlan_port,
	.ndo_del_vxlan_port	= be_del_vxlan_port,
	.ndo_features_check	= be_features_check,
#endif
};

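/* Set up the netdev feature flags and ops. hw_features lists what the user
 * may toggle via ethtool; features is what is currently enabled.
 */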
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}

static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || !be_physfn(adapter))
		return 0;
	else
		return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

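/* Map the BARs this function needs: the CSR BAR (BAR 2) only on BEx physical
 * functions, plus the doorbell BAR chosen by db_bar() above. On Skyhawk the
 * RoCE doorbells share the same doorbell BAR (see be_roce_map_pci_bars()).
 */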
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

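/* One-time control-path init: map the PCI BARs and carve out the bootstrap
 * mailbox and RX-filter DMA buffers. The mailbox needs 16-byte alignment,
 * hence the size + 16 allocation and the PTR_ALIGN() of both the virtual
 * and DMA addresses below.
 */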
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (lancer_chip(adapter))
		cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		/* ALL non-BE ASICs */
		cmd->size = sizeof(struct be_cmd_req_get_stats_v2);

	cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				      GFP_KERNEL);
	if (!cmd->va)
		return -ENOMEM;
	return 0;
}

static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_initial_config(struct be_adapter *adapter)
{
	int status, level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	adapter->cfg_num_qs = netif_get_num_default_rss_queues();
	return 0;
}

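/* Lancer error recovery: once the FW reports ready again, tear the function
 * down (be_clear), clear the recorded error state, and rebuild it (be_setup),
 * reopening the interface if it was running before the error.
 */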
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}

static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}

static void be_log_sfp_info(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_sfp_info(adapter);
	if (!status) {
		dev_err(&adapter->pdev->dev,
			"Unqualified SFP+ detected on %c from %s part no: %s",
			adapter->port_name, adapter->phy.vendor_name,
			adapter->phy.vendor_pn);
	}
	adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
}

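/* Periodic (1 second) housekeeping: reap MCC completions while the interface
 * is down, refresh HW stats and die temperature, replenish RX queues starved
 * by allocation failures, and re-tune EQ delays.
 */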
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	be_eqd_update(adapter);

	if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

/* If any VFs are already enabled don't FLR the PF */
static bool be_reset_required(struct be_adapter *adapter)
{
	return pci_num_vf(adapter->pdev) ? false : true;
}

static char *mc_name(struct be_adapter *adapter)
{
	char *str = "";	/* default */

	switch (adapter->mc_type) {
	case UMC:
		str = "UMC";
		break;
	case FLEX10:
		str = "FLEX10";
		break;
	case vNIC1:
		str = "vNIC-1";
		break;
	case nPAR:
		str = "nPAR";
		break;
	case UFP:
		str = "UFP";
		break;
	case vNIC2:
		str = "vNIC-2";
		break;
	default:
		str = "";
	}

	return str;
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	case OC_DEVICE_ID5:
	case OC_DEVICE_ID6:
		return OC_NAME_SH;
	default:
		return BE_NAME;
	}
}

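/* PCI probe. Bring-up order: enable and map PCI resources, sync with the
 * FW ready state (with an optional function reset), init the control path
 * and stats buffers, then be_setup() and netdev registration. The error
 * labels at the bottom unwind these steps in reverse order.
 */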
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev,
						   DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	int status = 0;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	status = be_cmd_reset_function(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

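/* PCI EEH hooks: on a channel error the function is torn down
 * (be_eeh_err_detected), the slot is reset and FW readiness re-checked
 * (be_eeh_reset), and the function is finally rebuilt and reattached
 * (be_eeh_resume).
 */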
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	int status = 0;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* On some BE3 FW versions, after a HW reset,
	 * interrupts will remain disabled for each function.
	 * So, explicitly enable interrupts
	 */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

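/* Module entry: sanity-check the rx_frag_size parameter (RX buffers can only
 * be posted as 2K/4K/8K fragments) before registering the PCI driver.
 */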
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);