/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/of.h>

#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"
#include "thunder_bgx.h"

#define DRV_NAME	"thunder-nic"
#define DRV_VERSION	"1.0"

/* Per-chip hardware capability limits, filled in by nic_get_hw_info() */
struct hw_info {
	u8	bgx_cnt;
	u8	chans_per_lmac;
	u8	chans_per_bgx;	/* Rx/Tx chans */
	u8	chans_per_rgx;
	u8	chans_per_lbk;
	u16	cpi_cnt;
	u16	rssi_cnt;
	u16	rss_ind_tbl_size;
	u16	tl4_cnt;
	u16	tl3_cnt;
	u8	tl2_cnt;
	u8	tl1_cnt;
	bool	tl1_per_bgx;	/* TL1 per BGX or per LMAC */
};

struct nicpf {
	struct pci_dev		*pdev;
	struct hw_info		*hw;
	u8			node;
	unsigned int		flags;
	u8			num_vf_en;	/* No of VF enabled */
	bool			vf_enabled[MAX_NUM_VFS_SUPPORTED];
	void __iomem		*reg_base;	/* Register start address */
	u8			num_sqs_en;	/* Secondary qsets enabled */
	u64			nicvf[MAX_NUM_VFS_SUPPORTED];
	u8			vf_sqs[MAX_NUM_VFS_SUPPORTED][MAX_SQS_PER_VF];
	u8			pqs_vf[MAX_NUM_VFS_SUPPORTED];
	bool			sqs_used[MAX_NUM_VFS_SUPPORTED];
	struct pkind_cfg	pkind;
#define	NIC_SET_VF_LMAC_MAP(bgx, lmac)		(((bgx & 0xF) << 4) | (lmac & 0xF))
#define	NIC_GET_BGX_FROM_VF_LMAC_MAP(map)	((map >> 4) & 0xF)
#define	NIC_GET_LMAC_FROM_VF_LMAC_MAP(map)	(map & 0xF)
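/* Example: NIC_SET_VF_LMAC_MAP(1, 2) encodes BGX1/LMAC2 as 0x12; the two
 * getters above recover the BGX and LMAC indices from that byte.
 */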
	u8			*vf_lmac_map;
	struct delayed_work	dwork;
	struct workqueue_struct	*check_link;
	u8			*link;
	u8			*duplex;
	u32			*speed;
	u16			cpi_base[MAX_NUM_VFS_SUPPORTED];
	u16			rssi_base[MAX_NUM_VFS_SUPPORTED];
	bool			mbx_lock[MAX_NUM_VFS_SUPPORTED];

	/* MSI-X */
	bool			msix_enabled;
	u8			num_vec;
	struct msix_entry	*msix_entries;
	bool			irq_allocated[NIC_PF_MSIX_VECTORS];
	char			irq_name[NIC_PF_MSIX_VECTORS][20];
};

/* Supported devices */
static const struct pci_device_id nic_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_NIC_PF) },
	{ 0, }	/* end of table */
};

MODULE_AUTHOR("Sunil Goutham");
MODULE_DESCRIPTION("Cavium Thunder NIC Physical Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, nic_id_table);

/* The Cavium ThunderX network controller can *only* be found in SoCs
 * containing the ThunderX ARM64 CPU implementation.  All accesses to the device
 * registers on this platform are implicitly strongly ordered with respect
 * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
 * with no memory barriers in this driver.  The readq()/writeq() functions add
 * explicit ordering operations which in this case are redundant, and only
 * add overhead.
 */

/* Register read/write APIs */
static void nic_reg_write(struct nicpf *nic, u64 offset, u64 val)
{
	writeq_relaxed(val, nic->reg_base + offset);
}

static u64 nic_reg_read(struct nicpf *nic, u64 offset)
{
	return readq_relaxed(nic->reg_base + offset);
}

/* PF -> VF mailbox communication APIs */
static void nic_enable_mbx_intr(struct nicpf *nic)
{
	int vf_cnt = pci_sriov_get_totalvfs(nic->pdev);

#define INTR_MASK(vfs) ((vfs < 64) ? (BIT_ULL(vfs) - 1) : (~0ull))
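
	/* e.g. INTR_MASK(8) == 0xff, one bit per VF; with 64 or more VFs the
	 * mask is all-ones and the second register pair below covers the rest.
	 */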

	/* Clear it, to avoid spurious interrupts (if any) */
	nic_reg_write(nic, NIC_PF_MAILBOX_INT, INTR_MASK(vf_cnt));

	/* Enable mailbox interrupt for all VFs */
	nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, INTR_MASK(vf_cnt));
	/* One mailbox intr enable reg per 64 VFs */
	if (vf_cnt > 64) {
		nic_reg_write(nic, NIC_PF_MAILBOX_INT + sizeof(u64),
			      INTR_MASK(vf_cnt - 64));
		nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(u64),
			      INTR_MASK(vf_cnt - 64));
	}
}

static void nic_clear_mbx_intr(struct nicpf *nic, int vf, int mbx_reg)
{
	nic_reg_write(nic, NIC_PF_MAILBOX_INT + (mbx_reg << 3), BIT_ULL(vf));
}

static u64 nic_get_mbx_addr(int vf)
{
	return NIC_PF_VF_0_127_MAILBOX_0_1 + (vf << NIC_VF_NUM_SHIFT);
}
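
/* Each VF owns a 16-byte mailbox window (two u64 words) in the PF's BAR,
 * at a stride of (1 << NIC_VF_NUM_SHIFT) bytes from the base above.
 */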

/* Send a mailbox message to VF
 * @vf: vf to which this message to be sent
 * @mbx: Message to be sent
 */
static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx)
{
	void __iomem *mbx_addr = nic->reg_base + nic_get_mbx_addr(vf);
	u64 *msg = (u64 *)mbx;

	/* In first revision HW, mbox interrupt is triggered
	 * when PF writes to MBOX(1), in next revisions when
	 * PF writes to MBOX(0); hence the word that raises the
	 * interrupt is always written last.
	 */
	if (pass1_silicon(nic->pdev)) {
		/* see the comment for nic_reg_write()/nic_reg_read()
		 * functions above
		 */
		writeq_relaxed(msg[0], mbx_addr);
		writeq_relaxed(msg[1], mbx_addr + 8);
	} else {
		writeq_relaxed(msg[1], mbx_addr + 8);
		writeq_relaxed(msg[0], mbx_addr);
	}
}

/* Responds to VF's READY message with VF's
 * ID, node, MAC address etc.
 * @vf: VF which sent READY message
 */
static void nic_mbx_send_ready(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};
	int bgx_idx, lmac;
	const char *mac;

	mbx.nic_cfg.msg = NIC_MBOX_MSG_READY;
	mbx.nic_cfg.vf_id = vf;

	mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;

	if (vf < nic->num_vf_en) {
		bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);

		mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
		if (mac)
			ether_addr_copy((u8 *)&mbx.nic_cfg.mac_addr, mac);
	}
	mbx.nic_cfg.sqs_mode = (vf >= nic->num_vf_en) ? true : false;
	mbx.nic_cfg.node_id = nic->node;

	mbx.nic_cfg.loopback_supported = vf < nic->num_vf_en;

	nic_send_msg_to_vf(nic, vf, &mbx);
}

/* ACKs VF's mailbox message
 * @vf: VF to which ACK to be sent
 */
static void nic_mbx_send_ack(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_ACK;
	nic_send_msg_to_vf(nic, vf, &mbx);
}

/* NACKs VF's mailbox message so the VF knows the PF
 * was not able to complete the action
 * @vf: VF to which NACK to be sent
 */
static void nic_mbx_send_nack(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_NACK;
	nic_send_msg_to_vf(nic, vf, &mbx);
}

/* Flush all in flight receive packets to memory and
 * bring down an active RQ
 */
static int nic_rcv_queue_sw_sync(struct nicpf *nic)
{
	u16 timeout = ~0x00;

	nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x01);
	/* Wait till sync cycle is finished */
	while (timeout) {
		if (nic_reg_read(nic, NIC_PF_SW_SYNC_RX_DONE) & 0x1)
			break;
		timeout--;
	}
	nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x00);
	if (!timeout) {
		dev_err(&nic->pdev->dev, "Receive queue software sync failed");
		return 1;
	}
	return 0;
}

/* Get BGX Rx/Tx stats and respond to VF's request */
static void nic_get_bgx_stats(struct nicpf *nic, struct bgx_stats_msg *bgx)
{
	int bgx_idx, lmac;
	union nic_mbx mbx = {};

	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);

	mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
	mbx.bgx_stats.vf_id = bgx->vf_id;
	mbx.bgx_stats.rx = bgx->rx;
	mbx.bgx_stats.idx = bgx->idx;
	if (bgx->rx)
		mbx.bgx_stats.stats = bgx_get_rx_stats(nic->node, bgx_idx,
						       lmac, bgx->idx);
	else
		mbx.bgx_stats.stats = bgx_get_tx_stats(nic->node, bgx_idx,
						       lmac, bgx->idx);
	nic_send_msg_to_vf(nic, bgx->vf_id, &mbx);
}

/* Update hardware min/max frame size */
static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
{
	if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) {
		dev_err(&nic->pdev->dev,
			"Invalid MTU setting from VF%d rejected, should be between %d and %d\n",
			vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS);
		return 1;
	}
	new_frs += ETH_HLEN;
	if (new_frs <= nic->pkind.maxlen)
		return 0;

	nic->pkind.maxlen = new_frs;
	nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *(u64 *)&nic->pkind);
	return 0;
}

/* Set minimum transmit packet size */
static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
{
	int lmac, max_lmac;
	u16 sdevid;
	u64 lmac_cfg;

	/* Max value that can be set is 60 */
	if (size > 60)
		size = 60;

	pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
	/* 81xx's RGX has only one LMAC */
	if (sdevid == PCI_SUBSYS_DEVID_81XX_NIC_PF)
		max_lmac = ((nic->hw->bgx_cnt - 1) * MAX_LMAC_PER_BGX) + 1;
	else
		max_lmac = nic->hw->bgx_cnt * MAX_LMAC_PER_BGX;
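
	/* The pad length is programmed in 4-byte units into a 4-bit field
	 * (bits 5:2 of LMAC_CFG), which is why the value is capped at 60.
	 */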

	for (lmac = 0; lmac < max_lmac; lmac++) {
		lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
		lmac_cfg &= ~(0xF << 2);
		lmac_cfg |= ((size / 4) << 2);
		nic_reg_write(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3), lmac_cfg);
	}
}

/* Function to check number of LMACs present and set VF::LMAC mapping.
 * Mapping will be used while initializing channels.
 */
static void nic_set_lmac_vf_mapping(struct nicpf *nic)
{
	unsigned bgx_map = bgx_get_map(nic->node);
	int bgx, next_bgx_lmac = 0;
	int lmac, lmac_cnt = 0;
	u64 lmac_credit;

	nic->num_vf_en = 0;

	for (bgx = 0; bgx < nic->hw->bgx_cnt; bgx++) {
		if (!(bgx_map & (1 << bgx)))
			continue;
		lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
		for (lmac = 0; lmac < lmac_cnt; lmac++)
			nic->vf_lmac_map[next_bgx_lmac++] =
						NIC_SET_VF_LMAC_MAP(bgx, lmac);
		nic->num_vf_en += lmac_cnt;

		/* Program LMAC credits */
		lmac_credit = (1ull << 1);	/* channel credit enable */
		lmac_credit |= (0x1ff << 2);	/* Max outstanding pkt count */
		/* 48KB BGX Tx buffer size, each unit is of size 16bytes */
		lmac_credit |= (((((48 * 1024) / lmac_cnt) -
				NIC_HW_MAX_FRS) / 16) << 12);
		lmac = bgx * MAX_LMAC_PER_BGX;
		for (; lmac < lmac_cnt + (bgx * MAX_LMAC_PER_BGX); lmac++)
			nic_reg_write(nic,
				      NIC_PF_LMAC_0_7_CREDIT + (lmac * 8),
				      lmac_credit);

		/* On CN81XX there are only 8 VFs but max possible no of
		 * interfaces are 9.
		 */
		if (nic->num_vf_en >= pci_sriov_get_totalvfs(nic->pdev)) {
			nic->num_vf_en = pci_sriov_get_totalvfs(nic->pdev);
			break;
		}
	}
}

static void nic_free_lmacmem(struct nicpf *nic)
{
	kfree(nic->vf_lmac_map);
	kfree(nic->link);
	kfree(nic->duplex);
	kfree(nic->speed);
}

static int nic_get_hw_info(struct nicpf *nic)
{
	u8 max_lmac;
	u16 sdevid;
	struct hw_info *hw = nic->hw;

	pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);

	switch (sdevid) {
	case PCI_SUBSYS_DEVID_88XX_NIC_PF:
		hw->bgx_cnt = MAX_BGX_PER_CN88XX;
		hw->chans_per_lmac = 16;
		hw->chans_per_bgx = 128;
		hw->cpi_cnt = 2048;
		hw->rssi_cnt = 4096;
		hw->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;
		hw->tl3_cnt = 256;
		hw->tl2_cnt = 64;
		hw->tl1_cnt = 2;
		hw->tl1_per_bgx = true;
		break;
	case PCI_SUBSYS_DEVID_81XX_NIC_PF:
		hw->bgx_cnt = MAX_BGX_PER_CN81XX;
		hw->chans_per_lmac = 8;
		hw->chans_per_bgx = 32;
		hw->chans_per_rgx = 8;
		hw->chans_per_lbk = 24;
		hw->cpi_cnt = 512;
		hw->rssi_cnt = 256;
		hw->rss_ind_tbl_size = 32; /* Max RSSI / Max interfaces */
		hw->tl3_cnt = 64;
		hw->tl2_cnt = 16;
		hw->tl1_cnt = 10;
		hw->tl1_per_bgx = false;
		break;
	case PCI_SUBSYS_DEVID_83XX_NIC_PF:
		hw->bgx_cnt = MAX_BGX_PER_CN83XX;
		hw->chans_per_lmac = 8;
		hw->chans_per_bgx = 32;
		hw->chans_per_lbk = 64;
		hw->cpi_cnt = 2048;
		hw->rssi_cnt = 1024;
		hw->rss_ind_tbl_size = 64; /* Max RSSI / Max interfaces */
		hw->tl3_cnt = 64;
		hw->tl2_cnt = 16;
		hw->tl1_cnt = 18;
		hw->tl1_per_bgx = false;
		break;
	}
	hw->tl4_cnt = MAX_QUEUES_PER_QSET * pci_sriov_get_totalvfs(nic->pdev);

	/* Allocate memory for LMAC tracking elements */
	max_lmac = hw->bgx_cnt * MAX_LMAC_PER_BGX;
	nic->vf_lmac_map = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL);
	if (!nic->vf_lmac_map)
		goto error;
	nic->link = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL);
	if (!nic->link)
		goto error;
	nic->duplex = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL);
	if (!nic->duplex)
		goto error;
	nic->speed = kmalloc_array(max_lmac, sizeof(u32), GFP_KERNEL);
	if (!nic->speed)
		goto error;
	return 0;

error:
	nic_free_lmacmem(nic);
	return -ENOMEM;
}

static int nic_init_hw(struct nicpf *nic)
{
	int i, err;
	u64 cqm_cfg;

	/* Get HW capability info */
	err = nic_get_hw_info(nic);
	if (err)
		return err;

	/* Enable NIC HW block */
	nic_reg_write(nic, NIC_PF_CFG, 0x3);

	/* Enable backpressure */
	nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03);

	/* TNS and TNS bypass modes are present only on 88xx */
	if (nic->pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF) {
		/* Disable TNS mode on both interfaces */
		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
			      (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK);
		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
			      (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK);
	}

	nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
		      (1ULL << 63) | BGX0_BLOCK);
	nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
		      (1ULL << 63) | BGX1_BLOCK);

	/* PKIND configuration */
	nic->pkind.minlen = 0;
	nic->pkind.maxlen = NIC_HW_MAX_FRS + ETH_HLEN;
	nic->pkind.lenerr_en = 1;
	nic->pkind.rx_hdr = 0;
	nic->pkind.hdr_sl = 0;

	for (i = 0; i < NIC_MAX_PKIND; i++)
		nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (i << 3),
			      *(u64 *)&nic->pkind);

	nic_set_tx_pkt_pad(nic, NIC_HW_MIN_FRS);

	/* Timer config */
	nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK);

	/* Enable VLAN ethertype matching and stripping */
	nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7,
		      (2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETH_P_8021Q);

	/* Check if HW expected value is higher (could be in future chips) */
	cqm_cfg = nic_reg_read(nic, NIC_PF_CQM_CFG);
	if (cqm_cfg < NICPF_CQM_MIN_DROP_LEVEL)
		nic_reg_write(nic, NIC_PF_CQM_CFG, NICPF_CQM_MIN_DROP_LEVEL);

	return 0;
}

/* Channel parse index configuration */
static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
{
	struct hw_info *hw = nic->hw;
	u32 vnic, bgx, lmac, chan;
	u32 padd, cpi_count = 0;
	u64 cpi_base, cpi, rssi_base, rssi;
	u8  qset, rq_idx = 0;

	vnic = cfg->vf_id;
	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);

	chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx);
	cpi_base = vnic * NIC_MAX_CPI_PER_LMAC;
	rssi_base = vnic * hw->rss_ind_tbl_size;

	/* Rx channel configuration */
	nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3),
		      (1ull << 63) | (vnic << 0));
	nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_CFG | (chan << 3),
		      ((u64)cfg->cpi_alg << 62) | (cpi_base << 48));

	if (cfg->cpi_alg == CPI_ALG_NONE)
		cpi_count = 1;
	else if (cfg->cpi_alg == CPI_ALG_VLAN) /* 3 bits of PCP */
		cpi_count = 8;
	else if (cfg->cpi_alg == CPI_ALG_VLAN16) /* 3 bits PCP + DEI */
		cpi_count = 16;
	else if (cfg->cpi_alg == CPI_ALG_DIFF) /* 6 bits DSCP */
		cpi_count = NIC_MAX_CPI_PER_LMAC;

	/* RSS Qset, Qidx mapping */
	qset = cfg->vf_id;
	rssi = rssi_base;
	for (; rssi < (rssi_base + cfg->rq_cnt); rssi++) {
		nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
			      (qset << 3) | rq_idx);
		rq_idx++;
	}

	rssi = 0;
	cpi = cpi_base;
	for (; cpi < (cpi_base + cpi_count); cpi++) {
		/* Determine port to channel adder */
		if (cfg->cpi_alg != CPI_ALG_DIFF)
			padd = cpi % cpi_count;
		else
			padd = cpi % 8; /* 3 bits CS out of 6 bits DSCP */

		/* Leave RSS_SIZE as '0' to disable RSS */
		if (pass1_silicon(nic->pdev)) {
			nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
				      (vnic << 24) | (padd << 16) |
				      (rssi_base + rssi));
		} else {
			/* Set MPI_ALG to '0' to disable MCAM parsing */
			nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
				      (padd << 16));
			/* MPI index is same as CPI if MPI_ALG is not enabled */
			nic_reg_write(nic, NIC_PF_MPI_0_2047_CFG | (cpi << 3),
				      (vnic << 24) | (rssi_base + rssi));
		}

		if ((rssi + 1) >= cfg->rq_cnt)
			continue;

		if (cfg->cpi_alg == CPI_ALG_VLAN)
			rssi++;
		else if (cfg->cpi_alg == CPI_ALG_VLAN16)
			rssi = ((cpi - cpi_base) & 0xe) >> 1;
		else if (cfg->cpi_alg == CPI_ALG_DIFF)
			rssi = ((cpi - cpi_base) & 0x38) >> 3;
	}
	nic->cpi_base[cfg->vf_id] = cpi_base;
	nic->rssi_base[cfg->vf_id] = rssi_base;
}
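
/* Rx steering summary: a packet's channel selects a CPI range (cpi_base
 * plus the CPI_ALG-dependent adder), the CPI (or MPI on non-pass1 silicon)
 * selects an RSSI, and the RSSI selects the Qset and RQ.
 */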

/* Responds to VF with its RSS indirection table size */
static void nic_send_rss_size(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};

	mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
	mbx.rss_size.ind_tbl_size = nic->hw->rss_ind_tbl_size;
	nic_send_msg_to_vf(nic, vf, &mbx);
}

/* Receive side scaling configuration
 * configure:
 * - RSS index
 * - indir table i.e. hash::RQ mapping
 * - no of hash bits to consider
 */
static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
{
	u8  qset, idx = 0;
	u64 cpi_cfg, cpi_base, rssi_base, rssi;
	u64 idx_addr;

	rssi_base = nic->rssi_base[cfg->vf_id] + cfg->tbl_offset;

	rssi = rssi_base;

	for (; rssi < (rssi_base + cfg->tbl_len); rssi++) {
		u8 svf = cfg->ind_tbl[idx] >> 3;

		if (svf)
			qset = nic->vf_sqs[cfg->vf_id][svf - 1];
		else
			qset = cfg->vf_id;
		nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
			      (qset << 3) | (cfg->ind_tbl[idx] & 0x7));
		idx++;
	}

	cpi_base = nic->cpi_base[cfg->vf_id];
	if (pass1_silicon(nic->pdev))
		idx_addr = NIC_PF_CPI_0_2047_CFG;
	else
		idx_addr = NIC_PF_MPI_0_2047_CFG;
	cpi_cfg = nic_reg_read(nic, idx_addr | (cpi_base << 3));
	cpi_cfg &= ~(0xFULL << 20);
	cpi_cfg |= (cfg->hash_bits << 20);
	nic_reg_write(nic, idx_addr | (cpi_base << 3), cpi_cfg);
}

/* 4 level transmit side scheduler configuration
 * for TNS bypass mode
 *
 * Sample configuration for SQ0 on 88xx
 * VNIC0-SQ0 -> TL4(0)   -> TL3[0]   -> TL2[0]  -> TL1[0] -> BGX0
 * VNIC1-SQ0 -> TL4(8)   -> TL3[2]   -> TL2[0]  -> TL1[0] -> BGX0
 * VNIC2-SQ0 -> TL4(16)  -> TL3[4]   -> TL2[1]  -> TL1[0] -> BGX0
 * VNIC3-SQ0 -> TL4(24)  -> TL3[6]   -> TL2[1]  -> TL1[0] -> BGX0
 * VNIC4-SQ0 -> TL4(512) -> TL3[128] -> TL2[32] -> TL1[1] -> BGX1
 * VNIC5-SQ0 -> TL4(520) -> TL3[130] -> TL2[32] -> TL1[1] -> BGX1
 * VNIC6-SQ0 -> TL4(528) -> TL3[132] -> TL2[33] -> TL1[1] -> BGX1
 * VNIC7-SQ0 -> TL4(536) -> TL3[134] -> TL2[33] -> TL1[1] -> BGX1
 */
static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
			       struct sq_cfg_msg *sq)
{
	struct hw_info *hw = nic->hw;
	u32 bgx, lmac, chan;
	u32 tl2, tl3, tl4;
	u32 rr_quantum;
	u8 sq_idx = sq->sq_num;
	u8 pqs_vnic;
	int svf;

	/* For a secondary Qset, schedule against the primary VF's LMAC */
	if (sq->sqs_mode)
		pqs_vnic = nic->pqs_vf[vnic];
	else
		pqs_vnic = vnic;

	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);

	/* 24 bytes for FCS, IPG and preamble */
	rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);

	/* For 88xx 0-511 TL4 transmits via BGX0 and
	 * 512-1023 TL4s transmit via BGX1.
	 */
	if (hw->tl1_per_bgx) {
		tl4 = bgx * (hw->tl4_cnt / hw->bgx_cnt);
		if (!sq->sqs_mode) {
			tl4 += (lmac * MAX_QUEUES_PER_QSET);
		} else {
			for (svf = 0; svf < MAX_SQS_PER_VF; svf++) {
				if (nic->vf_sqs[pqs_vnic][svf] == vnic)
					break;
			}
			tl4 += (MAX_LMAC_PER_BGX * MAX_QUEUES_PER_QSET);
			tl4 += (lmac * MAX_QUEUES_PER_QSET * MAX_SQS_PER_VF);
			tl4 += (svf * MAX_QUEUES_PER_QSET);
		}
	} else {
		tl4 = (vnic * MAX_QUEUES_PER_QSET);
	}
	tl4 += sq_idx;

	tl3 = tl4 / (hw->tl4_cnt / hw->tl3_cnt);
	nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
		      ((u64)vnic << NIC_QS_ID_SHIFT) |
		      ((u32)sq_idx << NIC_Q_NUM_SHIFT), tl4);
	nic_reg_write(nic, NIC_PF_TL4_0_1023_CFG | (tl4 << 3),
		      ((u64)vnic << 27) | ((u32)sq_idx << 24) | rr_quantum);

	nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum);

	/* On 88xx 0-127 channels are for BGX0 and
	 * 128-255 channels for BGX1.
	 *
	 * On 81xx/83xx TL3_CHAN reg should be configured with channel
	 * within LMAC i.e. 0-7 and not the actual channel number like on 88xx
	 */
	chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx);
	if (hw->tl1_per_bgx)
		nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan);
	else
		nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), 0);

	/* Enable backpressure on the channel */
	nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1);

	tl2 = tl3 >> 2;
	nic_reg_write(nic, NIC_PF_TL3A_0_63_CFG | (tl2 << 3), tl2);
	nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum);
	/* No priorities as of now */
	nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);

	/* Unlike 88xx where TL2s 0-31 transmit to TL1 '0' and the rest to
	 * TL1 '1', on 81xx/83xx TL2 needs to be configured to transmit to
	 * one of the possible LMACs.
	 *
	 * This register doesn't exist on 88xx.
	 */
	if (!hw->tl1_per_bgx)
		nic_reg_write(nic, NIC_PF_TL2_LMAC | (tl2 << 3),
			      lmac + (bgx * MAX_LMAC_PER_BGX));
}
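
/* Worked example for the sample table above, assuming 128 total VFs on
 * 88xx (tl4_cnt = 8 * 128 = 1024): VNIC4-SQ0 has bgx = 1, lmac = 0, so
 * tl4 = 1 * (1024 / 2) = 512, tl3 = 512 / (1024 / 256) = 128 and
 * tl2 = 128 >> 2 = 32, i.e. TL4(512) -> TL3[128] -> TL2[32] -> TL1[1].
 */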

/* Send primary nicvf pointer to secondary QS's VF */
static void nic_send_pnicvf(struct nicpf *nic, int sqs)
{
	union nic_mbx mbx = {};

	mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR;
	mbx.nicvf.nicvf = nic->nicvf[nic->pqs_vf[sqs]];
	nic_send_msg_to_vf(nic, sqs, &mbx);
}

/* Send SQS's nicvf pointer to primary QS's VF */
static void nic_send_snicvf(struct nicpf *nic, struct nicvf_ptr *nicvf)
{
	union nic_mbx mbx = {};
	int sqs_id = nic->vf_sqs[nicvf->vf_id][nicvf->sqs_id];

	mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR;
	mbx.nicvf.sqs_id = nicvf->sqs_id;
	mbx.nicvf.nicvf = nic->nicvf[sqs_id];
	nic_send_msg_to_vf(nic, nicvf->vf_id, &mbx);
}

/* Find next available Qset that can be assigned as a
 * secondary Qset to a VF.
 */
static int nic_nxt_avail_sqs(struct nicpf *nic)
{
	int sqs;

	for (sqs = 0; sqs < nic->num_sqs_en; sqs++) {
		if (nic->sqs_used[sqs])
			continue;
		nic->sqs_used[sqs] = true;
		return sqs + nic->num_vf_en;
	}
	return -1;
}
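
/* VFs 0..num_vf_en-1 are bound to physical LMACs; the num_sqs_en VFs after
 * them exist only to donate their Qsets, hence the "sqs + nic->num_vf_en"
 * VF numbering above.
 */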

/* Allocate additional Qsets for requested VF */
static void nic_alloc_sqs(struct nicpf *nic, struct sqs_alloc *sqs)
{
	union nic_mbx mbx = {};
	int idx, alloc_qs = 0;
	int sqs_id;

	if (!nic->num_sqs_en)
		goto send_mbox;

	for (idx = 0; idx < sqs->qs_count; idx++) {
		sqs_id = nic_nxt_avail_sqs(nic);
		if (sqs_id < 0)
			break;
		nic->vf_sqs[sqs->vf_id][idx] = sqs_id;
		nic->pqs_vf[sqs_id] = sqs->vf_id;
		alloc_qs++;
	}

send_mbox:
	mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
	mbx.sqs_alloc.vf_id = sqs->vf_id;
	mbx.sqs_alloc.qs_count = alloc_qs;
	nic_send_msg_to_vf(nic, sqs->vf_id, &mbx);
}
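
/* The reply's qs_count reports how many Qsets were actually reserved,
 * which may be fewer than requested if free Qsets ran out; the VF is
 * expected to scale back accordingly.
 */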

static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
{
	int bgx_idx, lmac_idx;

	if (lbk->vf_id >= nic->num_vf_en)
		return -1;

	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
	lmac_idx = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);

	bgx_lmac_internal_loopback(nic->node, bgx_idx, lmac_idx, lbk->enable);

	return 0;
}

/* Reset statistics counters */
static int nic_reset_stat_counters(struct nicpf *nic,
				   int vf, struct reset_stat_cfg *cfg)
{
	int i, stat, qnum;
	u64 reg_addr;

	for (i = 0; i < RX_STATS_ENUM_LAST; i++) {
		if (cfg->rx_stat_mask & BIT(i)) {
			reg_addr = NIC_PF_VNIC_0_127_RX_STAT_0_13 |
				   (vf << NIC_QS_ID_SHIFT) |
				   (i << 3);
			nic_reg_write(nic, reg_addr, 0);
		}
	}

	for (i = 0; i < TX_STATS_ENUM_LAST; i++) {
		if (cfg->tx_stat_mask & BIT(i)) {
			reg_addr = NIC_PF_VNIC_0_127_TX_STAT_0_4 |
				   (vf << NIC_QS_ID_SHIFT) |
				   (i << 3);
			nic_reg_write(nic, reg_addr, 0);
		}
	}

	/* Each queue has two stat registers; bit i of the mask addresses
	 * queue i >> 1, stat i & 1.  The base register is OR'd in per
	 * write so an RQ reset can't contaminate the SQ address.
	 */
	for (i = 0; i <= 15; i++) {
		qnum = i >> 1;
		stat = i & 1 ? 1 : 0;
		reg_addr = (vf << NIC_QS_ID_SHIFT) |
			   (qnum << NIC_Q_NUM_SHIFT) | (stat << 3);
		if (cfg->rq_stat_mask & BIT(i))
			nic_reg_write(nic, reg_addr |
				      NIC_PF_QSET_0_127_RQ_0_7_STAT_0_1, 0);
		if (cfg->sq_stat_mask & BIT(i))
			nic_reg_write(nic, reg_addr |
				      NIC_PF_QSET_0_127_SQ_0_7_STAT_0_1, 0);
	}
	return 0;
}

static void nic_enable_tunnel_parsing(struct nicpf *nic, int vf)
{
	u64 prot_def = (IPV6_PROT << 32) | (IPV4_PROT << 16) | ET_PROT;
	u64 vxlan_prot_def = (IPV6_PROT_DEF << 32) |
			     (IPV4_PROT_DEF << 16) | ET_PROT_DEF;

	/* Configure tunnel parsing parameters */
	nic_reg_write(nic, NIC_PF_RX_GENEVE_DEF,
		      (1ULL << 63) | UDP_GENEVE_PORT_NUM);
	nic_reg_write(nic, NIC_PF_RX_GENEVE_PROT_DEF,
		      (7ULL << 61) | prot_def);
	nic_reg_write(nic, NIC_PF_RX_NVGRE_PROT_DEF,
		      (7ULL << 61) | prot_def);
	nic_reg_write(nic, NIC_PF_RX_VXLAN_DEF_0_1,
		      (1ULL << 63) | UDP_VXLAN_PORT_NUM);
	nic_reg_write(nic, NIC_PF_RX_VXLAN_PROT_DEF,
		      (0xfULL << 60) | vxlan_prot_def);
}

static void nic_enable_vf(struct nicpf *nic, int vf, bool enable)
{
	int bgx, lmac;

	nic->vf_enabled[vf] = enable;

	if (vf >= nic->num_vf_en)
		return;

	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);

	bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, enable);
}

/* Interrupt handler to handle mailbox messages from VFs */
static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};
	u64 *mbx_data;
	u64 mbx_addr;
	u64 reg_addr;
	u64 cfg;
	int bgx, lmac;
	int i;
	int ret = 0;

	nic->mbx_lock[vf] = true;

	mbx_addr = nic_get_mbx_addr(vf);
	mbx_data = (u64 *)&mbx;

	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nic_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(u64);
	}

	dev_dbg(&nic->pdev->dev, "%s: Mailbox msg 0x%02x from VF%d\n",
		__func__, mbx.msg.msg, vf);
	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic_mbx_send_ready(nic, vf);
		if (vf < nic->num_vf_en) {
			nic->link[vf] = 0;
			nic->duplex[vf] = 0;
			nic->speed[vf] = 0;
		}
		goto unlock;
	case NIC_MBOX_MSG_QS_CFG:
		reg_addr = NIC_PF_QSET_0_127_CFG |
			   (mbx.qs.num << NIC_QS_ID_SHIFT);
		cfg = mbx.qs.cfg;
		/* Check if it's a secondary Qset */
		if (vf >= nic->num_vf_en) {
			cfg = cfg & (~0x7FULL);
			/* Assign this Qset to primary Qset's VF */
			cfg |= nic->pqs_vf[vf];
		}
		nic_reg_write(nic, reg_addr, cfg);
		break;
	case NIC_MBOX_MSG_RQ_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG |
			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		/* Enable CQE_RX2_S extension in CQE_RX descriptor.
		 * This is appended by default on 81xx/83xx chips; for
		 * consistency, enable the same on 88xx pass2, where it
		 * was introduced.
		 */
		if (pass2_silicon(nic->pdev))
			nic_reg_write(nic, NIC_PF_RX_CFG, 0x01);
		if (!pass1_silicon(nic->pdev))
			nic_enable_tunnel_parsing(nic, vf);
		break;
	case NIC_MBOX_MSG_RQ_BP_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG |
			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		break;
	case NIC_MBOX_MSG_RQ_SW_SYNC:
		ret = nic_rcv_queue_sw_sync(nic);
		break;
	case NIC_MBOX_MSG_RQ_DROP_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG |
			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		break;
	case NIC_MBOX_MSG_SQ_CFG:
		reg_addr = NIC_PF_QSET_0_127_SQ_0_7_CFG |
			   (mbx.sq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.sq.sq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.sq.cfg);
		nic_tx_channel_cfg(nic, mbx.qs.num, &mbx.sq);
		break;
	case NIC_MBOX_MSG_SET_MAC:
		if (vf >= nic->num_vf_en) {
			ret = -1; /* NACK */
			break;
		}
		lmac = mbx.mac.vf_id;
		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
		bgx_set_lmac_mac(nic->node, bgx, lmac, mbx.mac.mac_addr);
		break;
	case NIC_MBOX_MSG_SET_MAX_FRS:
		ret = nic_update_hw_frs(nic, mbx.frs.max_frs,
					mbx.frs.vf_id);
		break;
	case NIC_MBOX_MSG_CPI_CFG:
		nic_config_cpi(nic, &mbx.cpi_cfg);
		break;
	case NIC_MBOX_MSG_RSS_SIZE:
		nic_send_rss_size(nic, vf);
		goto unlock;
	case NIC_MBOX_MSG_RSS_CFG:
	case NIC_MBOX_MSG_RSS_CFG_CONT:
		nic_config_rss(nic, &mbx.rss_cfg);
		break;
	case NIC_MBOX_MSG_CFG_DONE:
		/* Last message of VF config msg sequence */
		nic_enable_vf(nic, vf, true);
		goto unlock;
	case NIC_MBOX_MSG_SHUTDOWN:
		/* First msg in VF teardown sequence */
		if (vf >= nic->num_vf_en)
			nic->sqs_used[vf - nic->num_vf_en] = false;
		nic->pqs_vf[vf] = 0;
		nic_enable_vf(nic, vf, false);
		break;
	case NIC_MBOX_MSG_ALLOC_SQS:
		nic_alloc_sqs(nic, &mbx.sqs_alloc);
		goto unlock;
	case NIC_MBOX_MSG_NICVF_PTR:
		nic->nicvf[vf] = mbx.nicvf.nicvf;
		break;
	case NIC_MBOX_MSG_PNICVF_PTR:
		nic_send_pnicvf(nic, vf);
		goto unlock;
	case NIC_MBOX_MSG_SNICVF_PTR:
		nic_send_snicvf(nic, &mbx.nicvf);
		goto unlock;
	case NIC_MBOX_MSG_BGX_STATS:
		nic_get_bgx_stats(nic, &mbx.bgx_stats);
		goto unlock;
	case NIC_MBOX_MSG_LOOPBACK:
		ret = nic_config_loopback(nic, &mbx.lbk);
		break;
	case NIC_MBOX_MSG_RESET_STAT_COUNTER:
		ret = nic_reset_stat_counters(nic, vf, &mbx.reset_stat);
		break;
	default:
		dev_err(&nic->pdev->dev,
			"Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
		break;
	}

	if (!ret) {
		nic_mbx_send_ack(nic, vf);
	} else if (mbx.msg.msg != NIC_MBOX_MSG_READY) {
		dev_err(&nic->pdev->dev, "NACK for MBOX 0x%02x from VF %d\n",
			mbx.msg.msg, vf);
		nic_mbx_send_nack(nic, vf);
	}
unlock:
	nic->mbx_lock[vf] = false;
}
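
/* Two MSI-X vectors (NIC_PF_INTR_ID_MBOX0/1) service the two 64-bit
 * MAILBOX_INT registers, covering VFs 0-63 and 64-127 respectively.
 */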

static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq)
{
	struct nicpf *nic = (struct nicpf *)nic_irq;
	int mbx;
	u64 intr;
	u8  vf, vf_per_mbx_reg = 64;

	if (irq == nic->msix_entries[NIC_PF_INTR_ID_MBOX0].vector)
		mbx = 0;
	else
		mbx = 1;

	intr = nic_reg_read(nic, NIC_PF_MAILBOX_INT + (mbx << 3));
	dev_dbg(&nic->pdev->dev, "PF interrupt Mbox%d 0x%llx\n", mbx, intr);
	for (vf = 0; vf < vf_per_mbx_reg; vf++) {
		if (intr & (1ULL << vf)) {
			dev_dbg(&nic->pdev->dev, "Intr from VF %d\n",
				vf + (mbx * vf_per_mbx_reg));

			nic_handle_mbx_intr(nic, vf + (mbx * vf_per_mbx_reg));
			nic_clear_mbx_intr(nic, vf, mbx);
		}
	}
	return IRQ_HANDLED;
}

static int nic_enable_msix(struct nicpf *nic)
{
	int i, ret;

	nic->num_vec = pci_msix_vec_count(nic->pdev);

	nic->msix_entries = kmalloc_array(nic->num_vec,
					  sizeof(struct msix_entry),
					  GFP_KERNEL);
	if (!nic->msix_entries)
		return -ENOMEM;

	for (i = 0; i < nic->num_vec; i++)
		nic->msix_entries[i].entry = i;

	ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
	if (ret) {
		dev_err(&nic->pdev->dev,
			"Request for #%d msix vectors failed, returned %d\n",
			nic->num_vec, ret);
		kfree(nic->msix_entries);
		return ret;
	}

	nic->msix_enabled = 1;
	return 0;
}

static void nic_disable_msix(struct nicpf *nic)
{
	if (nic->msix_enabled) {
		pci_disable_msix(nic->pdev);
		kfree(nic->msix_entries);
		nic->msix_enabled = 0;
		nic->num_vec = 0;
	}
}

static void nic_free_all_interrupts(struct nicpf *nic)
{
	int irq;

	for (irq = 0; irq < nic->num_vec; irq++) {
		if (nic->irq_allocated[irq])
			free_irq(nic->msix_entries[irq].vector, nic);
		nic->irq_allocated[irq] = false;
	}
}

static int nic_register_interrupts(struct nicpf *nic)
{
	int i, ret;

	/* Enable MSI-X */
	ret = nic_enable_msix(nic);
	if (ret)
		return ret;

	/* Register mailbox interrupt handler */
	for (i = NIC_PF_INTR_ID_MBOX0; i < nic->num_vec; i++) {
		sprintf(nic->irq_name[i],
			"NICPF Mbox%d", (i - NIC_PF_INTR_ID_MBOX0));

		ret = request_irq(nic->msix_entries[i].vector,
				  nic_mbx_intr_handler, 0,
				  nic->irq_name[i], nic);
		if (ret)
			goto fail;

		nic->irq_allocated[i] = true;
	}

	/* Enable mailbox interrupt */
	nic_enable_mbx_intr(nic);
	return 0;

fail:
	dev_err(&nic->pdev->dev, "Request irq failed\n");
	nic_free_all_interrupts(nic);
	nic_disable_msix(nic);
	return ret;
}

static void nic_unregister_interrupts(struct nicpf *nic)
{
	nic_free_all_interrupts(nic);
	nic_disable_msix(nic);
}

static int nic_num_sqs_en(struct nicpf *nic, int vf_en)
{
	int pos, sqs_per_vf = MAX_SQS_PER_VF_SINGLE_NODE;
	u16 total_vf;

	/* Secondary Qsets are needed only if CPU count is
	 * more than MAX_QUEUES_PER_QSET.
	 */
	if (num_online_cpus() <= MAX_QUEUES_PER_QSET)
		return 0;

	/* Check if it's a multi-node environment */
	if (nr_node_ids > 1)
		sqs_per_vf = MAX_SQS_PER_VF;

	pos = pci_find_ext_capability(nic->pdev, PCI_EXT_CAP_ID_SRIOV);
	pci_read_config_word(nic->pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf);
	return min(total_vf - vf_en, vf_en * sqs_per_vf);
}
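
/* Example: with more than MAX_QUEUES_PER_QSET CPUs online, 8 VFs enabled
 * and 128 total VFs, up to min(128 - 8, 8 * sqs_per_vf) of the unused VFs
 * can be handed out as secondary Qsets.
 */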

static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic)
{
	int pos = 0;
	int vf_en;
	int err;
	u16 total_vf_cnt;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		dev_err(&pdev->dev, "SRIOV capability is not found in PCIe config space\n");
		return -ENODEV;
	}

	pci_read_config_word(pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf_cnt);
	if (total_vf_cnt < nic->num_vf_en)
		nic->num_vf_en = total_vf_cnt;

	if (!total_vf_cnt)
		return 0;

	vf_en = nic->num_vf_en;
	nic->num_sqs_en = nic_num_sqs_en(nic, nic->num_vf_en);
	vf_en += nic->num_sqs_en;

	err = pci_enable_sriov(pdev, vf_en);
	if (err) {
		dev_err(&pdev->dev, "SRIOV enable failed, num VF is %d\n",
			vf_en);
		nic->num_vf_en = 0;
		return err;
	}

	dev_info(&pdev->dev, "SRIOV enabled, number of VF available %d\n",
		 vf_en);

	nic->flags |= NIC_SRIOV_ENABLED;
	return 0;
}

/* Poll for BGX LMAC link status and update corresponding VF
 * if there is a change, valid only if internal L2 switch
 * is not present otherwise VF link is always treated as up
 */
static void nic_poll_for_link(struct work_struct *work)
{
	union nic_mbx mbx = {};
	struct nicpf *nic;
	struct bgx_link_status link;
	u8 vf, bgx, lmac;

	nic = container_of(work, struct nicpf, dwork.work);

	mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;

	for (vf = 0; vf < nic->num_vf_en; vf++) {
		/* Poll only if VF is UP */
		if (!nic->vf_enabled[vf])
			continue;

		/* Get BGX, LMAC indices for the VF */
		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		/* Get interface link status */
		bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);

		/* Inform VF only if link status changed */
		if (nic->link[vf] == link.link_up)
			continue;

		if (!nic->mbx_lock[vf]) {
			nic->link[vf] = link.link_up;
			nic->duplex[vf] = link.duplex;
			nic->speed[vf] = link.speed;

			/* Send a mbox message to VF with current link status */
			mbx.link_status.link_up = link.link_up;
			mbx.link_status.duplex = link.duplex;
			mbx.link_status.speed = link.speed;
			nic_send_msg_to_vf(nic, vf, &mbx);
		}
	}
	queue_delayed_work(nic->check_link, &nic->dwork, HZ * 2);
}
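
/* Link state is polled every two seconds (HZ * 2) once the work is first
 * queued from nic_probe().
 */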

static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct nicpf *nic;
	int err;

	BUILD_BUG_ON(sizeof(union nic_mbx) > 16);

	nic = devm_kzalloc(dev, sizeof(*nic), GFP_KERNEL);
	if (!nic)
		return -ENOMEM;

	nic->hw = devm_kzalloc(dev, sizeof(struct hw_info), GFP_KERNEL);
	if (!nic->hw) {
		devm_kfree(dev, nic);
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, nic);

	nic->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto err_release_regions;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
		goto err_release_regions;
	}

	/* MAP PF's configuration registers */
	nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!nic->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	nic->node = nic_get_node_id(pdev);

	/* Initialize hardware */
	err = nic_init_hw(nic);
	if (err)
		goto err_release_regions;

	nic_set_lmac_vf_mapping(nic);

	/* Register interrupts */
	err = nic_register_interrupts(nic);
	if (err)
		goto err_release_regions;

	/* Configure SRIOV */
	err = nic_sriov_init(pdev, nic);
	if (err)
		goto err_unregister_interrupts;

	/* Register a physical link status poll fn() */
	nic->check_link = alloc_workqueue("check_link_status",
					  WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!nic->check_link) {
		err = -ENOMEM;
		goto err_disable_sriov;
	}

	INIT_DELAYED_WORK(&nic->dwork, nic_poll_for_link);
	queue_delayed_work(nic->check_link, &nic->dwork, 0);

	return 0;

err_disable_sriov:
	if (nic->flags & NIC_SRIOV_ENABLED)
		pci_disable_sriov(pdev);
err_unregister_interrupts:
	nic_unregister_interrupts(nic);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	nic_free_lmacmem(nic);
	devm_kfree(dev, nic->hw);
	devm_kfree(dev, nic);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void nic_remove(struct pci_dev *pdev)
{
	struct nicpf *nic = pci_get_drvdata(pdev);

	if (nic->flags & NIC_SRIOV_ENABLED)
		pci_disable_sriov(pdev);

	if (nic->check_link) {
		/* Destroy work queue */
		cancel_delayed_work_sync(&nic->dwork);
		destroy_workqueue(nic->check_link);
	}

	nic_unregister_interrupts(nic);
	pci_release_regions(pdev);

	nic_free_lmacmem(nic);
	devm_kfree(&pdev->dev, nic->hw);
	devm_kfree(&pdev->dev, nic);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static struct pci_driver nic_driver = {
	.name = DRV_NAME,
	.id_table = nic_id_table,
	.probe = nic_probe,
	.remove = nic_remove,
};

static int __init nic_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

	return pci_register_driver(&nic_driver);
}

static void __exit nic_cleanup_module(void)
{
	pci_unregister_driver(&nic_driver);
}

module_init(nic_init_module);
module_exit(nic_cleanup_module);