/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/tc_act/tc_gact.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include <linux/bpf.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"
struct mlx5e_rq_param {
	u32			rqc[MLX5_ST_SZ_DW(rqc)];
	struct mlx5_wq_param	wq;
	bool			am_enabled;
};

struct mlx5e_sq_param {
	u32			sqc[MLX5_ST_SZ_DW(sqc)];
	struct mlx5_wq_param	wq;
	u16			max_inline;
	u8			min_inline_mode;
	enum mlx5e_sq_type	type;
};

struct mlx5e_cq_param {
	u32			cqc[MLX5_ST_SZ_DW(cqc)];
	struct mlx5_wq_param	wq;
	u16			eq_ix;
	u8			cq_period_mode;
};

struct mlx5e_channel_param {
	struct mlx5e_rq_param	rq;
	struct mlx5e_sq_param	sq;
	struct mlx5e_sq_param	xdp_sq;
	struct mlx5e_sq_param	icosq;
	struct mlx5e_cq_param	rx_cq;
	struct mlx5e_cq_param	tx_cq;
	struct mlx5e_cq_param	icosq_cq;
};
static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_GEN(mdev, striding_rq) &&
	       MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
	       MLX5_CAP_ETH(mdev, reg_umr_sq);
}
static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
{
	priv->params.rq_wq_type = rq_type;
	switch (priv->params.rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
		priv->params.mpwqe_log_stride_sz = priv->params.rx_cqe_compress ?
			MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS :
			MLX5_MPWRQ_LOG_STRIDE_SIZE;
		priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
			priv->params.mpwqe_log_stride_sz;
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
	}
	priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
					BIT(priv->params.log_rq_size));

	mlx5_core_info(priv->mdev,
		       "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
		       priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
		       BIT(priv->params.log_rq_size),
		       BIT(priv->params.mpwqe_log_stride_sz),
		       priv->params.rx_cqe_compress_admin);
}
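/* Worked example (illustrative, not from the original source): with the
 * common defaults of a 64KB multi-packet WQE (MLX5_MPWRQ_LOG_WQE_SZ = 16)
 * and 64B strides (log stride size = 6), each striding-RQ WQE carries
 * 2^(16 - 6) = 1024 strides, and a 1514-byte frame consumes
 * ceil(1514 / 64) = 24 of them. The exact constants are driver defaults
 * and may differ between versions.
 */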
static void mlx5e_set_rq_priv_params(struct mlx5e_priv *priv)
{
	u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(priv->mdev) &&
		    !priv->xdp_prog ?
		    MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
		    MLX5_WQ_TYPE_LINKED_LIST;

	mlx5e_set_rq_type_params(priv, rq_type);
}
static void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 port_state;

	port_state = mlx5_query_vport_state(mdev,
		MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);

	if (port_state == VPORT_STATE_UP) {
		netdev_info(priv->netdev, "Link up\n");
		netif_carrier_on(priv->netdev);
	} else {
		netdev_info(priv->netdev, "Link down\n");
		netif_carrier_off(priv->netdev);
	}
}
static void mlx5e_update_carrier_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       update_carrier_work);

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_update_carrier(priv);
	mutex_unlock(&priv->state_lock);
}
static void mlx5e_tx_timeout_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       tx_timeout_work);
	int err;

	rtnl_lock();
	mutex_lock(&priv->state_lock);
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto unlock;
	mlx5e_close_locked(priv->netdev);
	err = mlx5e_open_locked(priv->netdev);
	if (err)
		netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
			   err);
unlock:
	mutex_unlock(&priv->state_lock);
	rtnl_unlock();
}
static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	struct mlx5e_rq_stats *rq_stats;
	struct mlx5e_sq_stats *sq_stats;
	u64 tx_offload_none = 0;
	int i, j;

	memset(s, 0, sizeof(*s));
	for (i = 0; i < priv->params.num_channels; i++) {
		rq_stats = &priv->channel[i]->rq.stats;

		s->rx_packets += rq_stats->packets;
		s->rx_bytes += rq_stats->bytes;
		s->rx_lro_packets += rq_stats->lro_packets;
		s->rx_lro_bytes += rq_stats->lro_bytes;
		s->rx_csum_none += rq_stats->csum_none;
		s->rx_csum_complete += rq_stats->csum_complete;
		s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
		s->rx_xdp_drop += rq_stats->xdp_drop;
		s->rx_xdp_tx += rq_stats->xdp_tx;
		s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
		s->rx_wqe_err += rq_stats->wqe_err;
		s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
		s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
		s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
		s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
		s->rx_cache_reuse += rq_stats->cache_reuse;
		s->rx_cache_full += rq_stats->cache_full;
		s->rx_cache_empty += rq_stats->cache_empty;
		s->rx_cache_busy += rq_stats->cache_busy;

		for (j = 0; j < priv->params.num_tc; j++) {
			sq_stats = &priv->channel[i]->sq[j].stats;

			s->tx_packets += sq_stats->packets;
			s->tx_bytes += sq_stats->bytes;
			s->tx_tso_packets += sq_stats->tso_packets;
			s->tx_tso_bytes += sq_stats->tso_bytes;
			s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
			s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
			s->tx_queue_stopped += sq_stats->stopped;
			s->tx_queue_wake += sq_stats->wake;
			s->tx_queue_dropped += sq_stats->dropped;
			s->tx_xmit_more += sq_stats->xmit_more;
			s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
			tx_offload_none += sq_stats->csum_none;
		}
	}

	/* Update calculated offload counters */
	s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner;
	s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;

	s->link_down_events_phy = MLX5_GET(ppcnt_reg,
				priv->stats.pport.phy_counters,
				counter_set.phys_layer_cntrs.link_down_events);
}
static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
{
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
	struct mlx5_core_dev *mdev = priv->mdev;

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, other_vport, 0);

	memset(out, 0, outlen);
	mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}
static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	int prio;
	void *out;
	u32 *in;

	in = mlx5_vzalloc(sz);
	if (!in)
		goto free_out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);

	out = pstats->IEEE_802_3_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	out = pstats->RFC_2863_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	out = pstats->RFC_2819_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	out = pstats->phy_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz,
				     MLX5_REG_PPCNT, 0, 0);
	}

free_out:
	kvfree(in);
}
static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
{
	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;

	if (!priv->q_counter)
		return;

	mlx5_core_query_out_of_buffer(priv->mdev, priv->q_counter,
				      &qcnt->rx_out_of_buffer);
}
void mlx5e_update_stats(struct mlx5e_priv *priv)
{
	mlx5e_update_q_counter(priv);
	mlx5e_update_vport_counters(priv);
	mlx5e_update_pport_counters(priv);
	mlx5e_update_sw_counters(priv);
}
void mlx5e_update_stats_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
					       update_stats_work);
	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		priv->profile->update_stats(priv);
		queue_delayed_work(priv->wq, dwork,
				   msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
	}
	mutex_unlock(&priv->state_lock);
}
static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
			      enum mlx5_dev_event event, unsigned long param)
{
	struct mlx5e_priv *priv = vpriv;

	if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
		return;

	switch (event) {
	case MLX5_DEV_EVENT_PORT_UP:
	case MLX5_DEV_EVENT_PORT_DOWN:
		queue_work(priv->wq, &priv->update_carrier_work);
		break;
	default:
		break;
	}
}

static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
}

static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
	synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
}
#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
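/* Illustrative arithmetic (not from the original source): for the default
 * 1500-byte software MTU, MLX5E_SW2HW_MTU(1500) = 1500 + 14 (ETH_HLEN)
 * + 4 (VLAN_HLEN) + 4 (ETH_FCS_LEN) = 1522 bytes on the wire, and
 * MLX5E_HW2SW_MTU(1522) maps back to 1500.
 */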
static inline int mlx5e_get_wqe_mtt_sz(void)
{
	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
	 * To avoid copying garbage after the mtt array, we allocate
	 * a little more.
	 */
	return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64),
		     MLX5_UMR_MTT_ALIGNMENT);
}
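/* Worked example (illustrative; assumes MLX5_MPWRQ_PAGES_PER_WQE = 16 and
 * MLX5_UMR_MTT_ALIGNMENT = 0x40, typical values for this driver): 16 MTT
 * entries * 8 bytes = 128 bytes, which ALIGN() leaves at 128 since it is
 * already a multiple of 64.
 */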
static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, struct mlx5e_sq *sq,
					struct mlx5e_umr_wqe *wqe, u16 ix)
{
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
	struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
	struct mlx5_wqe_data_seg *dseg = &wqe->data;
	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
	u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
	u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);

	cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
				   ds_cnt);
	cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	cseg->imm = rq->mkey_be;

	ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
	ucseg->klm_octowords =
		cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
	ucseg->bsf_octowords =
		cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
	ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);

	dseg->lkey = sq->mkey_be;
	dseg->addr = cpu_to_be64(wi->umr.mtt_addr);
}
static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
				     struct mlx5e_channel *c)
{
	int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
	int mtt_sz = mlx5e_get_wqe_mtt_sz();
	int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1;
	int i;

	rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
				      GFP_KERNEL, cpu_to_node(c->cpu));
	if (!rq->mpwqe.info)
		goto err_out;

	/* We allocate more than mtt_sz as we will align the pointer */
	rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
					      cpu_to_node(c->cpu));
	if (unlikely(!rq->mpwqe.mtt_no_align))
		goto err_free_wqe_info;

	for (i = 0; i < wq_sz; i++) {
		struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];

		wi->umr.mtt = PTR_ALIGN(rq->mpwqe.mtt_no_align + i * mtt_alloc,
					MLX5_UMR_ALIGN);
		wi->umr.mtt_addr = dma_map_single(c->pdev, wi->umr.mtt, mtt_sz,
						  PCI_DMA_TODEVICE);
		if (unlikely(dma_mapping_error(c->pdev, wi->umr.mtt_addr)))
			goto err_unmap_mtts;

		mlx5e_build_umr_wqe(rq, &c->icosq, &wi->umr.wqe, i);
	}

	return 0;

err_unmap_mtts:
	while (--i >= 0) {
		struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];

		dma_unmap_single(c->pdev, wi->umr.mtt_addr, mtt_sz,
				 PCI_DMA_TODEVICE);
	}
	kfree(rq->mpwqe.mtt_no_align);
err_free_wqe_info:
	kfree(rq->mpwqe.info);

err_out:
	return -ENOMEM;
}
static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq)
{
	int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
	int mtt_sz = mlx5e_get_wqe_mtt_sz();
	int i;

	for (i = 0; i < wq_sz; i++) {
		struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];

		dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz,
				 PCI_DMA_TODEVICE);
	}
	kfree(rq->mpwqe.mtt_no_align);
	kfree(rq->mpwqe.info);
}
static int mlx5e_create_rq(struct mlx5e_channel *c,
			   struct mlx5e_rq_param *param,
			   struct mlx5e_rq *rq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	u32 byte_count;
	u32 frag_sz;
	int npages;
	int wq_sz;
	int err;
	int i;

	param->wq.db_numa_node = cpu_to_node(c->cpu);

	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
				&rq->wq_ctrl);
	if (err)
		return err;

	rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

	wq_sz = mlx5_wq_ll_get_size(&rq->wq);

	rq->wq_type = priv->params.rq_wq_type;
	rq->pdev    = c->pdev;
	rq->netdev  = c->netdev;
	rq->tstamp  = &priv->tstamp;
	rq->channel = c;
	rq->ix      = c->ix;
	rq->priv    = c->priv;

	rq->xdp_prog = priv->xdp_prog;

	rq->buff.map_dir = DMA_FROM_DEVICE;
	if (rq->xdp_prog)
		rq->buff.map_dir = DMA_BIDIRECTIONAL;

	switch (priv->params.rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
		rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;

		rq->mpwqe.mtt_offset = c->ix *
			MLX5E_REQUIRED_MTTS(1, BIT(priv->params.log_rq_size));

		rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
		rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);

		rq->buff.wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
		byte_count = rq->buff.wqe_sz;
		rq->mkey_be = cpu_to_be32(c->priv->umr_mkey.key);
		err = mlx5e_rq_alloc_mpwqe_info(rq, c);
		if (err)
			goto err_rq_wq_destroy;
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		rq->dma_info = kzalloc_node(wq_sz * sizeof(*rq->dma_info),
					    GFP_KERNEL, cpu_to_node(c->cpu));
		if (!rq->dma_info) {
			err = -ENOMEM;
			goto err_rq_wq_destroy;
		}

		rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
		rq->alloc_wqe = mlx5e_alloc_rx_wqe;
		rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;

		rq->buff.wqe_sz = (priv->params.lro_en) ?
				priv->params.lro_wqe_sz :
				MLX5E_SW2HW_MTU(priv->netdev->mtu);
		byte_count = rq->buff.wqe_sz;

		/* calc the required page order */
		frag_sz = MLX5_RX_HEADROOM +
			  byte_count /* packet data */ +
			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		frag_sz = SKB_DATA_ALIGN(frag_sz);

		npages = DIV_ROUND_UP(frag_sz, PAGE_SIZE);
		rq->buff.page_order = order_base_2(npages);
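		/* Illustrative sizing (not from the original source): with
		 * LRO off and a 1500-byte MTU, byte_count = 1522, so
		 * frag_sz = headroom + 1522 + the aligned skb_shared_info;
		 * on 4KB pages that still fits in one page, giving
		 * npages = 1 and page_order = 0. Only large LRO WQE sizes
		 * push page_order above zero.
		 */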
		byte_count |= MLX5_HW_START_PADDING;
		rq->mkey_be = c->mkey_be;
	}

	for (i = 0; i < wq_sz; i++) {
		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);

		wqe->data.byte_count = cpu_to_be32(byte_count);
		wqe->data.lkey = rq->mkey_be;
	}

	INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
	rq->am.mode = priv->params.rx_cq_period_mode;

	rq->page_cache.head = 0;
	rq->page_cache.tail = 0;

	if (rq->xdp_prog)
		bpf_prog_add(rq->xdp_prog, 1);

	return 0;

err_rq_wq_destroy:
	mlx5_wq_destroy(&rq->wq_ctrl);

	return err;
}
static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
	int i;

	if (rq->xdp_prog)
		bpf_prog_put(rq->xdp_prog);

	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		mlx5e_rq_free_mpwqe_info(rq);
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		kfree(rq->dma_info);
	}

	for (i = rq->page_cache.head; i != rq->page_cache.tail;
	     i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
		struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];

		mlx5e_page_release(rq, dma_info, false);
	}
	mlx5_wq_destroy(&rq->wq_ctrl);
}
static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
	struct mlx5e_priv *priv = rq->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
		sizeof(u64) * rq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	wq  = MLX5_ADDR_OF(rqc, rqc, wq);

	memcpy(rqc, param->rqc, sizeof(param->rqc));

	MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
	MLX5_SET(rqc, rqc, vsd, priv->params.vlan_strip_disable);
	MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
					MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&rq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

	kvfree(in);

	return err;
}
static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
				 int next_state)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
	MLX5_SET(rqc, rqc, state, next_state);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}
static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
	MLX5_SET64(modify_rq_in, in, modify_bitmask,
		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
	MLX5_SET(rqc, rqc, vsd, vsd);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}
static void mlx5e_disable_rq(struct mlx5e_rq *rq)
{
	mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
}
static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
	unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_wq_ll *wq = &rq->wq;

	while (time_before(jiffies, exp_time)) {
		if (wq->cur_sz >= priv->params.min_rx_wqes)
			return 0;

		msleep(20);
	}

	return -ETIMEDOUT;
}

static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
{
	struct mlx5_wq_ll *wq = &rq->wq;
	struct mlx5e_rx_wqe *wqe;
	__be16 wqe_ix_be;
	u16 wqe_ix;

	/* UMR WQE (if in progress) is always at wq->head */
	if (test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
		mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);

	while (!mlx5_wq_ll_is_empty(wq)) {
		wqe_ix_be = *wq->tail_next;
		wqe_ix    = be16_to_cpu(wqe_ix_be);
		wqe       = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
		rq->dealloc_wqe(rq, wqe_ix);
		mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
			       &wqe->next.next_wqe_index);
	}
}
static int mlx5e_open_rq(struct mlx5e_channel *c,
			 struct mlx5e_rq_param *param,
			 struct mlx5e_rq *rq)
{
	struct mlx5e_sq *sq = &c->icosq;
	u16 pi = sq->pc & sq->wq.sz_m1;
	int err;

	err = mlx5e_create_rq(c, param, rq);
	if (err)
		return err;

	err = mlx5e_enable_rq(rq, param);
	if (err)
		goto err_destroy_rq;

	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		goto err_disable_rq;

	if (param->am_enabled)
		set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);

	sq->db.ico_wqe[pi].opcode     = MLX5_OPCODE_NOP;
	sq->db.ico_wqe[pi].num_wqebbs = 1;
	mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */

	return 0;

err_disable_rq:
	mlx5e_disable_rq(rq);
err_destroy_rq:
	mlx5e_destroy_rq(rq);

	return err;
}

static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
	set_bit(MLX5E_RQ_STATE_FLUSH, &rq->state);
	napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
	cancel_work_sync(&rq->am.work);

	mlx5e_disable_rq(rq);
	mlx5e_free_rx_descs(rq);
	mlx5e_destroy_rq(rq);
}
static void mlx5e_free_sq_xdp_db(struct mlx5e_sq *sq)
{
	kfree(sq->db.xdp.di);
	kfree(sq->db.xdp.wqe_info);
}

static int mlx5e_alloc_sq_xdp_db(struct mlx5e_sq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);

	sq->db.xdp.di = kzalloc_node(sizeof(*sq->db.xdp.di) * wq_sz,
				     GFP_KERNEL, numa);
	sq->db.xdp.wqe_info = kzalloc_node(sizeof(*sq->db.xdp.wqe_info) * wq_sz,
					   GFP_KERNEL, numa);
	if (!sq->db.xdp.di || !sq->db.xdp.wqe_info) {
		mlx5e_free_sq_xdp_db(sq);
		return -ENOMEM;
	}

	return 0;
}

static void mlx5e_free_sq_ico_db(struct mlx5e_sq *sq)
{
	kfree(sq->db.ico_wqe);
}

static int mlx5e_alloc_sq_ico_db(struct mlx5e_sq *sq, int numa)
{
	u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);

	sq->db.ico_wqe = kzalloc_node(sizeof(*sq->db.ico_wqe) * wq_sz,
				      GFP_KERNEL, numa);
	if (!sq->db.ico_wqe)
		return -ENOMEM;

	return 0;
}

static void mlx5e_free_sq_txq_db(struct mlx5e_sq *sq)
{
	kfree(sq->db.txq.wqe_info);
	kfree(sq->db.txq.dma_fifo);
	kfree(sq->db.txq.skb);
}

static int mlx5e_alloc_sq_txq_db(struct mlx5e_sq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

	sq->db.txq.skb = kzalloc_node(wq_sz * sizeof(*sq->db.txq.skb),
				      GFP_KERNEL, numa);
	sq->db.txq.dma_fifo = kzalloc_node(df_sz * sizeof(*sq->db.txq.dma_fifo),
					   GFP_KERNEL, numa);
	sq->db.txq.wqe_info = kzalloc_node(wq_sz * sizeof(*sq->db.txq.wqe_info),
					   GFP_KERNEL, numa);
	if (!sq->db.txq.skb || !sq->db.txq.dma_fifo || !sq->db.txq.wqe_info) {
		mlx5e_free_sq_txq_db(sq);
		return -ENOMEM;
	}

	sq->dma_fifo_mask = df_sz - 1;

	return 0;
}

static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
{
	switch (sq->type) {
	case MLX5E_SQ_TXQ:
		mlx5e_free_sq_txq_db(sq);
		break;
	case MLX5E_SQ_ICO:
		mlx5e_free_sq_ico_db(sq);
		break;
	case MLX5E_SQ_XDP:
		mlx5e_free_sq_xdp_db(sq);
		break;
	}
}

static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
{
	switch (sq->type) {
	case MLX5E_SQ_TXQ:
		return mlx5e_alloc_sq_txq_db(sq, numa);
	case MLX5E_SQ_ICO:
		return mlx5e_alloc_sq_ico_db(sq, numa);
	case MLX5E_SQ_XDP:
		return mlx5e_alloc_sq_xdp_db(sq, numa);
	}

	return 0;
}
static int mlx5e_sq_get_max_wqebbs(u8 sq_type)
{
	switch (sq_type) {
	case MLX5E_SQ_ICO:
		return MLX5E_ICOSQ_MAX_WQEBBS;
	case MLX5E_SQ_XDP:
		return MLX5E_XDP_TX_WQEBBS;
	}
	return MLX5_SEND_WQE_MAX_WQEBBS;
}
static int mlx5e_create_sq(struct mlx5e_channel *c,
			   int tc,
			   struct mlx5e_sq_param *param,
			   struct mlx5e_sq *sq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *sqc = param->sqc;
	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
	int err;

	sq->type    = param->type;
	sq->pdev    = c->pdev;
	sq->tstamp  = &priv->tstamp;
	sq->mkey_be = c->mkey_be;
	sq->channel = c;
	sq->tc      = tc;

	err = mlx5_alloc_map_uar(mdev, &sq->uar, !!MLX5_CAP_GEN(mdev, bf));
	if (err)
		return err;

	param->wq.db_numa_node = cpu_to_node(c->cpu);

	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
				 &sq->wq_ctrl);
	if (err)
		goto err_unmap_free_uar;

	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
	if (sq->uar.bf_map) {
		set_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state);
		sq->uar_map = sq->uar.bf_map;
	} else {
		sq->uar_map = sq->uar.map;
	}
	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
	sq->max_inline  = param->max_inline;
	sq->min_inline_mode =
		MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5E_INLINE_MODE_VPORT_CONTEXT ?
		param->min_inline_mode : 0;

	err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	if (sq->type == MLX5E_SQ_TXQ) {
		int txq_ix;

		txq_ix = c->ix + tc * priv->params.num_channels;
		sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
		priv->txq_to_sq_map[txq_ix] = sq;
	}

	sq->edge      = (sq->wq.sz_m1 + 1) - mlx5e_sq_get_max_wqebbs(sq->type);
	sq->bf_budget = MLX5E_SQ_BF_BUDGET;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

err_unmap_free_uar:
	mlx5_unmap_free_uar(mdev, &sq->uar);

	return err;
}
static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;

	mlx5e_free_sq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
	mlx5_unmap_free_uar(priv->mdev, &sq->uar);
}
static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *sqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) * sq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq  = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, param->sqc, sizeof(param->sqc));

	MLX5_SET(sqc, sqc, tis_num_0, param->type == MLX5E_SQ_ICO ?
				      0 : priv->tisn[sq->tc]);
	MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
	MLX5_SET(sqc, sqc, min_wqe_inline_mode, sq->min_inline_mode);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc, sqc, tis_lst_sz, param->type == MLX5E_SQ_ICO ? 0 : 1);
	MLX5_SET(sqc, sqc, flush_in_error_en, 1);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, uar_page, sq->uar.index);
	MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
					MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&sq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);

	kvfree(in);

	return err;
}
static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state,
			   int next_state, bool update_rl, int rl_index)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

	MLX5_SET(modify_sq_in, in, sq_state, curr_state);
	MLX5_SET(sqc, sqc, state, next_state);
	if (update_rl && next_state == MLX5_SQC_STATE_RDY) {
		MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
		MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
	}

	err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);

	kvfree(in);

	return err;
}
static void mlx5e_disable_sq(struct mlx5e_sq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	mlx5_core_destroy_sq(mdev, sq->sqn);
	if (sq->rate_limit)
		mlx5_rl_remove_rate(mdev, sq->rate_limit);
}
static int mlx5e_open_sq(struct mlx5e_channel *c,
			 int tc,
			 struct mlx5e_sq_param *param,
			 struct mlx5e_sq *sq)
{
	int err;

	err = mlx5e_create_sq(c, tc, param, sq);
	if (err)
		return err;

	err = mlx5e_enable_sq(sq, param);
	if (err)
		goto err_destroy_sq;

	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY,
			      false, 0);
	if (err)
		goto err_disable_sq;

	if (sq->txq) {
		netdev_tx_reset_queue(sq->txq);
		netif_tx_start_queue(sq->txq);
	}

	return 0;

err_disable_sq:
	mlx5e_disable_sq(sq);
err_destroy_sq:
	mlx5e_destroy_sq(sq);

	return err;
}
static inline void netif_tx_disable_queue(struct netdev_queue *txq)
{
	__netif_tx_lock_bh(txq);
	netif_tx_stop_queue(txq);
	__netif_tx_unlock_bh(txq);
}
static void mlx5e_close_sq(struct mlx5e_sq *sq)
{
	set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
	/* prevent netif_tx_wake_queue */
	napi_synchronize(&sq->channel->napi);

	if (sq->txq) {
		netif_tx_disable_queue(sq->txq);

		/* last doorbell out, godspeed .. */
		if (mlx5e_sq_has_room_for(sq, 1)) {
			sq->db.txq.skb[(sq->pc & sq->wq.sz_m1)] = NULL;
			mlx5e_send_nop(sq, true);
		}
	}

	mlx5e_disable_sq(sq);
	mlx5e_free_sq_descs(sq);
	mlx5e_destroy_sq(sq);
}
static int mlx5e_create_cq(struct mlx5e_channel *c,
			   struct mlx5e_cq_param *param,
			   struct mlx5e_cq *cq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	unsigned int irqn;
	int err;
	u32 i;

	param->wq.buf_numa_node = cpu_to_node(c->cpu);
	param->wq.db_numa_node  = cpu_to_node(c->cpu);
	param->eq_ix = c->ix;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		return err;

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

	cq->napi = &c->napi;

	mcq->cqe_sz     = 64;
	mcq->set_ci_db  = cq->wq_ctrl.db.db;
	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db    = 0;
	mcq->vector     = param->eq_ix;
	mcq->comp       = mlx5e_completion_event;
	mcq->event      = mlx5e_cq_error_event;
	mcq->irqn       = irqn;
	mcq->uar        = &mdev->mlx5e_res.cq_uar;

	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
	}

	cq->channel = c;
	cq->priv = priv;

	return 0;
}

static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
	mlx5_wq_destroy(&cq->wq_ctrl);
}
static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
	struct mlx5e_priv *priv = cq->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;

	void *in;
	void *cqc;
	int inlen;
	unsigned int irqn_not_used;
	int eqn;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, param->cqc, sizeof(param->cqc));

	mlx5_fill_page_array(&cq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);

	MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
	MLX5_SET(cqc, cqc, c_eqn, eqn);
	MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
	MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
					  MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(mdev, mcq, in, inlen);

	kvfree(in);

	if (err)
		return err;

	mlx5e_cq_arm(cq);

	return 0;
}

static void mlx5e_disable_cq(struct mlx5e_cq *cq)
{
	struct mlx5e_priv *priv = cq->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	mlx5_core_destroy_cq(mdev, &cq->mcq);
}
static int mlx5e_open_cq(struct mlx5e_channel *c,
			 struct mlx5e_cq_param *param,
			 struct mlx5e_cq *cq,
			 struct mlx5e_cq_moder moderation)
{
	int err;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	err = mlx5e_create_cq(c, param, cq);
	if (err)
		return err;

	err = mlx5e_enable_cq(cq, param);
	if (err)
		goto err_destroy_cq;

	if (MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
					       moderation.usec,
					       moderation.pkts);
	return 0;

err_destroy_cq:
	mlx5e_destroy_cq(cq);

	return err;
}

static void mlx5e_close_cq(struct mlx5e_cq *cq)
{
	mlx5e_disable_cq(cq);
	mlx5e_destroy_cq(cq);
}
static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
{
	return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
}
static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
			     struct mlx5e_channel_param *cparam)
{
	struct mlx5e_priv *priv = c->priv;
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
				    priv->params.tx_cq_moderation);
		if (err)
			goto err_close_tx_cqs;
	}

	return 0;

err_close_tx_cqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_cq(&c->sq[tc].cq);

	return err;
}

static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->sq[tc].cq);
}
static int mlx5e_open_sqs(struct mlx5e_channel *c,
			  struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
		if (err)
			goto err_close_sqs;
	}

	return 0;

err_close_sqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_sq(&c->sq[tc]);

	return err;
}

static void mlx5e_close_sqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_sq(&c->sq[tc]);
}
static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix)
{
	int i;

	for (i = 0; i < priv->profile->max_tc; i++)
		priv->channeltc_to_txq_map[ix][i] =
			ix + i * priv->params.num_channels;
}
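/* Illustrative mapping (not from the original source): with 4 channels and
 * 2 TCs, channel ix = 2 gets txq 2 for TC 0 and txq 2 + 1 * 4 = 6 for TC 1,
 * i.e. each TC's queues are laid out contiguously across all channels
 * before the next TC begins.
 */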
static int mlx5e_set_sq_maxrate(struct net_device *dev,
				struct mlx5e_sq *sq, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 rl_index = 0;
	int err;

	if (rate == sq->rate_limit)
		/* nothing to do */
		return 0;

	if (sq->rate_limit)
		/* remove current rl index to free space to next ones */
		mlx5_rl_remove_rate(mdev, sq->rate_limit);

	sq->rate_limit = 0;

	if (rate) {
		err = mlx5_rl_add_rate(mdev, rate, &rl_index);
		if (err) {
			netdev_err(dev, "Failed configuring rate %u: %d\n",
				   rate, err);
			return err;
		}
	}

	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
			      MLX5_SQC_STATE_RDY, true, rl_index);
	if (err) {
		netdev_err(dev, "Failed configuring rate %u: %d\n",
			   rate, err);
		/* remove the rate from the table */
		if (rl_index)
			mlx5_rl_remove_rate(mdev, rate);
		return err;
	}

	sq->rate_limit = rate;
	return 0;
}
static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_sq *sq = priv->txq_to_sq_map[index];
	int err = 0;

	if (!mlx5_rl_is_supported(mdev)) {
		netdev_err(dev, "Rate limiting is not supported on this device\n");
		return -EINVAL;
	}

	/* rate is given in Mb/sec, HW config is in Kb/sec */
	rate = rate << 10;
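	/* Example (illustrative, not from the original source): a request of
	 * 1000 Mb/s from ndo_set_tx_maxrate becomes 1000 << 10 = 1024000 Kb/s
	 * before the range check and rate-table update below.
	 */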
	/* Check whether rate in valid range, 0 is always valid */
	if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
		netdev_err(dev, "TX rate %u, is not in range\n", rate);
		return -ERANGE;
	}

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		err = mlx5e_set_sq_maxrate(dev, sq, rate);
	if (!err)
		priv->tx_rates[index] = rate;
	mutex_unlock(&priv->state_lock);

	return err;
}
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
			      struct mlx5e_channel_param *cparam,
			      struct mlx5e_channel **cp)
{
	struct mlx5e_cq_moder icosq_cq_moder = {0, 0};
	struct net_device *netdev = priv->netdev;
	struct mlx5e_cq_moder rx_cq_profile;
	int cpu = mlx5e_get_cpu(priv, ix);
	struct mlx5e_channel *c;
	struct mlx5e_sq *sq;
	int err;
	int i;

	c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
	if (!c)
		return -ENOMEM;

	c->priv    = priv;
	c->ix      = ix;
	c->cpu     = cpu;
	c->pdev    = &priv->mdev->pdev->dev;
	c->netdev  = priv->netdev;
	c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
	c->num_tc  = priv->params.num_tc;

	if (priv->params.rx_am_enabled)
		rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode);
	else
		rx_cq_profile = priv->params.rx_cq_moderation;

	mlx5e_build_channeltc_to_txq_map(priv, ix);

	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);

	err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, icosq_cq_moder);
	if (err)
		goto err_napi_del;

	err = mlx5e_open_tx_cqs(c, cparam);
	if (err)
		goto err_close_icosq_cq;

	err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
			    rx_cq_profile);
	if (err)
		goto err_close_tx_cqs;

	napi_enable(&c->napi);

	err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq);
	if (err)
		goto err_disable_napi;

	err = mlx5e_open_sqs(c, cparam);
	if (err)
		goto err_close_icosq;

	for (i = 0; i < priv->params.num_tc; i++) {
		u32 txq_ix = priv->channeltc_to_txq_map[ix][i];

		if (priv->tx_rates[txq_ix]) {
			sq = priv->txq_to_sq_map[txq_ix];
			mlx5e_set_sq_maxrate(priv->netdev, sq,
					     priv->tx_rates[txq_ix]);
		}
	}

	if (priv->xdp_prog) {
		/* XDP SQ CQ params are same as normal TXQ sq CQ params */
		err = mlx5e_open_cq(c, &cparam->tx_cq, &c->xdp_sq.cq,
				    priv->params.tx_cq_moderation);
		if (err)
			goto err_close_sqs;

		err = mlx5e_open_sq(c, 0, &cparam->xdp_sq, &c->xdp_sq);
		if (err) {
			mlx5e_close_cq(&c->xdp_sq.cq);
			goto err_close_sqs;
		}
	}

	c->xdp = !!priv->xdp_prog;
	err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
	if (err)
		goto err_close_xdp_sq;

	netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
	*cp = c;

	return 0;

err_close_xdp_sq:
	if (c->xdp)
		mlx5e_close_sq(&c->xdp_sq);

err_close_sqs:
	mlx5e_close_sqs(c);

err_close_icosq:
	mlx5e_close_sq(&c->icosq);

err_disable_napi:
	napi_disable(&c->napi);
	mlx5e_close_cq(&c->rq.cq);

err_close_tx_cqs:
	mlx5e_close_tx_cqs(c);

err_close_icosq_cq:
	mlx5e_close_cq(&c->icosq.cq);

err_napi_del:
	netif_napi_del(&c->napi);
	napi_hash_del(&c->napi);
	kfree(c);

	return err;
}
static void mlx5e_close_channel(struct mlx5e_channel *c)
{
	mlx5e_close_rq(&c->rq);
	if (c->xdp)
		mlx5e_close_sq(&c->xdp_sq);
	mlx5e_close_sqs(c);
	mlx5e_close_sq(&c->icosq);
	napi_disable(&c->napi);
	if (c->xdp)
		mlx5e_close_cq(&c->xdp_sq.cq);
	mlx5e_close_cq(&c->rq.cq);
	mlx5e_close_tx_cqs(c);
	mlx5e_close_cq(&c->icosq.cq);
	netif_napi_del(&c->napi);
	napi_hash_del(&c->napi);

	kfree(c);
}
static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
				 struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	switch (priv->params.rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		MLX5_SET(wq, wq, log_wqe_num_of_strides,
			 priv->params.mpwqe_log_num_strides - 9);
		MLX5_SET(wq, wq, log_wqe_stride_size,
			 priv->params.mpwqe_log_stride_sz - 6);
		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
	}

	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
	MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
	MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);

	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
	param->wq.linear = 1;

	param->am_enabled = priv->params.rx_am_enabled;
}
static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
}

static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
					struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);

	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
}
static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
				 struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);
	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);

	param->max_inline = priv->params.tx_max_inline;
	param->min_inline_mode = priv->params.tx_min_inline_mode;
	param->type = MLX5E_SQ_TXQ;
}

static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
					struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, uar_page, priv->mdev->mlx5e_res.cq_uar.index);
}
static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;
	u8 log_cq_size;

	switch (priv->params.rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		log_cq_size = priv->params.log_rq_size +
			priv->params.mpwqe_log_num_strides;
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		log_cq_size = priv->params.log_rq_size;
	}

	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
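	/* Illustrative sizing (not from the original source): a striding RQ
	 * of, say, 2^6 WQEs with 2^10 strides each needs a CQ sized for
	 * 2^(6 + 10) CQEs, since every stride can in principle complete a
	 * packet; a plain linked-list RQ only needs one CQE per WQE.
	 */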
	if (priv->params.rx_cqe_compress) {
		MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
	}

	mlx5e_build_common_cq_param(priv, param);

	param->cq_period_mode = priv->params.rx_cq_period_mode;
}
static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);

	mlx5e_build_common_cq_param(priv, param);

	param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
}

static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
				     struct mlx5e_cq_param *param,
				     u8 log_wq_size)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);

	mlx5e_build_common_cq_param(priv, param);

	param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
}

static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
				    struct mlx5e_sq_param *param,
				    u8 log_wq_size)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);

	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));

	param->type = MLX5E_SQ_ICO;
}

static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
				    struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);
	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);

	param->max_inline = priv->params.tx_max_inline;
	/* XDP SQs support only L2 inline mode */
	param->min_inline_mode = MLX5_INLINE_MODE_NONE;
	param->type = MLX5E_SQ_XDP;
}
static void mlx5e_build_channel_param(struct mlx5e_priv *priv, struct mlx5e_channel_param *cparam)
{
	u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;

	mlx5e_build_rq_param(priv, &cparam->rq);
	mlx5e_build_sq_param(priv, &cparam->sq);
	mlx5e_build_xdpsq_param(priv, &cparam->xdp_sq);
	mlx5e_build_icosq_param(priv, &cparam->icosq, icosq_log_wq_sz);
	mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
	mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
	mlx5e_build_ico_cq_param(priv, &cparam->icosq_cq, icosq_log_wq_sz);
}
static int mlx5e_open_channels(struct mlx5e_priv *priv)
{
	struct mlx5e_channel_param *cparam;
	int nch = priv->params.num_channels;
	int err = -ENOMEM;
	int i;
	int j;

	priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *),
				GFP_KERNEL);

	priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
				      sizeof(struct mlx5e_sq *), GFP_KERNEL);

	cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);

	if (!priv->channel || !priv->txq_to_sq_map || !cparam)
		goto err_free_txq_to_sq_map;

	mlx5e_build_channel_param(priv, cparam);

	for (i = 0; i < nch; i++) {
		err = mlx5e_open_channel(priv, i, cparam, &priv->channel[i]);
		if (err)
			goto err_close_channels;
	}

	for (j = 0; j < nch; j++) {
		err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
		if (err)
			goto err_close_channels;
	}

	/* FIXME: This is a W/A for tx timeout watch dog false alarm when
	 * polling for inactive tx queues.
	 */
	netif_tx_start_all_queues(priv->netdev);

	kfree(cparam);
	return 0;

err_close_channels:
	for (i--; i >= 0; i--)
		mlx5e_close_channel(priv->channel[i]);

err_free_txq_to_sq_map:
	kfree(priv->txq_to_sq_map);
	kfree(priv->channel);
	kfree(cparam);

	return err;
}
static void mlx5e_close_channels(struct mlx5e_priv *priv)
{
	int i;

	/* FIXME: This is a W/A only for tx timeout watch dog false alarm when
	 * polling for inactive tx queues.
	 */
	netif_tx_stop_all_queues(priv->netdev);
	netif_tx_disable(priv->netdev);

	for (i = 0; i < priv->params.num_channels; i++)
		mlx5e_close_channel(priv->channel[i]);

	kfree(priv->txq_to_sq_map);
	kfree(priv->channel);
}
static int mlx5e_rx_hash_fn(int hfunc)
{
	return (hfunc == ETH_RSS_HASH_TOP) ?
	       MLX5_RX_HASH_FN_TOEPLITZ :
	       MLX5_RX_HASH_FN_INVERTED_XOR8;
}

static int mlx5e_bits_invert(unsigned long a, int size)
{
	int inv = 0;
	int i;

	for (i = 0; i < size; i++)
		inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;

	return inv;
}
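/* Example (illustrative): mlx5e_bits_invert(0b001, 3) = 0b100, i.e. the bit
 * at position (size - 1 - i) moves to position i. With the XOR8 hash this
 * spreads adjacent slots across the indirection table.
 */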
static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
{
	int i;

	for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
		int ix = i;
		u32 rqn;

		if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
			ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);

		ix = priv->params.indirection_rqt[ix];
		rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
		      priv->channel[ix]->rq.rqn :
		      priv->drop_rq.rqn;
		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}
}

static void mlx5e_fill_direct_rqt_rqn(struct mlx5e_priv *priv, void *rqtc,
				      int ix)
{
	u32 rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
		  priv->channel[ix]->rq.rqn :
		  priv->drop_rq.rqn;

	MLX5_SET(rqtc, rqtc, rq_num[0], rqn);
}
static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz,
			    int ix, struct mlx5e_rqt *rqt)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	int inlen;
	int err;
	u32 *in;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	if (sz > 1) /* RSS */
		mlx5e_fill_indir_rqt_rqns(priv, rqtc);
	else
		mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);

	err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
	if (!err)
		rqt->enabled = true;

	kvfree(in);
	return err;
}

void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
{
	rqt->enabled = false;
	mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
}
static int mlx5e_create_indirect_rqts(struct mlx5e_priv *priv)
{
	struct mlx5e_rqt *rqt = &priv->indir_rqt;

	return mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqt);
}

int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
{
	struct mlx5e_rqt *rqt;
	int err;
	int ix;

	for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
		rqt = &priv->direct_tir[ix].rqt;
		err = mlx5e_create_rqt(priv, 1 /*size */, ix, rqt);
		if (err)
			goto err_destroy_rqts;
	}

	return 0;

err_destroy_rqts:
	for (ix--; ix >= 0; ix--)
		mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt);

	return err;
}
int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	int inlen;
	u32 *in;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	if (sz > 1) /* RSS */
		mlx5e_fill_indir_rqt_rqns(priv, rqtc);
	else
		mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);

	MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);

	err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);

	kvfree(in);

	return err;
}
static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
{
	u32 rqtn;
	int ix;

	if (priv->indir_rqt.enabled) {
		rqtn = priv->indir_rqt.rqtn;
		mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
	}

	for (ix = 0; ix < priv->params.num_channels; ix++) {
		if (!priv->direct_tir[ix].rqt.enabled)
			continue;
		rqtn = priv->direct_tir[ix].rqt.rqtn;
		mlx5e_redirect_rqt(priv, rqtn, 1, ix);
	}
}
static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
{
	if (!priv->params.lro_en)
		return;

#define ROUGH_MAX_L2_L3_HDR_SZ 256

	MLX5_SET(tirc, tirc, lro_enable_mask,
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
	MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
		 (priv->params.lro_wqe_sz -
		  ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
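	/* Note (illustrative, not from the original source): the field is
	 * programmed in 256-byte units, hence the >> 8; e.g. a 64KB LRO WQE
	 * yields (65536 - 256) >> 8 = 255 units.
	 */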
	MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
		 MLX5_CAP_ETH(priv->mdev,
			      lro_timer_supported_periods[2]));
}
void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
{
	MLX5_SET(tirc, tirc, rx_hash_fn,
		 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
	if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
		void *rss_key = MLX5_ADDR_OF(tirc, tirc,
					     rx_hash_toeplitz_key);
		size_t len = MLX5_FLD_SZ_BYTES(tirc,
					       rx_hash_toeplitz_key);

		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
		memcpy(rss_key, priv->params.toeplitz_hash_key, len);
	}
}
static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *tirc;
	int inlen;
	int err;
	int tt;
	int ix;

	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
	tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);

	mlx5e_build_tir_ctx_lro(tirc, priv);

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
					   inlen);
		if (err)
			goto free_in;
	}

	for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
		err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
					   in, inlen);
		if (err)
			goto free_in;
	}

free_in:
	kvfree(in);

	return err;
}
static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 hw_mtu = MLX5E_SW2HW_MTU(mtu);
	int err;

	err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
	if (err)
		return err;

	/* Update vport context MTU */
	mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
	return 0;
}

static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 hw_mtu = 0;
	int err;

	err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
	if (err || !hw_mtu) /* fallback to port oper mtu */
		mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);

	*mtu = MLX5E_HW2SW_MTU(hw_mtu);
}
static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	u16 mtu;
	int err;

	err = mlx5e_set_mtu(priv, netdev->mtu);
	if (err)
		return err;

	mlx5e_query_mtu(priv, &mtu);
	if (mtu != netdev->mtu)
		netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
			    __func__, mtu, netdev->mtu);

	netdev->mtu = mtu;
	return 0;
}
static void mlx5e_netdev_set_tcs(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int nch = priv->params.num_channels;
	int ntc = priv->params.num_tc;
	int tc;

	netdev_reset_tc(netdev);

	if (ntc == 1)
		return;

	netdev_set_num_tc(netdev, ntc);

	/* Map netdev TCs to offset 0
	 * We have our own UP to TXQ mapping for QoS
	 */
	for (tc = 0; tc < ntc; tc++)
		netdev_set_tc_queue(netdev, tc, nch, 0);
}
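/* Illustrative effect (not from the original source): with nch = 4 and
 * ntc = 2, the stack sees 2 TCs whose queue ranges both start at offset 0
 * with a count of 4; the driver's own channeltc_to_txq_map, not the netdev
 * TC offsets, decides which real TXQ a (channel, tc) pair uses.
 */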
int mlx5e_open_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	int num_txqs;
	int err;

	set_bit(MLX5E_STATE_OPENED, &priv->state);

	mlx5e_netdev_set_tcs(netdev);

	num_txqs = priv->params.num_channels * priv->params.num_tc;
	netif_set_real_num_tx_queues(netdev, num_txqs);
	netif_set_real_num_rx_queues(netdev, priv->params.num_channels);

	err = mlx5e_open_channels(priv);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
			   __func__, err);
		goto err_clear_state_opened_flag;
	}

	err = mlx5e_refresh_tirs_self_loopback_enable(priv->mdev);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n",
			   __func__, err);
		goto err_close_channels;
	}

	mlx5e_redirect_rqts(priv);
	mlx5e_update_carrier(priv);
	mlx5e_timestamp_init(priv);
#ifdef CONFIG_RFS_ACCEL
	priv->netdev->rx_cpu_rmap = priv->mdev->rmap;
#endif
	if (priv->profile->update_stats)
		queue_delayed_work(priv->wq, &priv->update_stats_work, 0);

	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
		err = mlx5e_add_sqs_fwd_rules(priv);
		if (err)
			goto err_close_channels;
	}

	return 0;

err_close_channels:
	mlx5e_close_channels(priv);
err_clear_state_opened_flag:
	clear_bit(MLX5E_STATE_OPENED, &priv->state);
	return err;
}
int mlx5e_open(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}
int mlx5e_close_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;

	/* May already be CLOSED in case a previous configuration operation
	 * (e.g RX/TX queue size change) that involves close&open failed.
	 */
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	clear_bit(MLX5E_STATE_OPENED, &priv->state);

	if (MLX5_CAP_GEN(mdev, vport_group_manager))
		mlx5e_remove_sqs_fwd_rules(priv);

	mlx5e_timestamp_cleanup(priv);
	netif_carrier_off(priv->netdev);
	mlx5e_redirect_rqts(priv);
	mlx5e_close_channels(priv);

	return 0;
}
int mlx5e_close(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	if (!netif_device_present(netdev))
		return -ENODEV;

	mutex_lock(&priv->state_lock);
	err = mlx5e_close_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}
static int mlx5e_create_drop_rq(struct mlx5e_priv *priv,
				struct mlx5e_rq *rq,
				struct mlx5e_rq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int err;

	param->wq.db_numa_node = param->wq.buf_numa_node;

	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
				&rq->wq_ctrl);
	if (err)
		return err;

	rq->priv = priv;

	return 0;
}
static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
				struct mlx5e_cq *cq,
				struct mlx5e_cq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	unsigned int irqn;
	int err;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		return err;

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

	mcq->cqe_sz     = 64;
	mcq->set_ci_db  = cq->wq_ctrl.db.db;
	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db    = 0;
	mcq->vector     = param->eq_ix;
	mcq->comp       = mlx5e_completion_event;
	mcq->event      = mlx5e_cq_error_event;
	mcq->irqn       = irqn;
	mcq->uar        = &mdev->mlx5e_res.cq_uar;

	cq->priv = priv;

	return 0;
}
static int mlx5e_open_drop_rq(struct mlx5e_priv *priv)
{
	struct mlx5e_cq_param cq_param;
	struct mlx5e_rq_param rq_param;
	struct mlx5e_rq *rq = &priv->drop_rq;
	struct mlx5e_cq *cq = &priv->drop_rq.cq;
	int err;

	memset(&cq_param, 0, sizeof(cq_param));
	memset(&rq_param, 0, sizeof(rq_param));
	mlx5e_build_drop_rq_param(&rq_param);

	err = mlx5e_create_drop_cq(priv, cq, &cq_param);
	if (err)
		return err;

	err = mlx5e_enable_cq(cq, &cq_param);
	if (err)
		goto err_destroy_cq;

	err = mlx5e_create_drop_rq(priv, rq, &rq_param);
	if (err)
		goto err_disable_cq;

	err = mlx5e_enable_rq(rq, &rq_param);
	if (err)
		goto err_destroy_rq;

	return 0;

err_destroy_rq:
	mlx5e_destroy_rq(&priv->drop_rq);
err_disable_cq:
	mlx5e_disable_cq(&priv->drop_rq.cq);
err_destroy_cq:
	mlx5e_destroy_cq(&priv->drop_rq.cq);

	return err;
}

static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
{
	mlx5e_disable_rq(&priv->drop_rq);
	mlx5e_destroy_rq(&priv->drop_rq);
	mlx5e_disable_cq(&priv->drop_rq.cq);
	mlx5e_destroy_cq(&priv->drop_rq.cq);
}
static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	MLX5_SET(tisc, tisc, prio, tc << 1);
	MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);

	if (mlx5_lag_is_lacp_owner(mdev))
		MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);

	return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
}

static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc)
{
	mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
}

int mlx5e_create_tises(struct mlx5e_priv *priv)
{
	int err;
	int tc;

	for (tc = 0; tc < priv->profile->max_tc; tc++) {
		err = mlx5e_create_tis(priv, tc);
		if (err)
			goto err_close_tises;
	}

	return 0;

err_close_tises:
	for (tc--; tc >= 0; tc--)
		mlx5e_destroy_tis(priv, tc);

	return err;
}

void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
{
	int tc;

	for (tc = 0; tc < priv->profile->max_tc; tc++)
		mlx5e_destroy_tis(priv, tc);
}
static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
				      enum mlx5e_traffic_types tt)
{
	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

#define MLX5_HASH_IP		(MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP)

#define MLX5_HASH_IP_L4PORTS	(MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP   |\
				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
				 MLX5_HASH_FIELD_SEL_L4_DPORT)

#define MLX5_HASH_IP_IPSEC_SPI	(MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP   |\
				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)

	mlx5e_build_tir_ctx_lro(tirc, priv);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
	mlx5e_build_tir_ctx_hash(tirc, priv);

	switch (tt) {
	case MLX5E_TT_IPV4_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;

	case MLX5E_TT_IPV6:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;
	default:
		WARN_ONCE(true,
			  "mlx5e_build_indir_tir_ctx: bad traffic type!\n");
	}
}
static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
				       u32 rqtn)
{
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

	mlx5e_build_tir_ctx_lro(tirc, priv);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, rqtn);
	MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
}
2460 static int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
2462 struct mlx5e_tir *tir;
2469 inlen = MLX5_ST_SZ_BYTES(create_tir_in);
2470 in = mlx5_vzalloc(inlen);
2474 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
2475 memset(in, 0, inlen);
2476 tir = &priv->indir_tir[tt];
2477 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
2478 mlx5e_build_indir_tir_ctx(priv, tirc, tt);
2479 err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
2481 goto err_destroy_tirs;
2489 for (tt--; tt >= 0; tt--)
2490 mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);
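/* A descriptive note (added): create one direct TIR per channel, each
 * pointing at that channel's single-RQ RQT; unwind on the first failure.
 */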
2497 int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
2499 int nch = priv->profile->max_nch(priv->mdev);
2500 struct mlx5e_tir *tir;
2507 inlen = MLX5_ST_SZ_BYTES(create_tir_in);
2508 in = mlx5_vzalloc(inlen);
2512 for (ix = 0; ix < nch; ix++) {
2513 memset(in, 0, inlen);
2514 tir = &priv->direct_tir[ix];
2515 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
2516 mlx5e_build_direct_tir_ctx(priv, tirc,
2517 priv->direct_tir[ix].rqt.rqtn);
2518 err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
2520 goto err_destroy_ch_tirs;
2527 err_destroy_ch_tirs:
2528 for (ix--; ix >= 0; ix--)
2529 mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]);
2536 static void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
2540 for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
2541 mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
2544 void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
2546 int nch = priv->profile->max_nch(priv->mdev);
2549 for (i = 0; i < nch; i++)
2550 mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]);
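/* A descriptive note (added): propagate the VLAN-strip-disable (VSD)
 * setting to every channel RQ; a no-op unless the netdev is in the
 * OPENED state.
 */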
2553 int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd)
2558 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
2561 for (i = 0; i < priv->params.num_channels; i++) {
2562 err = mlx5e_modify_rq_vsd(&priv->channel[i]->rq, vsd);
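/* A descriptive note (added): change the number of TX traffic classes;
 * requires a close/open cycle when the netdev is already up.
 */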
2570 static int mlx5e_setup_tc(struct net_device *netdev, u8 tc)
2572 struct mlx5e_priv *priv = netdev_priv(netdev);
2576 if (tc && tc != MLX5E_MAX_NUM_TC)
2579 mutex_lock(&priv->state_lock);
2581 was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
2583 mlx5e_close_locked(priv->netdev);
2585 priv->params.num_tc = tc ? tc : 1;
2588 err = mlx5e_open_locked(priv->netdev);
2590 mutex_unlock(&priv->state_lock);
2595 static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle,
2596 __be16 proto, struct tc_to_netdev *tc)
2598 struct mlx5e_priv *priv = netdev_priv(dev);
2600 if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
2604 case TC_SETUP_CLSFLOWER:
2605 switch (tc->cls_flower->command) {
2606 case TC_CLSFLOWER_REPLACE:
2607 return mlx5e_configure_flower(priv, proto, tc->cls_flower);
2608 case TC_CLSFLOWER_DESTROY:
2609 return mlx5e_delete_flower(priv, tc->cls_flower);
2610 case TC_CLSFLOWER_STATS:
2611 return mlx5e_stats_flower(priv, tc->cls_flower);
2618 if (tc->type != TC_SETUP_MQPRIO)
2621 return mlx5e_setup_tc(dev, tc->tc);
2624 struct rtnl_link_stats64 *
2625 mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
2627 struct mlx5e_priv *priv = netdev_priv(dev);
2628 struct mlx5e_sw_stats *sstats = &priv->stats.sw;
2629 struct mlx5e_vport_stats *vstats = &priv->stats.vport;
2630 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
2632 stats->rx_packets = sstats->rx_packets;
2633 stats->rx_bytes = sstats->rx_bytes;
2634 stats->tx_packets = sstats->tx_packets;
2635 stats->tx_bytes = sstats->tx_bytes;
2637 stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
2638 stats->tx_dropped = sstats->tx_queue_dropped;
2640 stats->rx_length_errors =
2641 PPORT_802_3_GET(pstats, a_in_range_length_errors) +
2642 PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
2643 PPORT_802_3_GET(pstats, a_frame_too_long_errors);
2644 stats->rx_crc_errors =
2645 PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
2646 stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
2647 stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
2648 stats->tx_carrier_errors =
2649 PPORT_802_3_GET(pstats, a_symbol_error_during_carrier);
2650 stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
2651 stats->rx_frame_errors;
2652 stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
2654 /* vport multicast also counts packets that are dropped due to steering
2655 * or rx out of buffer
2656 */
2657 stats->multicast =
2658 VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
2663 static void mlx5e_set_rx_mode(struct net_device *dev)
2665 struct mlx5e_priv *priv = netdev_priv(dev);
2667 queue_work(priv->wq, &priv->set_rx_mode_work);
2670 static int mlx5e_set_mac(struct net_device *netdev, void *addr)
2672 struct mlx5e_priv *priv = netdev_priv(netdev);
2673 struct sockaddr *saddr = addr;
2675 if (!is_valid_ether_addr(saddr->sa_data))
2676 return -EADDRNOTAVAIL;
2678 netif_addr_lock_bh(netdev);
2679 ether_addr_copy(netdev->dev_addr, saddr->sa_data);
2680 netif_addr_unlock_bh(netdev);
2682 queue_work(priv->wq, &priv->set_rx_mode_work);
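/* A descriptive note (added): set or clear a single bit in
 * netdev->features according to 'enable'.
 */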
2687 #define MLX5E_SET_FEATURE(netdev, feature, enable) \
2688 do { \
2689 if (enable) \
2690 netdev->features |= feature; \
2691 else \
2692 netdev->features &= ~feature; \
2693 } while (0)
2695 typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
2697 static int set_feature_lro(struct net_device *netdev, bool enable)
2699 struct mlx5e_priv *priv = netdev_priv(netdev);
2700 bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
2703 mutex_lock(&priv->state_lock);
2705 if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
2706 mlx5e_close_locked(priv->netdev);
2708 priv->params.lro_en = enable;
2709 err = mlx5e_modify_tirs_lro(priv);
2711 netdev_err(netdev, "lro modify failed, %d\n", err);
2712 priv->params.lro_en = !enable;
2715 if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
2716 mlx5e_open_locked(priv->netdev);
2718 mutex_unlock(&priv->state_lock);
2723 static int set_feature_vlan_filter(struct net_device *netdev, bool enable)
2725 struct mlx5e_priv *priv = netdev_priv(netdev);
2728 mlx5e_enable_vlan_filter(priv);
2730 mlx5e_disable_vlan_filter(priv);
2735 static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
2737 struct mlx5e_priv *priv = netdev_priv(netdev);
2739 if (!enable && mlx5e_tc_num_filters(priv)) {
2741 "Active offloaded tc filters, can't turn hw_tc_offload off\n");
2748 static int set_feature_rx_all(struct net_device *netdev, bool enable)
2750 struct mlx5e_priv *priv = netdev_priv(netdev);
2751 struct mlx5_core_dev *mdev = priv->mdev;
2753 return mlx5_set_port_fcs(mdev, !enable);
2756 static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
2758 struct mlx5e_priv *priv = netdev_priv(netdev);
2761 mutex_lock(&priv->state_lock);
2763 priv->params.vlan_strip_disable = !enable;
2764 err = mlx5e_modify_rqs_vsd(priv, !enable);
2766 priv->params.vlan_strip_disable = enable;
2768 mutex_unlock(&priv->state_lock);
2773 #ifdef CONFIG_RFS_ACCEL
2774 static int set_feature_arfs(struct net_device *netdev, bool enable)
2776 struct mlx5e_priv *priv = netdev_priv(netdev);
2780 err = mlx5e_arfs_enable(priv);
2782 err = mlx5e_arfs_disable(priv);
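/* A descriptive note (added): apply a feature handler only when the
 * requested state differs from the current one, and update
 * netdev->features on success.
 */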
2788 static int mlx5e_handle_feature(struct net_device *netdev,
2789 netdev_features_t wanted_features,
2790 netdev_features_t feature,
2791 mlx5e_feature_handler feature_handler)
2793 netdev_features_t changes = wanted_features ^ netdev->features;
2794 bool enable = !!(wanted_features & feature);
2797 if (!(changes & feature))
2800 err = feature_handler(netdev, enable);
2802 netdev_err(netdev, "%s feature 0x%llx failed err %d\n",
2803 enable ? "Enable" : "Disable", feature, err);
2807 MLX5E_SET_FEATURE(netdev, feature, enable);
2811 static int mlx5e_set_features(struct net_device *netdev,
2812 netdev_features_t features)
2816 err = mlx5e_handle_feature(netdev, features, NETIF_F_LRO,
2818 err |= mlx5e_handle_feature(netdev, features,
2819 NETIF_F_HW_VLAN_CTAG_FILTER,
2820 set_feature_vlan_filter);
2821 err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC,
2822 set_feature_tc_num_filters);
2823 err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
2824 set_feature_rx_all);
2825 err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX,
2826 set_feature_rx_vlan);
2827 #ifdef CONFIG_RFS_ACCEL
2828 err |= mlx5e_handle_feature(netdev, features, NETIF_F_NTUPLE,
2832 return err ? -EINVAL : 0;
2835 #define MLX5_HW_MIN_MTU 64
2836 #define MLX5E_MIN_MTU (MLX5_HW_MIN_MTU + ETH_FCS_LEN)
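/* A descriptive note (added): validate the requested MTU against the port
 * limits; a full close/open reset is needed only when LRO is off and the
 * RQ is of the linked-list (non-striding) type.
 */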
2838 static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
2840 struct mlx5e_priv *priv = netdev_priv(netdev);
2841 struct mlx5_core_dev *mdev = priv->mdev;
2848 mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
2850 max_mtu = MLX5E_HW2SW_MTU(max_mtu);
2851 min_mtu = MLX5E_HW2SW_MTU(MLX5E_MIN_MTU);
2853 if (new_mtu > max_mtu || new_mtu < min_mtu) {
2855 "%s: Bad MTU (%d), valid range is: [%d..%d]\n",
2856 __func__, new_mtu, min_mtu, max_mtu);
2860 mutex_lock(&priv->state_lock);
2862 reset = !priv->params.lro_en &&
2863 (priv->params.rq_wq_type !=
2864 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
2866 was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
2867 if (was_opened && reset)
2868 mlx5e_close_locked(netdev);
2870 netdev->mtu = new_mtu;
2871 mlx5e_set_dev_port_mtu(netdev);
2873 if (was_opened && reset)
2874 err = mlx5e_open_locked(netdev);
2876 mutex_unlock(&priv->state_lock);
2881 static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2885 return mlx5e_hwstamp_set(dev, ifr);
2887 return mlx5e_hwstamp_get(dev, ifr);
2893 static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
2895 struct mlx5e_priv *priv = netdev_priv(dev);
2896 struct mlx5_core_dev *mdev = priv->mdev;
2898 return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
2901 static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
2903 struct mlx5e_priv *priv = netdev_priv(dev);
2904 struct mlx5_core_dev *mdev = priv->mdev;
2906 return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
2910 static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
2912 struct mlx5e_priv *priv = netdev_priv(dev);
2913 struct mlx5_core_dev *mdev = priv->mdev;
2915 return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
2918 static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
2920 struct mlx5e_priv *priv = netdev_priv(dev);
2921 struct mlx5_core_dev *mdev = priv->mdev;
2923 return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
2925 static int mlx5_vport_link2ifla(u8 esw_link)
2928 case MLX5_ESW_VPORT_ADMIN_STATE_DOWN:
2929 return IFLA_VF_LINK_STATE_DISABLE;
2930 case MLX5_ESW_VPORT_ADMIN_STATE_UP:
2931 return IFLA_VF_LINK_STATE_ENABLE;
2933 return IFLA_VF_LINK_STATE_AUTO;
2936 static int mlx5_ifla_link2vport(u8 ifla_link)
2938 switch (ifla_link) {
2939 case IFLA_VF_LINK_STATE_DISABLE:
2940 return MLX5_ESW_VPORT_ADMIN_STATE_DOWN;
2941 case IFLA_VF_LINK_STATE_ENABLE:
2942 return MLX5_ESW_VPORT_ADMIN_STATE_UP;
2944 return MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
2947 static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
2950 struct mlx5e_priv *priv = netdev_priv(dev);
2951 struct mlx5_core_dev *mdev = priv->mdev;
2953 return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
2954 mlx5_ifla_link2vport(link_state));
2957 static int mlx5e_get_vf_config(struct net_device *dev,
2958 int vf, struct ifla_vf_info *ivi)
2960 struct mlx5e_priv *priv = netdev_priv(dev);
2961 struct mlx5_core_dev *mdev = priv->mdev;
2964 err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
2967 ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
2971 static int mlx5e_get_vf_stats(struct net_device *dev,
2972 int vf, struct ifla_vf_stats *vf_stats)
2974 struct mlx5e_priv *priv = netdev_priv(dev);
2975 struct mlx5_core_dev *mdev = priv->mdev;
2977 return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
2981 static void mlx5e_add_vxlan_port(struct net_device *netdev,
2982 struct udp_tunnel_info *ti)
2984 struct mlx5e_priv *priv = netdev_priv(netdev);
2986 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2989 if (!mlx5e_vxlan_allowed(priv->mdev))
2992 mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
2995 static void mlx5e_del_vxlan_port(struct net_device *netdev,
2996 struct udp_tunnel_info *ti)
2998 struct mlx5e_priv *priv = netdev_priv(netdev);
3000 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3003 if (!mlx5e_vxlan_allowed(priv->mdev))
3006 mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0);
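/* A descriptive note (added): keep checksum/GSO offloads for a tunneled
 * skb only when its outer UDP destination port is a VXLAN port the HW has
 * been told about; otherwise strip them so the stack falls back to
 * software.
 */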
3009 static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
3010 struct sk_buff *skb,
3011 netdev_features_t features)
3013 struct udphdr *udph;
3017 switch (vlan_get_protocol(skb)) {
3018 case htons(ETH_P_IP):
3019 proto = ip_hdr(skb)->protocol;
3021 case htons(ETH_P_IPV6):
3022 proto = ipv6_hdr(skb)->nexthdr;
3028 if (proto == IPPROTO_UDP) {
3029 udph = udp_hdr(skb);
3030 port = be16_to_cpu(udph->dest);
3033 /* Check whether the UDP dport is offloaded by HW */
3034 if (port && mlx5e_vxlan_lookup_port(priv, port))
3038 /* Disable CSUM and GSO if the udp dport is not offloaded by HW */
3039 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3042 static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
3043 struct net_device *netdev,
3044 netdev_features_t features)
3046 struct mlx5e_priv *priv = netdev_priv(netdev);
3048 features = vlan_features_check(skb, features);
3049 features = vxlan_features_check(skb, features);
3051 /* Check whether HW can offload this tunneled packet */
3052 if (skb->encapsulation &&
3053 (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
3054 return mlx5e_vxlan_features_check(priv, skb, features);
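/* A descriptive note (added): on a watchdog timeout, scan all TX queues,
 * flag stopped SQs for recovery and kick tx_timeout_work if the netdev is
 * still open.
 */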
3059 static void mlx5e_tx_timeout(struct net_device *dev)
3061 struct mlx5e_priv *priv = netdev_priv(dev);
3062 bool sched_work = false;
3065 netdev_err(dev, "TX timeout detected\n");
3067 for (i = 0; i < priv->params.num_channels * priv->params.num_tc; i++) {
3068 struct mlx5e_sq *sq = priv->txq_to_sq_map[i];
3070 if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
3073 set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
3074 netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
3075 i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
3078 if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
3079 schedule_work(&priv->tx_timeout_work);
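/* A descriptive note (added): attach/replace/remove the XDP program.
 * Attaching or detaching changes the RQ type and needs a full channel
 * reset; swapping one program for another is done live, RQ by RQ, under
 * the FLUSH bit.
 */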
3082 static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
3084 struct mlx5e_priv *priv = netdev_priv(netdev);
3085 struct bpf_prog *old_prog;
3087 bool reset, was_opened;
3090 mutex_lock(&priv->state_lock);
3092 if ((netdev->features & NETIF_F_LRO) && prog) {
3093 netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
3098 was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
3099 /* no need for full reset when exchanging programs */
3100 reset = (!priv->xdp_prog || !prog);
3102 if (was_opened && reset)
3103 mlx5e_close_locked(netdev);
3105 /* exchange programs */
3106 old_prog = xchg(&priv->xdp_prog, prog);
3108 bpf_prog_add(prog, 1);
3110 bpf_prog_put(old_prog);
3112 if (reset) /* change RQ type according to priv->xdp_prog */
3113 mlx5e_set_rq_priv_params(priv);
3115 if (was_opened && reset)
3116 mlx5e_open_locked(netdev);
3118 if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
3121 /* exchanging programs w/o reset, we update ref counts on behalf
3122 * of the channels' RQs here.
3123 */
3124 bpf_prog_add(prog, priv->params.num_channels);
3125 for (i = 0; i < priv->params.num_channels; i++) {
3126 struct mlx5e_channel *c = priv->channel[i];
3128 set_bit(MLX5E_RQ_STATE_FLUSH, &c->rq.state);
3129 napi_synchronize(&c->napi);
3130 /* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */
3132 old_prog = xchg(&c->rq.xdp_prog, prog);
3134 clear_bit(MLX5E_RQ_STATE_FLUSH, &c->rq.state);
3135 /* napi_schedule in case we have missed anything */
3136 set_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);
3137 napi_schedule(&c->napi);
3140 bpf_prog_put(old_prog);
3144 mutex_unlock(&priv->state_lock);
3148 static bool mlx5e_xdp_attached(struct net_device *dev)
3150 struct mlx5e_priv *priv = netdev_priv(dev);
3152 return !!priv->xdp_prog;
3155 static int mlx5e_xdp(struct net_device *dev, struct netdev_xdp *xdp)
3157 switch (xdp->command) {
3158 case XDP_SETUP_PROG:
3159 return mlx5e_xdp_set(dev, xdp->prog);
3160 case XDP_QUERY_PROG:
3161 xdp->prog_attached = mlx5e_xdp_attached(dev);
3168 static const struct net_device_ops mlx5e_netdev_ops_basic = {
3169 .ndo_open = mlx5e_open,
3170 .ndo_stop = mlx5e_close,
3171 .ndo_start_xmit = mlx5e_xmit,
3172 .ndo_setup_tc = mlx5e_ndo_setup_tc,
3173 .ndo_select_queue = mlx5e_select_queue,
3174 .ndo_get_stats64 = mlx5e_get_stats,
3175 .ndo_set_rx_mode = mlx5e_set_rx_mode,
3176 .ndo_set_mac_address = mlx5e_set_mac,
3177 .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
3178 .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
3179 .ndo_set_features = mlx5e_set_features,
3180 .ndo_change_mtu = mlx5e_change_mtu,
3181 .ndo_do_ioctl = mlx5e_ioctl,
3182 .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
3183 #ifdef CONFIG_RFS_ACCEL
3184 .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
3186 .ndo_tx_timeout = mlx5e_tx_timeout,
3187 .ndo_xdp = mlx5e_xdp,
3190 static const struct net_device_ops mlx5e_netdev_ops_sriov = {
3191 .ndo_open = mlx5e_open,
3192 .ndo_stop = mlx5e_close,
3193 .ndo_start_xmit = mlx5e_xmit,
3194 .ndo_setup_tc = mlx5e_ndo_setup_tc,
3195 .ndo_select_queue = mlx5e_select_queue,
3196 .ndo_get_stats64 = mlx5e_get_stats,
3197 .ndo_set_rx_mode = mlx5e_set_rx_mode,
3198 .ndo_set_mac_address = mlx5e_set_mac,
3199 .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
3200 .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
3201 .ndo_set_features = mlx5e_set_features,
3202 .ndo_change_mtu = mlx5e_change_mtu,
3203 .ndo_do_ioctl = mlx5e_ioctl,
3204 .ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
3205 .ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
3206 .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
3207 .ndo_features_check = mlx5e_features_check,
3208 #ifdef CONFIG_RFS_ACCEL
3209 .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
3211 .ndo_set_vf_mac = mlx5e_set_vf_mac,
3212 .ndo_set_vf_vlan = mlx5e_set_vf_vlan,
3213 .ndo_set_vf_spoofchk = mlx5e_set_vf_spoofchk,
3214 .ndo_set_vf_trust = mlx5e_set_vf_trust,
3215 .ndo_get_vf_config = mlx5e_get_vf_config,
3216 .ndo_set_vf_link_state = mlx5e_set_vf_link_state,
3217 .ndo_get_vf_stats = mlx5e_get_vf_stats,
3218 .ndo_tx_timeout = mlx5e_tx_timeout,
3219 .ndo_xdp = mlx5e_xdp,
3222 static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
3224 if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
3226 if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
3227 !MLX5_CAP_GEN(mdev, nic_flow_table) ||
3228 !MLX5_CAP_ETH(mdev, csum_cap) ||
3229 !MLX5_CAP_ETH(mdev, max_lso_cap) ||
3230 !MLX5_CAP_ETH(mdev, vlan_cap) ||
3231 !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
3232 MLX5_CAP_FLOWTABLE(mdev,
3233 flow_table_properties_nic_receive.max_ft_level)
3235 mlx5_core_warn(mdev,
3236 "Not creating net device, some required device capabilities are missing\n");
3239 if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
3240 mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
3241 if (!MLX5_CAP_GEN(mdev, cq_moderation))
3242 mlx5_core_warn(mdev, "CQ moderation is not supported\n");
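/* A descriptive note (added, interpreting the arithmetic below): the max
 * inline size is half a BlueFlame register, minus the TX WQE headers,
 * plus the two bytes of inline_hdr_start they already contain.
 */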
3247 u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
3249 int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
3251 return bf_buf_size -
3252 sizeof(struct mlx5e_tx_wqe) +
3253 2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
3256 #ifdef CONFIG_MLX5_CORE_EN_DCB
3257 static void mlx5e_ets_init(struct mlx5e_priv *priv)
3261 priv->params.ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
3262 for (i = 0; i < priv->params.ets.ets_cap; i++) {
3263 priv->params.ets.tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
3264 priv->params.ets.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
3265 priv->params.ets.prio_tc[i] = i;
3268 /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
3269 priv->params.ets.prio_tc[0] = 1;
3270 priv->params.ets.prio_tc[1] = 0;
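/* A descriptive note (added): spread the indirection table round-robin
 * over the channels, capped at the number of cores on the device's NUMA
 * node to keep RX processing local.
 */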
3274 void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
3275 u32 *indirection_rqt, int len,
3278 int node = mdev->priv.numa_node;
3279 int node_num_of_cores;
3283 node = first_online_node;
3285 node_num_of_cores = cpumask_weight(cpumask_of_node(node));
3287 if (node_num_of_cores)
3288 num_channels = min_t(int, num_channels, node_num_of_cores);
3290 for (i = 0; i < len; i++)
3291 indirection_rqt[i] = i % num_channels;
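/* A descriptive note (added): estimate PCIe bandwidth in Mb/s as link
 * speed times lane count, e.g. an 8.0 GT/s x8 link yields
 * 8000 * 8 = 64000 Mb/s.
 */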
3294 static int mlx5e_get_pci_bw(struct mlx5_core_dev *mdev, u32 *pci_bw)
3296 enum pcie_link_width width;
3297 enum pci_bus_speed speed;
3300 err = pcie_get_minimum_link(mdev->pdev, &speed, &width);
3304 if (speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
3308 case PCIE_SPEED_2_5GT:
3309 *pci_bw = 2500 * width;
3311 case PCIE_SPEED_5_0GT:
3312 *pci_bw = 5000 * width;
3314 case PCIE_SPEED_8_0GT:
3315 *pci_bw = 8000 * width;
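/* A descriptive note (added): default CQE compression on only when the
 * PCIe link is both below roughly 40 Gb/s and slower than the port, i.e.
 * when PCIe is the bottleneck; e.g. a 50 Gb/s port behind a Gen3 x4 slot
 * (~32000 Mb/s) qualifies.
 */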
3324 static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw)
3326 return (link_speed && pci_bw &&
3327 (pci_bw < 40000) && (pci_bw < link_speed));
3330 void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
3332 params->rx_cq_period_mode = cq_period_mode;
3334 params->rx_cq_moderation.pkts =
3335 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
3336 params->rx_cq_moderation.usec =
3337 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
3339 if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
3340 params->rx_cq_moderation.usec =
3341 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
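/* A descriptive note (added): resolve the minimum TX inline mode - L2
 * when the device demands it, the vport-context value when firmware
 * delegates the decision, or none when inlining is not required.
 */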
3344 static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev,
3345 u8 *min_inline_mode)
3347 switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
3348 case MLX5E_INLINE_MODE_L2:
3349 *min_inline_mode = MLX5_INLINE_MODE_L2;
3351 case MLX5E_INLINE_MODE_VPORT_CONTEXT:
3352 mlx5_query_nic_vport_min_inline(mdev,
3355 case MLX5_INLINE_MODE_NOT_REQUIRED:
3356 *min_inline_mode = MLX5_INLINE_MODE_NONE;
3361 static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
3362 struct net_device *netdev,
3363 const struct mlx5e_profile *profile,
3366 struct mlx5e_priv *priv = netdev_priv(netdev);
3369 u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
3370 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
3371 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
3374 priv->netdev = netdev;
3375 priv->params.num_channels = profile->max_nch(mdev);
3376 priv->profile = profile;
3377 priv->ppriv = ppriv;
3379 priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
3381 /* set CQE compression */
3382 priv->params.rx_cqe_compress_admin = false;
3383 if (MLX5_CAP_GEN(mdev, cqe_compression) &&
3384 MLX5_CAP_GEN(mdev, vport_group_manager)) {
3385 mlx5e_get_max_linkspeed(mdev, &link_speed);
3386 mlx5e_get_pci_bw(mdev, &pci_bw);
3387 mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n",
3388 link_speed, pci_bw);
3389 priv->params.rx_cqe_compress_admin =
3390 cqe_compress_heuristic(link_speed, pci_bw);
3392 priv->params.rx_cqe_compress = priv->params.rx_cqe_compress_admin;
3394 mlx5e_set_rq_priv_params(priv);
3395 if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
3396 priv->params.lro_en = true;
3398 priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
3399 mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode);
3401 priv->params.tx_cq_moderation.usec =
3402 MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
3403 priv->params.tx_cq_moderation.pkts =
3404 MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
3405 priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
3406 mlx5e_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
3407 priv->params.num_tc = 1;
3408 priv->params.rss_hfunc = ETH_RSS_HASH_XOR;
3410 netdev_rss_key_fill(priv->params.toeplitz_hash_key,
3411 sizeof(priv->params.toeplitz_hash_key));
3413 mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt,
3414 MLX5E_INDIR_RQT_SIZE, profile->max_nch(mdev));
3416 priv->params.lro_wqe_sz =
3417 MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ -
3418 /* Extra room needed for build_skb */
3420 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3422 /* Initialize pflags */
3423 MLX5E_SET_PRIV_FLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER,
3424 priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
3426 #ifdef CONFIG_MLX5_CORE_EN_DCB
3427 mlx5e_ets_init(priv);
3430 mutex_init(&priv->state_lock);
3432 INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
3433 INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
3434 INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
3435 INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
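/* A descriptive note (added): read the vport MAC from firmware; fall back
 * to a random address when none is provisioned and this function is not
 * the vport group manager.
 */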
3438 static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
3440 struct mlx5e_priv *priv = netdev_priv(netdev);
3442 mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
3443 if (is_zero_ether_addr(netdev->dev_addr) &&
3444 !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
3445 eth_hw_addr_random(netdev);
3446 mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
3450 static const struct switchdev_ops mlx5e_switchdev_ops = {
3451 .switchdev_port_attr_get = mlx5e_attr_get,
3454 static void mlx5e_build_nic_netdev(struct net_device *netdev)
3456 struct mlx5e_priv *priv = netdev_priv(netdev);
3457 struct mlx5_core_dev *mdev = priv->mdev;
3461 SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
3463 if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
3464 netdev->netdev_ops = &mlx5e_netdev_ops_sriov;
3465 #ifdef CONFIG_MLX5_CORE_EN_DCB
3466 netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
3469 netdev->netdev_ops = &mlx5e_netdev_ops_basic;
3472 netdev->watchdog_timeo = 15 * HZ;
3474 netdev->ethtool_ops = &mlx5e_ethtool_ops;
3476 netdev->vlan_features |= NETIF_F_SG;
3477 netdev->vlan_features |= NETIF_F_IP_CSUM;
3478 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
3479 netdev->vlan_features |= NETIF_F_GRO;
3480 netdev->vlan_features |= NETIF_F_TSO;
3481 netdev->vlan_features |= NETIF_F_TSO6;
3482 netdev->vlan_features |= NETIF_F_RXCSUM;
3483 netdev->vlan_features |= NETIF_F_RXHASH;
3485 if (MLX5_CAP_ETH(mdev, lro_cap))
3486 netdev->vlan_features |= NETIF_F_LRO;
3488 netdev->hw_features = netdev->vlan_features;
3489 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
3490 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
3491 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3493 if (mlx5e_vxlan_allowed(mdev)) {
3494 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
3495 NETIF_F_GSO_UDP_TUNNEL_CSUM |
3496 NETIF_F_GSO_PARTIAL;
3497 netdev->hw_enc_features |= NETIF_F_IP_CSUM;
3498 netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
3499 netdev->hw_enc_features |= NETIF_F_TSO;
3500 netdev->hw_enc_features |= NETIF_F_TSO6;
3501 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
3502 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
3503 NETIF_F_GSO_PARTIAL;
3504 netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
3507 mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
3510 netdev->hw_features |= NETIF_F_RXALL;
3512 netdev->features = netdev->hw_features;
3513 if (!priv->params.lro_en)
3514 netdev->features &= ~NETIF_F_LRO;
3517 netdev->features &= ~NETIF_F_RXALL;
3519 #define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
3520 if (FT_CAP(flow_modify_en) &&
3521 FT_CAP(modify_root) &&
3522 FT_CAP(identified_miss_table_mode) &&
3523 FT_CAP(flow_table_modify)) {
3524 netdev->hw_features |= NETIF_F_HW_TC;
3525 #ifdef CONFIG_RFS_ACCEL
3526 netdev->hw_features |= NETIF_F_NTUPLE;
3530 netdev->features |= NETIF_F_HIGHDMA;
3532 netdev->priv_flags |= IFF_UNICAST_FLT;
3534 mlx5e_set_netdev_dev_addr(netdev);
3536 #ifdef CONFIG_NET_SWITCHDEV
3537 if (MLX5_CAP_GEN(mdev, vport_group_manager))
3538 netdev->switchdev_ops = &mlx5e_switchdev_ops;
3542 static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
3544 struct mlx5_core_dev *mdev = priv->mdev;
3547 err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
3549 mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
3550 priv->q_counter = 0;
3554 static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
3556 if (!priv->q_counter)
3559 mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
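/* A descriptive note (added): create the UMR memory key used by the
 * striding RQ (MPWQE) - a free, UMR-enabled MTT mkey sized for the MTTs
 * of all channels' RQs.
 */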
3562 static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
3564 struct mlx5_core_dev *mdev = priv->mdev;
3565 u64 npages = MLX5E_REQUIRED_MTTS(priv->profile->max_nch(mdev),
3566 BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW));
3567 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
3572 in = mlx5_vzalloc(inlen);
3576 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
3578 npages = min_t(u32, ALIGN(U16_MAX, 4) * 2, npages);
3580 MLX5_SET(mkc, mkc, free, 1);
3581 MLX5_SET(mkc, mkc, umr_en, 1);
3582 MLX5_SET(mkc, mkc, lw, 1);
3583 MLX5_SET(mkc, mkc, lr, 1);
3584 MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
3586 MLX5_SET(mkc, mkc, qpn, 0xffffff);
3587 MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
3588 MLX5_SET64(mkc, mkc, len, npages << PAGE_SHIFT);
3589 MLX5_SET(mkc, mkc, translations_octword_size,
3590 MLX5_MTT_OCTW(npages));
3591 MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
3593 err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen);
3599 static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
3600 struct net_device *netdev,
3601 const struct mlx5e_profile *profile,
3604 struct mlx5e_priv *priv = netdev_priv(netdev);
3606 mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv);
3607 mlx5e_build_nic_netdev(netdev);
3608 mlx5e_vxlan_init(priv);
3611 static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
3613 struct mlx5_core_dev *mdev = priv->mdev;
3614 struct mlx5_eswitch *esw = mdev->priv.eswitch;
3616 mlx5e_vxlan_cleanup(priv);
3618 if (MLX5_CAP_GEN(mdev, vport_group_manager))
3619 mlx5_eswitch_unregister_vport_rep(esw, 0);
3622 static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
3624 struct mlx5_core_dev *mdev = priv->mdev;
3628 err = mlx5e_create_indirect_rqts(priv);
3630 mlx5_core_warn(mdev, "create indirect rqts failed, %d\n", err);
3634 err = mlx5e_create_direct_rqts(priv);
3636 mlx5_core_warn(mdev, "create direct rqts failed, %d\n", err);
3637 goto err_destroy_indirect_rqts;
3640 err = mlx5e_create_indirect_tirs(priv);
3642 mlx5_core_warn(mdev, "create indirect tirs failed, %d\n", err);
3643 goto err_destroy_direct_rqts;
3646 err = mlx5e_create_direct_tirs(priv);
3648 mlx5_core_warn(mdev, "create direct tirs failed, %d\n", err);
3649 goto err_destroy_indirect_tirs;
3652 err = mlx5e_create_flow_steering(priv);
3654 mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
3655 goto err_destroy_direct_tirs;
3658 err = mlx5e_tc_init(priv);
3660 goto err_destroy_flow_steering;
3664 err_destroy_flow_steering:
3665 mlx5e_destroy_flow_steering(priv);
3666 err_destroy_direct_tirs:
3667 mlx5e_destroy_direct_tirs(priv);
3668 err_destroy_indirect_tirs:
3669 mlx5e_destroy_indirect_tirs(priv);
3670 err_destroy_direct_rqts:
3671 for (i = 0; i < priv->profile->max_nch(mdev); i++)
3672 mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
3673 err_destroy_indirect_rqts:
3674 mlx5e_destroy_rqt(priv, &priv->indir_rqt);
3678 static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
3682 mlx5e_tc_cleanup(priv);
3683 mlx5e_destroy_flow_steering(priv);
3684 mlx5e_destroy_direct_tirs(priv);
3685 mlx5e_destroy_indirect_tirs(priv);
3686 for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
3687 mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
3688 mlx5e_destroy_rqt(priv, &priv->indir_rqt);
3691 static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
3695 err = mlx5e_create_tises(priv);
3697 mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
3701 #ifdef CONFIG_MLX5_CORE_EN_DCB
3702 mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets);
3707 static void mlx5e_nic_enable(struct mlx5e_priv *priv)
3709 struct net_device *netdev = priv->netdev;
3710 struct mlx5_core_dev *mdev = priv->mdev;
3711 struct mlx5_eswitch *esw = mdev->priv.eswitch;
3712 struct mlx5_eswitch_rep rep;
3714 mlx5_lag_add(mdev, netdev);
3716 if (mlx5e_vxlan_allowed(mdev)) {
3718 udp_tunnel_get_rx_info(netdev);
3722 mlx5e_enable_async_events(priv);
3723 queue_work(priv->wq, &priv->set_rx_mode_work);
3725 if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
3726 mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
3727 rep.load = mlx5e_nic_rep_load;
3728 rep.unload = mlx5e_nic_rep_unload;
3730 rep.priv_data = priv;
3731 mlx5_eswitch_register_vport_rep(esw, &rep);
3735 static void mlx5e_nic_disable(struct mlx5e_priv *priv)
3737 queue_work(priv->wq, &priv->set_rx_mode_work);
3738 mlx5e_disable_async_events(priv);
3739 mlx5_lag_remove(priv->mdev);
3742 static const struct mlx5e_profile mlx5e_nic_profile = {
3743 .init = mlx5e_nic_init,
3744 .cleanup = mlx5e_nic_cleanup,
3745 .init_rx = mlx5e_init_nic_rx,
3746 .cleanup_rx = mlx5e_cleanup_nic_rx,
3747 .init_tx = mlx5e_init_nic_tx,
3748 .cleanup_tx = mlx5e_cleanup_nic_tx,
3749 .enable = mlx5e_nic_enable,
3750 .disable = mlx5e_nic_disable,
3751 .update_stats = mlx5e_update_stats,
3752 .max_nch = mlx5e_get_max_num_channels,
3753 .max_tc = MLX5E_MAX_NUM_TC,
3756 struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
3757 const struct mlx5e_profile *profile,
3760 int nch = profile->max_nch(mdev);
3761 struct net_device *netdev;
3762 struct mlx5e_priv *priv;
3764 netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
3765 nch * profile->max_tc,
3768 mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
3772 profile->init(mdev, netdev, profile, ppriv);
3774 netif_carrier_off(netdev);
3776 priv = netdev_priv(netdev);
3778 priv->wq = create_singlethread_workqueue("mlx5e");
3780 goto err_cleanup_nic;
3785 profile->cleanup(priv);
3786 free_netdev(netdev);
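/* A descriptive note (added): bring up the netdev's HW contexts in order -
 * UMR mkey, TX (TISes), drop RQ, RX (RQTs/TIRs/steering), then the
 * per-queue counter and port MTU; unwind in reverse on failure.
 */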
3791 int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
3793 const struct mlx5e_profile *profile;
3794 struct mlx5e_priv *priv;
3797 priv = netdev_priv(netdev);
3798 profile = priv->profile;
3799 clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
3801 err = mlx5e_create_umr_mkey(priv);
3803 mlx5_core_err(mdev, "create umr mkey failed, %d\n", err);
3807 err = profile->init_tx(priv);
3809 goto err_destroy_umr_mkey;
3811 err = mlx5e_open_drop_rq(priv);
3813 mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
3814 goto err_cleanup_tx;
3817 err = profile->init_rx(priv);
3819 goto err_close_drop_rq;
3821 mlx5e_create_q_counter(priv);
3823 mlx5e_init_l2_addr(priv);
3825 mlx5e_set_dev_port_mtu(netdev);
3827 if (profile->enable)
3828 profile->enable(priv);
3831 if (netif_running(netdev))
3833 netif_device_attach(netdev);
3839 mlx5e_close_drop_rq(priv);
3842 profile->cleanup_tx(priv);
3844 err_destroy_umr_mkey:
3845 mlx5_core_destroy_mkey(mdev, &priv->umr_mkey);
3851 static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
3853 struct mlx5_eswitch *esw = mdev->priv.eswitch;
3854 int total_vfs = MLX5_TOTAL_VPORTS(mdev);
3858 if (!MLX5_CAP_GEN(mdev, vport_group_manager))
3861 mlx5_query_nic_vport_mac_address(mdev, 0, mac);
3863 for (vport = 1; vport < total_vfs; vport++) {
3864 struct mlx5_eswitch_rep rep;
3866 rep.load = mlx5e_vport_rep_load;
3867 rep.unload = mlx5e_vport_rep_unload;
3869 ether_addr_copy(rep.hw_id, mac);
3870 mlx5_eswitch_register_vport_rep(esw, &rep);
3874 void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
3876 struct mlx5e_priv *priv = netdev_priv(netdev);
3877 const struct mlx5e_profile *profile = priv->profile;
3879 set_bit(MLX5E_STATE_DESTROYING, &priv->state);
3880 if (profile->disable)
3881 profile->disable(priv);
3883 flush_workqueue(priv->wq);
3886 if (netif_running(netdev))
3887 mlx5e_close(netdev);
3888 netif_device_detach(netdev);
3891 mlx5e_destroy_q_counter(priv);
3892 profile->cleanup_rx(priv);
3893 mlx5e_close_drop_rq(priv);
3894 profile->cleanup_tx(priv);
3895 mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey);
3896 cancel_delayed_work_sync(&priv->update_stats_work);
3899 /* mlx5e_attach and mlx5e_detach scope should be limited to creating and
3900 * destroying hardware contexts and connecting them to the current netdev.
3901 */
3902 static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
3904 struct mlx5e_priv *priv = vpriv;
3905 struct net_device *netdev = priv->netdev;
3908 if (netif_device_present(netdev))
3911 err = mlx5e_create_mdev_resources(mdev);
3915 err = mlx5e_attach_netdev(mdev, netdev);
3917 mlx5e_destroy_mdev_resources(mdev);
3924 static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
3926 struct mlx5e_priv *priv = vpriv;
3927 struct net_device *netdev = priv->netdev;
3929 if (!netif_device_present(netdev))
3932 mlx5e_detach_netdev(mdev, netdev);
3933 mlx5e_destroy_mdev_resources(mdev);
3936 static void *mlx5e_add(struct mlx5_core_dev *mdev)
3938 struct mlx5_eswitch *esw = mdev->priv.eswitch;
3939 int total_vfs = MLX5_TOTAL_VPORTS(mdev);
3944 struct net_device *netdev;
3946 err = mlx5e_check_required_hca_cap(mdev);
3950 mlx5e_register_vport_rep(mdev);
3952 if (MLX5_CAP_GEN(mdev, vport_group_manager))
3953 ppriv = &esw->offloads.vport_reps[0];
3955 netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, ppriv);
3957 mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
3958 goto err_unregister_reps;
3961 priv = netdev_priv(netdev);
3963 err = mlx5e_attach(mdev, priv);
3965 mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err);
3966 goto err_destroy_netdev;
3969 err = register_netdev(netdev);
3971 mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
3978 mlx5e_detach(mdev, priv);
3981 mlx5e_destroy_netdev(mdev, priv);
3983 err_unregister_reps:
3984 for (vport = 1; vport < total_vfs; vport++)
3985 mlx5_eswitch_unregister_vport_rep(esw, vport);
3990 void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
3992 const struct mlx5e_profile *profile = priv->profile;
3993 struct net_device *netdev = priv->netdev;
3995 unregister_netdev(netdev);
3996 destroy_workqueue(priv->wq);
3997 if (profile->cleanup)
3998 profile->cleanup(priv);
3999 free_netdev(netdev);
4002 static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
4004 struct mlx5_eswitch *esw = mdev->priv.eswitch;
4005 int total_vfs = MLX5_TOTAL_VPORTS(mdev);
4006 struct mlx5e_priv *priv = vpriv;
4009 for (vport = 1; vport < total_vfs; vport++)
4010 mlx5_eswitch_unregister_vport_rep(esw, vport);
4012 mlx5e_detach(mdev, vpriv);
4013 mlx5e_destroy_netdev(mdev, priv);
4016 static void *mlx5e_get_netdev(void *vpriv)
4018 struct mlx5e_priv *priv = vpriv;
4020 return priv->netdev;
4023 static struct mlx5_interface mlx5e_interface = {
4025 .remove = mlx5e_remove,
4026 .attach = mlx5e_attach,
4027 .detach = mlx5e_detach,
4028 .event = mlx5e_async_event,
4029 .protocol = MLX5_INTERFACE_PROTOCOL_ETH,
4030 .get_dev = mlx5e_get_netdev,
4033 void mlx5e_init(void)
4035 mlx5e_build_ptys2ethtool_map();
4036 mlx5_register_interface(&mlx5e_interface);
4039 void mlx5e_cleanup(void)
4041 mlx5_unregister_interface(&mlx5e_interface);