qed*: IOV support spoof-checking
[cascardo/linux.git] / drivers / net / ethernet / qlogic / qed / qed_l2.c
1 /* QLogic qed NIC Driver
2  * Copyright (c) 2015 QLogic Corporation
3  *
4  * This software is available under the terms of the GNU General Public License
5  * (GPL) Version 2, available from the file COPYING in the main directory of
6  * this source tree.
7  */
8
9 #include <linux/types.h>
10 #include <asm/byteorder.h>
11 #include <asm/param.h>
12 #include <linux/delay.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/etherdevice.h>
15 #include <linux/interrupt.h>
16 #include <linux/kernel.h>
17 #include <linux/module.h>
18 #include <linux/pci.h>
19 #include <linux/slab.h>
20 #include <linux/stddef.h>
21 #include <linux/string.h>
22 #include <linux/version.h>
23 #include <linux/workqueue.h>
24 #include <linux/bitops.h>
25 #include <linux/bug.h>
26 #include "qed.h"
27 #include <linux/qed/qed_chain.h>
28 #include "qed_cxt.h"
29 #include "qed_dev_api.h"
30 #include <linux/qed/qed_eth_if.h>
31 #include "qed_hsi.h"
32 #include "qed_hw.h"
33 #include "qed_int.h"
34 #include "qed_l2.h"
35 #include "qed_mcp.h"
36 #include "qed_reg_addr.h"
37 #include "qed_sp.h"
38 #include "qed_sriov.h"
39
40
41 #define QED_MAX_SGES_NUM 16
42 #define CRC32_POLY 0x1edc6f41
43
44 int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
45                            struct qed_sp_vport_start_params *p_params)
46 {
47         struct vport_start_ramrod_data *p_ramrod = NULL;
48         struct qed_spq_entry *p_ent =  NULL;
49         struct qed_sp_init_data init_data;
50         u8 abs_vport_id = 0;
51         int rc = -EINVAL;
52         u16 rx_mode = 0;
53
54         rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
55         if (rc != 0)
56                 return rc;
57
58         memset(&init_data, 0, sizeof(init_data));
59         init_data.cid = qed_spq_get_cid(p_hwfn);
60         init_data.opaque_fid = p_params->opaque_fid;
61         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
62
63         rc = qed_sp_init_request(p_hwfn, &p_ent,
64                                  ETH_RAMROD_VPORT_START,
65                                  PROTOCOLID_ETH, &init_data);
66         if (rc)
67                 return rc;
68
69         p_ramrod                = &p_ent->ramrod.vport_start;
70         p_ramrod->vport_id      = abs_vport_id;
71
72         p_ramrod->mtu                   = cpu_to_le16(p_params->mtu);
73         p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
74         p_ramrod->drop_ttl0_en          = p_params->drop_ttl0;
75
76         SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
77         SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
78
79         p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);
80
81         /* TPA related fields */
82         memset(&p_ramrod->tpa_param, 0,
83                sizeof(struct eth_vport_tpa_param));
84
85         p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;
86
87         switch (p_params->tpa_mode) {
88         case QED_TPA_MODE_GRO:
89                 p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
90                 p_ramrod->tpa_param.tpa_max_size = (u16)-1;
91                 p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
92                 p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
93                 p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
94                 p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
95                 p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
96                 p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
97                 break;
98         default:
99                 break;
100         }
101
102         /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
103         p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
104                                                   p_params->concrete_fid);
105
106         return qed_spq_post(p_hwfn, p_ent, NULL);
107 }
108
109 int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
110                        struct qed_sp_vport_start_params *p_params)
111 {
112         if (IS_VF(p_hwfn->cdev)) {
113                 return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
114                                              p_params->mtu,
115                                              p_params->remove_inner_vlan,
116                                              p_params->tpa_mode,
117                                              p_params->max_buffers_per_cqe,
118                                              p_params->only_untagged);
119         }
120
121         return qed_sp_eth_vport_start(p_hwfn, p_params);
122 }
123
124 static int
125 qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
126                         struct vport_update_ramrod_data *p_ramrod,
127                         struct qed_rss_params *p_params)
128 {
129         struct eth_vport_rss_config *rss = &p_ramrod->rss_config;
130         u16 abs_l2_queue = 0, capabilities = 0;
131         int rc = 0, i;
132
133         if (!p_params) {
134                 p_ramrod->common.update_rss_flg = 0;
135                 return rc;
136         }
137
138         BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE !=
139                      ETH_RSS_IND_TABLE_ENTRIES_NUM);
140
141         rc = qed_fw_rss_eng(p_hwfn, p_params->rss_eng_id, &rss->rss_id);
142         if (rc)
143                 return rc;
144
145         p_ramrod->common.update_rss_flg = p_params->update_rss_config;
146         rss->update_rss_capabilities = p_params->update_rss_capabilities;
147         rss->update_rss_ind_table = p_params->update_rss_ind_table;
148         rss->update_rss_key = p_params->update_rss_key;
149
150         rss->rss_mode = p_params->rss_enable ?
151                         ETH_VPORT_RSS_MODE_REGULAR :
152                         ETH_VPORT_RSS_MODE_DISABLED;
153
154         SET_FIELD(capabilities,
155                   ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
156                   !!(p_params->rss_caps & QED_RSS_IPV4));
157         SET_FIELD(capabilities,
158                   ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
159                   !!(p_params->rss_caps & QED_RSS_IPV6));
160         SET_FIELD(capabilities,
161                   ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
162                   !!(p_params->rss_caps & QED_RSS_IPV4_TCP));
163         SET_FIELD(capabilities,
164                   ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
165                   !!(p_params->rss_caps & QED_RSS_IPV6_TCP));
166         SET_FIELD(capabilities,
167                   ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
168                   !!(p_params->rss_caps & QED_RSS_IPV4_UDP));
169         SET_FIELD(capabilities,
170                   ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
171                   !!(p_params->rss_caps & QED_RSS_IPV6_UDP));
172         rss->tbl_size = p_params->rss_table_size_log;
173
174         rss->capabilities = cpu_to_le16(capabilities);
175
176         DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
177                    "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
178                    p_ramrod->common.update_rss_flg,
179                    rss->rss_mode, rss->update_rss_capabilities,
180                    capabilities, rss->update_rss_ind_table,
181                    rss->update_rss_key);
182
183         for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
184                 rc = qed_fw_l2_queue(p_hwfn,
185                                      (u8)p_params->rss_ind_table[i],
186                                      &abs_l2_queue);
187                 if (rc)
188                         return rc;
189
190                 rss->indirection_table[i] = cpu_to_le16(abs_l2_queue);
191                 DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "i= %d, queue = %d\n",
192                            i, rss->indirection_table[i]);
193         }
194
195         for (i = 0; i < 10; i++)
196                 rss->rss_key[i] = cpu_to_le32(p_params->rss_key[i]);
197
198         return rc;
199 }
200
/* Translate driver accept-filter flags into the rx/tx mode state words of
 * a vport-update ramrod.
 *
 * Each direction is touched only when its update_*_mode_config flag is
 * set; otherwise the firmware keeps its previous configuration for that
 * direction.
 */
static void
qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
			  struct vport_update_ramrod_data *p_ramrod,
			  struct qed_filter_accept_flags accept_flags)
{
	p_ramrod->common.update_rx_mode_flg =
		accept_flags.update_rx_mode_config;

	p_ramrod->common.update_tx_mode_flg =
		accept_flags.update_tx_mode_config;

	/* Set Rx mode accept flags */
	if (p_ramrod->common.update_rx_mode_flg) {
		u8 accept_filter = accept_flags.rx_accept_filter;
		u16 state = 0;

		/* Drop all unicast only when neither matched nor unmatched
		 * unicast is accepted.
		 */
		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
			  !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));

		/* Same drop-all rule for multicast */
		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		/* Accept-all multicast requires both matched and unmatched */
		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->rx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->rx_mode.state = 0x%x\n", state);
	}

	/* Set Tx mode accept flags */
	if (p_ramrod->common.update_tx_mode_flg) {
		u8 accept_filter = accept_flags.tx_accept_filter;
		u16 state = 0;

		/* QED_ACCEPT_NONE means drop everything on Tx */
		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->tx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->tx_mode.state = 0x%x\n", state);
	}
}
267
268 static void
269 qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn,
270                             struct vport_update_ramrod_data *p_ramrod,
271                             struct qed_sge_tpa_params *p_params)
272 {
273         struct eth_vport_tpa_param *p_tpa;
274
275         if (!p_params) {
276                 p_ramrod->common.update_tpa_param_flg = 0;
277                 p_ramrod->common.update_tpa_en_flg = 0;
278                 p_ramrod->common.update_tpa_param_flg = 0;
279                 return;
280         }
281
282         p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
283         p_tpa = &p_ramrod->tpa_param;
284         p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
285         p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
286         p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
287         p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;
288
289         p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
290         p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
291         p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
292         p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
293         p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
294         p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
295         p_tpa->tpa_max_size = p_params->tpa_max_size;
296         p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
297         p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
298 }
299
300 static void
301 qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
302                         struct vport_update_ramrod_data *p_ramrod,
303                         struct qed_sp_vport_update_params *p_params)
304 {
305         int i;
306
307         memset(&p_ramrod->approx_mcast.bins, 0,
308                sizeof(p_ramrod->approx_mcast.bins));
309
310         if (p_params->update_approx_mcast_flg) {
311                 p_ramrod->common.update_approx_mcast_flg = 1;
312                 for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
313                         u32 *p_bins = (u32 *)p_params->bins;
314                         __le32 val = cpu_to_le32(p_bins[i]);
315
316                         p_ramrod->approx_mcast.bins[i] = val;
317                 }
318         }
319 }
320
321 int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
322                         struct qed_sp_vport_update_params *p_params,
323                         enum spq_mode comp_mode,
324                         struct qed_spq_comp_cb *p_comp_data)
325 {
326         struct qed_rss_params *p_rss_params = p_params->rss_params;
327         struct vport_update_ramrod_data_cmn *p_cmn;
328         struct qed_sp_init_data init_data;
329         struct vport_update_ramrod_data *p_ramrod = NULL;
330         struct qed_spq_entry *p_ent = NULL;
331         u8 abs_vport_id = 0, val;
332         int rc = -EINVAL;
333
334         if (IS_VF(p_hwfn->cdev)) {
335                 rc = qed_vf_pf_vport_update(p_hwfn, p_params);
336                 return rc;
337         }
338
339         rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
340         if (rc != 0)
341                 return rc;
342
343         memset(&init_data, 0, sizeof(init_data));
344         init_data.cid = qed_spq_get_cid(p_hwfn);
345         init_data.opaque_fid = p_params->opaque_fid;
346         init_data.comp_mode = comp_mode;
347         init_data.p_comp_data = p_comp_data;
348
349         rc = qed_sp_init_request(p_hwfn, &p_ent,
350                                  ETH_RAMROD_VPORT_UPDATE,
351                                  PROTOCOLID_ETH, &init_data);
352         if (rc)
353                 return rc;
354
355         /* Copy input params to ramrod according to FW struct */
356         p_ramrod = &p_ent->ramrod.vport_update;
357         p_cmn = &p_ramrod->common;
358
359         p_cmn->vport_id = abs_vport_id;
360         p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
361         p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
362         p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
363         p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
364         p_cmn->accept_any_vlan = p_params->accept_any_vlan;
365         p_cmn->update_accept_any_vlan_flg =
366                         p_params->update_accept_any_vlan_flg;
367
368         p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
369         val = p_params->update_inner_vlan_removal_flg;
370         p_cmn->update_inner_vlan_removal_en_flg = val;
371
372         p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
373         val = p_params->update_default_vlan_enable_flg;
374         p_cmn->update_default_vlan_en_flg = val;
375
376         p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan);
377         p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;
378
379         p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;
380
381         p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
382         p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;
383
384         p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
385         val = p_params->update_anti_spoofing_en_flg;
386         p_ramrod->common.update_anti_spoofing_en_flg = val;
387
388         rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
389         if (rc) {
390                 /* Return spq entry which is taken in qed_sp_init_request()*/
391                 qed_spq_return_entry(p_hwfn, p_ent);
392                 return rc;
393         }
394
395         /* Update mcast bins for VFs, PF doesn't use this functionality */
396         qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
397
398         qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
399         qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params);
400         return qed_spq_post(p_hwfn, p_ent, NULL);
401 }
402
403 int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
404 {
405         struct vport_stop_ramrod_data *p_ramrod;
406         struct qed_sp_init_data init_data;
407         struct qed_spq_entry *p_ent;
408         u8 abs_vport_id = 0;
409         int rc;
410
411         if (IS_VF(p_hwfn->cdev))
412                 return qed_vf_pf_vport_stop(p_hwfn);
413
414         rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
415         if (rc != 0)
416                 return rc;
417
418         memset(&init_data, 0, sizeof(init_data));
419         init_data.cid = qed_spq_get_cid(p_hwfn);
420         init_data.opaque_fid = opaque_fid;
421         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
422
423         rc = qed_sp_init_request(p_hwfn, &p_ent,
424                                  ETH_RAMROD_VPORT_STOP,
425                                  PROTOCOLID_ETH, &init_data);
426         if (rc)
427                 return rc;
428
429         p_ramrod = &p_ent->ramrod.vport_stop;
430         p_ramrod->vport_id = abs_vport_id;
431
432         return qed_spq_post(p_hwfn, p_ent, NULL);
433 }
434
435 static int
436 qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn,
437                        struct qed_filter_accept_flags *p_accept_flags)
438 {
439         struct qed_sp_vport_update_params s_params;
440
441         memset(&s_params, 0, sizeof(s_params));
442         memcpy(&s_params.accept_flags, p_accept_flags,
443                sizeof(struct qed_filter_accept_flags));
444
445         return qed_vf_pf_vport_update(p_hwfn, &s_params);
446 }
447
448 static int qed_filter_accept_cmd(struct qed_dev *cdev,
449                                  u8 vport,
450                                  struct qed_filter_accept_flags accept_flags,
451                                  u8 update_accept_any_vlan,
452                                  u8 accept_any_vlan,
453                                  enum spq_mode comp_mode,
454                                  struct qed_spq_comp_cb *p_comp_data)
455 {
456         struct qed_sp_vport_update_params vport_update_params;
457         int i, rc;
458
459         /* Prepare and send the vport rx_mode change */
460         memset(&vport_update_params, 0, sizeof(vport_update_params));
461         vport_update_params.vport_id = vport;
462         vport_update_params.accept_flags = accept_flags;
463         vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
464         vport_update_params.accept_any_vlan = accept_any_vlan;
465
466         for_each_hwfn(cdev, i) {
467                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
468
469                 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
470
471                 if (IS_VF(cdev)) {
472                         rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags);
473                         if (rc)
474                                 return rc;
475                         continue;
476                 }
477
478                 rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
479                                          comp_mode, p_comp_data);
480                 if (rc != 0) {
481                         DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
482                         return rc;
483                 }
484
485                 DP_VERBOSE(p_hwfn, QED_MSG_SP,
486                            "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
487                            accept_flags.rx_accept_filter,
488                            accept_flags.tx_accept_filter);
489                 if (update_accept_any_vlan)
490                         DP_VERBOSE(p_hwfn, QED_MSG_SP,
491                                    "accept_any_vlan=%d configured\n",
492                                    accept_any_vlan);
493         }
494
495         return 0;
496 }
497
498 static int qed_sp_release_queue_cid(
499         struct qed_hwfn *p_hwfn,
500         struct qed_hw_cid_data *p_cid_data)
501 {
502         if (!p_cid_data->b_cid_allocated)
503                 return 0;
504
505         qed_cxt_release_cid(p_hwfn, p_cid_data->cid);
506
507         p_cid_data->b_cid_allocated = false;
508
509         return 0;
510 }
511
512 int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
513                                 u16 opaque_fid,
514                                 u32 cid,
515                                 struct qed_queue_start_common_params *params,
516                                 u8 stats_id,
517                                 u16 bd_max_bytes,
518                                 dma_addr_t bd_chain_phys_addr,
519                                 dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
520 {
521         struct rx_queue_start_ramrod_data *p_ramrod = NULL;
522         struct qed_spq_entry *p_ent = NULL;
523         struct qed_sp_init_data init_data;
524         struct qed_hw_cid_data *p_rx_cid;
525         u16 abs_rx_q_id = 0;
526         u8 abs_vport_id = 0;
527         int rc = -EINVAL;
528
529         /* Store information for the stop */
530         p_rx_cid                = &p_hwfn->p_rx_cids[params->queue_id];
531         p_rx_cid->cid           = cid;
532         p_rx_cid->opaque_fid    = opaque_fid;
533         p_rx_cid->vport_id      = params->vport_id;
534
535         rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_vport_id);
536         if (rc != 0)
537                 return rc;
538
539         rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_rx_q_id);
540         if (rc != 0)
541                 return rc;
542
543         DP_VERBOSE(p_hwfn, QED_MSG_SP,
544                    "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
545                    opaque_fid, cid, params->queue_id, params->vport_id,
546                    params->sb);
547
548         /* Get SPQ entry */
549         memset(&init_data, 0, sizeof(init_data));
550         init_data.cid = cid;
551         init_data.opaque_fid = opaque_fid;
552         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
553
554         rc = qed_sp_init_request(p_hwfn, &p_ent,
555                                  ETH_RAMROD_RX_QUEUE_START,
556                                  PROTOCOLID_ETH, &init_data);
557         if (rc)
558                 return rc;
559
560         p_ramrod = &p_ent->ramrod.rx_queue_start;
561
562         p_ramrod->sb_id                 = cpu_to_le16(params->sb);
563         p_ramrod->sb_index              = params->sb_idx;
564         p_ramrod->vport_id              = abs_vport_id;
565         p_ramrod->stats_counter_id      = stats_id;
566         p_ramrod->rx_queue_id           = cpu_to_le16(abs_rx_q_id);
567         p_ramrod->complete_cqe_flg      = 0;
568         p_ramrod->complete_event_flg    = 1;
569
570         p_ramrod->bd_max_bytes  = cpu_to_le16(bd_max_bytes);
571         DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);
572
573         p_ramrod->num_of_pbl_pages      = cpu_to_le16(cqe_pbl_size);
574         DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
575
576         rc = qed_spq_post(p_hwfn, p_ent, NULL);
577
578         return rc;
579 }
580
/* Start an Rx queue.
 *
 * VFs forward the request to the PF. PFs translate ids, expose the
 * queue's producer address in BAR0 (returned through @pp_prod), zero the
 * producers, acquire a CID and post the RX_QUEUE_START ramrod. On ramrod
 * failure the freshly-acquired CID is released again.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int
qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
			  u16 opaque_fid,
			  struct qed_queue_start_common_params *params,
			  u16 bd_max_bytes,
			  dma_addr_t bd_chain_phys_addr,
			  dma_addr_t cqe_pbl_addr,
			  u16 cqe_pbl_size, void __iomem **pp_prod)
{
	struct qed_hw_cid_data *p_rx_cid;
	u64 init_prod_val = 0;
	u16 abs_l2_queue = 0;
	u8 abs_stats_id = 0;
	int rc;

	if (IS_VF(p_hwfn->cdev)) {
		return qed_vf_pf_rxq_start(p_hwfn,
					   params->queue_id,
					   params->sb,
					   params->sb_idx,
					   bd_max_bytes,
					   bd_chain_phys_addr,
					   cqe_pbl_addr, cqe_pbl_size, pp_prod);
	}

	rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue);
	if (rc != 0)
		return rc;

	/* NOTE(review): the absolute vport id doubles as the stats id here */
	rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_stats_id);
	if (rc != 0)
		return rc;

	/* Producer doorbell lives in the MSDM GTT region of BAR0 */
	*pp_prod = (u8 __iomem *)p_hwfn->regview +
				 GTT_BAR0_MAP_REG_MSDM_RAM +
				 MSTORM_PRODS_OFFSET(abs_l2_queue);

	/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
	__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
			  (u32 *)(&init_prod_val));

	/* Allocate a CID for the queue */
	p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
				 &p_rx_cid->cid);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
		return rc;
	}
	p_rx_cid->b_cid_allocated = true;

	rc = qed_sp_eth_rxq_start_ramrod(p_hwfn,
					 opaque_fid,
					 p_rx_cid->cid,
					 params,
					 abs_stats_id,
					 bd_max_bytes,
					 bd_chain_phys_addr,
					 cqe_pbl_addr,
					 cqe_pbl_size);

	/* Undo the CID acquisition if the ramrod could not be posted */
	if (rc != 0)
		qed_sp_release_queue_cid(p_hwfn, p_rx_cid);

	return rc;
}
647
/* Post an RX_QUEUE_UPDATE ramrod for each of @num_rxqs consecutive Rx
 * queues starting at @rx_queue_id, updating their completion-reporting
 * flags.
 *
 * NOTE(review): if @num_rxqs is 0 the loop never runs and the initial
 * rc (-EINVAL) is returned - presumably intentional input validation;
 * confirm against callers. The qed_fw_vport()/qed_fw_l2_queue() return
 * values inside the loop are ignored.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
				u16 rx_queue_id,
				u8 num_rxqs,
				u8 complete_cqe_flg,
				u8 complete_event_flg,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	struct rx_queue_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	struct qed_hw_cid_data *p_rx_cid;
	u16 qid, abs_rx_q_id = 0;
	int rc = -EINVAL;
	u8 i;

	memset(&init_data, 0, sizeof(init_data));
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	/* One ramrod per queue, each on that queue's own CID */
	for (i = 0; i < num_rxqs; i++) {
		qid = rx_queue_id + i;
		p_rx_cid = &p_hwfn->p_rx_cids[qid];

		/* Get SPQ entry */
		init_data.cid = p_rx_cid->cid;
		init_data.opaque_fid = p_rx_cid->opaque_fid;

		rc = qed_sp_init_request(p_hwfn, &p_ent,
					 ETH_RAMROD_RX_QUEUE_UPDATE,
					 PROTOCOLID_ETH, &init_data);
		if (rc)
			return rc;

		p_ramrod = &p_ent->ramrod.rx_queue_update;

		/* Translate relative vport/queue ids to absolute ones */
		qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
		qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
		p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
		p_ramrod->complete_cqe_flg = complete_cqe_flg;
		p_ramrod->complete_event_flg = complete_event_flg;

		rc = qed_spq_post(p_hwfn, p_ent, NULL);
		if (rc)
			return rc;
	}

	return rc;
}
697
698 int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
699                              u16 rx_queue_id,
700                              bool eq_completion_only, bool cqe_completion)
701 {
702         struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
703         struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
704         struct qed_spq_entry *p_ent = NULL;
705         struct qed_sp_init_data init_data;
706         u16 abs_rx_q_id = 0;
707         int rc = -EINVAL;
708
709         if (IS_VF(p_hwfn->cdev))
710                 return qed_vf_pf_rxq_stop(p_hwfn, rx_queue_id, cqe_completion);
711
712         /* Get SPQ entry */
713         memset(&init_data, 0, sizeof(init_data));
714         init_data.cid = p_rx_cid->cid;
715         init_data.opaque_fid = p_rx_cid->opaque_fid;
716         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
717
718         rc = qed_sp_init_request(p_hwfn, &p_ent,
719                                  ETH_RAMROD_RX_QUEUE_STOP,
720                                  PROTOCOLID_ETH, &init_data);
721         if (rc)
722                 return rc;
723
724         p_ramrod = &p_ent->ramrod.rx_queue_stop;
725
726         qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
727         qed_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
728         p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
729
730         /* Cleaning the queue requires the completion to arrive there.
731          * In addition, VFs require the answer to come as eqe to PF.
732          */
733         p_ramrod->complete_cqe_flg =
734                 (!!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) &&
735                  !eq_completion_only) || cqe_completion;
736         p_ramrod->complete_event_flg =
737                 !(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) ||
738                 eq_completion_only;
739
740         rc = qed_spq_post(p_hwfn, p_ent, NULL);
741         if (rc)
742                 return rc;
743
744         return qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
745 }
746
747 int qed_sp_eth_txq_start_ramrod(struct qed_hwfn  *p_hwfn,
748                                 u16  opaque_fid,
749                                 u32  cid,
750                                 struct qed_queue_start_common_params *p_params,
751                                 u8  stats_id,
752                                 dma_addr_t pbl_addr,
753                                 u16 pbl_size,
754                                 union qed_qm_pq_params *p_pq_params)
755 {
756         struct tx_queue_start_ramrod_data *p_ramrod = NULL;
757         struct qed_spq_entry *p_ent = NULL;
758         struct qed_sp_init_data init_data;
759         struct qed_hw_cid_data *p_tx_cid;
760         u8 abs_vport_id;
761         int rc = -EINVAL;
762         u16 pq_id;
763
764         /* Store information for the stop */
765         p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
766         p_tx_cid->cid           = cid;
767         p_tx_cid->opaque_fid    = opaque_fid;
768
769         rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
770         if (rc)
771                 return rc;
772
773         /* Get SPQ entry */
774         memset(&init_data, 0, sizeof(init_data));
775         init_data.cid = cid;
776         init_data.opaque_fid = opaque_fid;
777         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
778
779         rc = qed_sp_init_request(p_hwfn, &p_ent,
780                                  ETH_RAMROD_TX_QUEUE_START,
781                                  PROTOCOLID_ETH, &init_data);
782         if (rc)
783                 return rc;
784
785         p_ramrod                = &p_ent->ramrod.tx_queue_start;
786         p_ramrod->vport_id      = abs_vport_id;
787
788         p_ramrod->sb_id                 = cpu_to_le16(p_params->sb);
789         p_ramrod->sb_index              = p_params->sb_idx;
790         p_ramrod->stats_counter_id      = stats_id;
791
792         p_ramrod->pbl_size              = cpu_to_le16(pbl_size);
793         DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
794
795         pq_id                   = qed_get_qm_pq(p_hwfn,
796                                                 PROTOCOLID_ETH,
797                                                 p_pq_params);
798         p_ramrod->qm_pq_id      = cpu_to_le16(pq_id);
799
800         return qed_spq_post(p_hwfn, p_ent, NULL);
801 }
802
803 static int
804 qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
805                           u16 opaque_fid,
806                           struct qed_queue_start_common_params *p_params,
807                           dma_addr_t pbl_addr,
808                           u16 pbl_size, void __iomem **pp_doorbell)
809 {
810         struct qed_hw_cid_data *p_tx_cid;
811         union qed_qm_pq_params pq_params;
812         u8 abs_stats_id = 0;
813         int rc;
814
815         if (IS_VF(p_hwfn->cdev)) {
816                 return qed_vf_pf_txq_start(p_hwfn,
817                                            p_params->queue_id,
818                                            p_params->sb,
819                                            p_params->sb_idx,
820                                            pbl_addr, pbl_size, pp_doorbell);
821         }
822
823         rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
824         if (rc)
825                 return rc;
826
827         p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
828         memset(p_tx_cid, 0, sizeof(*p_tx_cid));
829         memset(&pq_params, 0, sizeof(pq_params));
830
831         /* Allocate a CID for the queue */
832         rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
833                                  &p_tx_cid->cid);
834         if (rc) {
835                 DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
836                 return rc;
837         }
838         p_tx_cid->b_cid_allocated = true;
839
840         DP_VERBOSE(p_hwfn, QED_MSG_SP,
841                    "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
842                    opaque_fid, p_tx_cid->cid,
843                    p_params->queue_id, p_params->vport_id, p_params->sb);
844
845         rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
846                                          opaque_fid,
847                                          p_tx_cid->cid,
848                                          p_params,
849                                          abs_stats_id,
850                                          pbl_addr,
851                                          pbl_size,
852                                          &pq_params);
853
854         *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
855                                      qed_db_addr(p_tx_cid->cid, DQ_DEMS_LEGACY);
856
857         if (rc)
858                 qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
859
860         return rc;
861 }
862
863 int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id)
864 {
865         struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
866         struct qed_spq_entry *p_ent = NULL;
867         struct qed_sp_init_data init_data;
868         int rc = -EINVAL;
869
870         if (IS_VF(p_hwfn->cdev))
871                 return qed_vf_pf_txq_stop(p_hwfn, tx_queue_id);
872
873         /* Get SPQ entry */
874         memset(&init_data, 0, sizeof(init_data));
875         init_data.cid = p_tx_cid->cid;
876         init_data.opaque_fid = p_tx_cid->opaque_fid;
877         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
878
879         rc = qed_sp_init_request(p_hwfn, &p_ent,
880                                  ETH_RAMROD_TX_QUEUE_STOP,
881                                  PROTOCOLID_ETH, &init_data);
882         if (rc)
883                 return rc;
884
885         rc = qed_spq_post(p_hwfn, p_ent, NULL);
886         if (rc)
887                 return rc;
888
889         return qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
890 }
891
892 static enum eth_filter_action
893 qed_filter_action(enum qed_filter_opcode opcode)
894 {
895         enum eth_filter_action action = MAX_ETH_FILTER_ACTION;
896
897         switch (opcode) {
898         case QED_FILTER_ADD:
899                 action = ETH_FILTER_ACTION_ADD;
900                 break;
901         case QED_FILTER_REMOVE:
902                 action = ETH_FILTER_ACTION_REMOVE;
903                 break;
904         case QED_FILTER_FLUSH:
905                 action = ETH_FILTER_ACTION_REMOVE_ALL;
906                 break;
907         default:
908                 action = MAX_ETH_FILTER_ACTION;
909         }
910
911         return action;
912 }
913
914 static void qed_set_fw_mac_addr(__le16 *fw_msb,
915                                 __le16 *fw_mid,
916                                 __le16 *fw_lsb,
917                                 u8 *mac)
918 {
919         ((u8 *)fw_msb)[0] = mac[1];
920         ((u8 *)fw_msb)[1] = mac[0];
921         ((u8 *)fw_mid)[0] = mac[3];
922         ((u8 *)fw_mid)[1] = mac[2];
923         ((u8 *)fw_lsb)[0] = mac[5];
924         ((u8 *)fw_lsb)[1] = mac[4];
925 }
926
/* Build (but do not post) a FILTERS_UPDATE ramrod for a unicast filter
 * command. Translates the driver-level command in @p_filter_cmd into one or
 * two FW filter commands inside the ramrod; the acquired SPQ entry and the
 * ramrod data are returned through @pp_ent / @pp_ramrod so the caller can
 * post it. Returns 0 on success, negative error code otherwise.
 */
static int
qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
                        u16 opaque_fid,
                        struct qed_filter_ucast *p_filter_cmd,
                        struct vport_filter_update_ramrod_data **pp_ramrod,
                        struct qed_spq_entry **pp_ent,
                        enum spq_mode comp_mode,
                        struct qed_spq_comp_cb *p_comp_data)
{
        u8 vport_to_add_to = 0, vport_to_remove_from = 0;
        struct vport_filter_update_ramrod_data *p_ramrod;
        struct eth_filter_cmd *p_first_filter;
        struct eth_filter_cmd *p_second_filter;
        struct qed_sp_init_data init_data;
        enum eth_filter_action action;
        int rc;

        /* Translate both relative vport ids to absolute FW ids up front */
        rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
                          &vport_to_remove_from);
        if (rc)
                return rc;

        rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
                          &vport_to_add_to);
        if (rc)
                return rc;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = opaque_fid;
        init_data.comp_mode = comp_mode;
        init_data.p_comp_data = p_comp_data;

        rc = qed_sp_init_request(p_hwfn, pp_ent,
                                 ETH_RAMROD_FILTERS_UPDATE,
                                 PROTOCOLID_ETH, &init_data);
        if (rc)
                return rc;

        *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
        p_ramrod = *pp_ramrod;
        p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
        p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;

        /* MOVE and REPLACE are composed of a remove plus an add, so they
         * carry two filter commands; everything else carries one.
         */
        switch (p_filter_cmd->opcode) {
        case QED_FILTER_REPLACE:
        case QED_FILTER_MOVE:
                p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
        default:
                p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
        }

        p_first_filter  = &p_ramrod->filter_cmds[0];
        p_second_filter = &p_ramrod->filter_cmds[1];

        /* Map the driver filter type onto the FW filter type */
        switch (p_filter_cmd->type) {
        case QED_FILTER_MAC:
                p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
        case QED_FILTER_VLAN:
                p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
        case QED_FILTER_MAC_VLAN:
                p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
        case QED_FILTER_INNER_MAC:
                p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
        case QED_FILTER_INNER_VLAN:
                p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
        case QED_FILTER_INNER_PAIR:
                p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
        case QED_FILTER_INNER_MAC_VNI_PAIR:
                p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
                break;
        case QED_FILTER_MAC_VNI_PAIR:
                p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
        case QED_FILTER_VNI:
                p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
        }

        /* Fill only the fields the chosen filter type actually uses:
         * MAC for MAC-bearing types, VLAN id for VLAN-bearing types,
         * VNI for tunnel types.
         */
        if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
            (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
            (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
            (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
            (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
            (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
                qed_set_fw_mac_addr(&p_first_filter->mac_msb,
                                    &p_first_filter->mac_mid,
                                    &p_first_filter->mac_lsb,
                                    (u8 *)p_filter_cmd->mac);
        }

        if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
            (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
            (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
            (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
                p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);

        if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
            (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
            (p_first_filter->type == ETH_FILTER_TYPE_VNI))
                p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);

        if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
                /* MOVE: remove from the source vport, add the same filter
                 * on the destination vport.
                 */
                p_second_filter->type           = p_first_filter->type;
                p_second_filter->mac_msb        = p_first_filter->mac_msb;
                p_second_filter->mac_mid        = p_first_filter->mac_mid;
                p_second_filter->mac_lsb        = p_first_filter->mac_lsb;
                p_second_filter->vlan_id        = p_first_filter->vlan_id;
                p_second_filter->vni            = p_first_filter->vni;

                p_first_filter->action = ETH_FILTER_ACTION_REMOVE;

                p_first_filter->vport_id = vport_to_remove_from;

                p_second_filter->action         = ETH_FILTER_ACTION_ADD;
                p_second_filter->vport_id       = vport_to_add_to;
        } else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
                /* REPLACE: flush all filters on the vport, then add the new
                 * one; vport_id is set before the copy so both commands
                 * target the same vport.
                 */
                p_first_filter->vport_id = vport_to_add_to;
                memcpy(p_second_filter, p_first_filter,
                       sizeof(*p_second_filter));
                p_first_filter->action  = ETH_FILTER_ACTION_REMOVE_ALL;
                p_second_filter->action = ETH_FILTER_ACTION_ADD;
        } else {
                /* Single-command opcodes map directly to a FW action */
                action = qed_filter_action(p_filter_cmd->opcode);

                if (action == MAX_ETH_FILTER_ACTION) {
                        DP_NOTICE(p_hwfn,
                                  "%d is not supported yet\n",
                                  p_filter_cmd->opcode);
                        return -EINVAL;
                }

                p_first_filter->action = action;
                p_first_filter->vport_id = (p_filter_cmd->opcode ==
                                            QED_FILTER_REMOVE) ?
                                           vport_to_remove_from :
                                           vport_to_add_to;
        }

        return 0;
}
1067
/* Configure a unicast filter: build the FILTERS_UPDATE ramrod via
 * qed_filter_ucast_common() and post it on the SPQ. Returns 0 on success
 * or a negative error code.
 */
int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
                            u16 opaque_fid,
                            struct qed_filter_ucast *p_filter_cmd,
                            enum spq_mode comp_mode,
                            struct qed_spq_comp_cb *p_comp_data)
{
        struct vport_filter_update_ramrod_data  *p_ramrod       = NULL;
        struct qed_spq_entry                    *p_ent          = NULL;
        struct eth_filter_cmd_header            *p_header;
        int                                     rc;

        rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
                                     &p_ramrod, &p_ent,
                                     comp_mode, p_comp_data);
        if (rc != 0) {
                DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
                return rc;
        }
        p_header = &p_ramrod->filter_cmd_hdr;
        p_header->assert_on_error = p_filter_cmd->assert_on_error;

        rc = qed_spq_post(p_hwfn, p_ent, NULL);
        if (rc != 0) {
                /* NOTE(review): this message says "ADD" regardless of the
                 * actual opcode (REMOVE/MOVE/REPLACE take this path too).
                 */
                DP_ERR(p_hwfn,
                       "Unicast filter ADD command failed %d\n",
                       rc);
                return rc;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_SP,
                   "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
                   (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
                   ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
                   "REMOVE" :
                   ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
                    "MOVE" : "REPLACE")),
                   (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
                   ((p_filter_cmd->type == QED_FILTER_VLAN) ?
                    "VLAN" : "MAC & VLAN"),
                   p_ramrod->filter_cmd_hdr.cmd_cnt,
                   p_filter_cmd->is_rx_filter,
                   p_filter_cmd->is_tx_filter);
        DP_VERBOSE(p_hwfn, QED_MSG_SP,
                   "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
                   p_filter_cmd->vport_to_add_to,
                   p_filter_cmd->vport_to_remove_from,
                   p_filter_cmd->mac[0],
                   p_filter_cmd->mac[1],
                   p_filter_cmd->mac[2],
                   p_filter_cmd->mac[3],
                   p_filter_cmd->mac[4],
                   p_filter_cmd->mac[5],
                   p_filter_cmd->vlan);

        return 0;
}
1124
1125 /*******************************************************************************
1126  * Description:
1127  *         Calculates crc 32 on a buffer
1128  *         Note: crc32_length MUST be aligned to 8
1129  * Return:
1130  ******************************************************************************/
1131 static u32 qed_calc_crc32c(u8 *crc32_packet,
1132                            u32 crc32_length,
1133                            u32 crc32_seed,
1134                            u8 complement)
1135 {
1136         u32 byte = 0;
1137         u32 bit = 0;
1138         u8 msb = 0;
1139         u8 current_byte = 0;
1140         u32 crc32_result = crc32_seed;
1141
1142         if ((!crc32_packet) ||
1143             (crc32_length == 0) ||
1144             ((crc32_length % 8) != 0))
1145                 return crc32_result;
1146         for (byte = 0; byte < crc32_length; byte++) {
1147                 current_byte = crc32_packet[byte];
1148                 for (bit = 0; bit < 8; bit++) {
1149                         msb = (u8)(crc32_result >> 31);
1150                         crc32_result = crc32_result << 1;
1151                         if (msb != (0x1 & (current_byte >> bit))) {
1152                                 crc32_result = crc32_result ^ CRC32_POLY;
1153                                 crc32_result |= 1; /*crc32_result[0] = 1;*/
1154                         }
1155                 }
1156         }
1157         return crc32_result;
1158 }
1159
1160 static inline u32 qed_crc32c_le(u32 seed,
1161                                 u8 *mac,
1162                                 u32 len)
1163 {
1164         u32 packet_buf[2] = { 0 };
1165
1166         memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
1167         return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
1168 }
1169
1170 u8 qed_mcast_bin_from_mac(u8 *mac)
1171 {
1172         u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
1173                                 mac, ETH_ALEN);
1174
1175         return crc & 0xff;
1176 }
1177
1178 static int
1179 qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
1180                         u16 opaque_fid,
1181                         struct qed_filter_mcast *p_filter_cmd,
1182                         enum spq_mode comp_mode,
1183                         struct qed_spq_comp_cb *p_comp_data)
1184 {
1185         unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
1186         struct vport_update_ramrod_data *p_ramrod = NULL;
1187         struct qed_spq_entry *p_ent = NULL;
1188         struct qed_sp_init_data init_data;
1189         u8 abs_vport_id = 0;
1190         int rc, i;
1191
1192         if (p_filter_cmd->opcode == QED_FILTER_ADD) {
1193                 rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
1194                                   &abs_vport_id);
1195                 if (rc)
1196                         return rc;
1197         } else {
1198                 rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
1199                                   &abs_vport_id);
1200                 if (rc)
1201                         return rc;
1202         }
1203
1204         /* Get SPQ entry */
1205         memset(&init_data, 0, sizeof(init_data));
1206         init_data.cid = qed_spq_get_cid(p_hwfn);
1207         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1208         init_data.comp_mode = comp_mode;
1209         init_data.p_comp_data = p_comp_data;
1210
1211         rc = qed_sp_init_request(p_hwfn, &p_ent,
1212                                  ETH_RAMROD_VPORT_UPDATE,
1213                                  PROTOCOLID_ETH, &init_data);
1214         if (rc) {
1215                 DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
1216                 return rc;
1217         }
1218
1219         p_ramrod = &p_ent->ramrod.vport_update;
1220         p_ramrod->common.update_approx_mcast_flg = 1;
1221
1222         /* explicitly clear out the entire vector */
1223         memset(&p_ramrod->approx_mcast.bins, 0,
1224                sizeof(p_ramrod->approx_mcast.bins));
1225         memset(bins, 0, sizeof(unsigned long) *
1226                ETH_MULTICAST_MAC_BINS_IN_REGS);
1227         /* filter ADD op is explicit set op and it removes
1228          *  any existing filters for the vport
1229          */
1230         if (p_filter_cmd->opcode == QED_FILTER_ADD) {
1231                 for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
1232                         u32 bit;
1233
1234                         bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
1235                         __set_bit(bit, bins);
1236                 }
1237
1238                 /* Convert to correct endianity */
1239                 for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
1240                         u32 *p_bins = (u32 *)bins;
1241                         struct vport_update_ramrod_mcast *approx_mcast;
1242
1243                         approx_mcast = &p_ramrod->approx_mcast;
1244                         approx_mcast->bins[i] = cpu_to_le32(p_bins[i]);
1245                 }
1246         }
1247
1248         p_ramrod->common.vport_id = abs_vport_id;
1249
1250         return qed_spq_post(p_hwfn, p_ent, NULL);
1251 }
1252
1253 static int qed_filter_mcast_cmd(struct qed_dev *cdev,
1254                                 struct qed_filter_mcast *p_filter_cmd,
1255                                 enum spq_mode comp_mode,
1256                                 struct qed_spq_comp_cb *p_comp_data)
1257 {
1258         int rc = 0;
1259         int i;
1260
1261         /* only ADD and REMOVE operations are supported for multi-cast */
1262         if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
1263              (p_filter_cmd->opcode != QED_FILTER_REMOVE)) ||
1264             (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS))
1265                 return -EINVAL;
1266
1267         for_each_hwfn(cdev, i) {
1268                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1269
1270                 u16 opaque_fid;
1271
1272                 if (IS_VF(cdev)) {
1273                         qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
1274                         continue;
1275                 }
1276
1277                 opaque_fid = p_hwfn->hw_info.opaque_fid;
1278
1279                 rc = qed_sp_eth_filter_mcast(p_hwfn,
1280                                              opaque_fid,
1281                                              p_filter_cmd,
1282                                              comp_mode,
1283                                              p_comp_data);
1284         }
1285         return rc;
1286 }
1287
1288 static int qed_filter_ucast_cmd(struct qed_dev *cdev,
1289                                 struct qed_filter_ucast *p_filter_cmd,
1290                                 enum spq_mode comp_mode,
1291                                 struct qed_spq_comp_cb *p_comp_data)
1292 {
1293         int rc = 0;
1294         int i;
1295
1296         for_each_hwfn(cdev, i) {
1297                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1298                 u16 opaque_fid;
1299
1300                 if (IS_VF(cdev)) {
1301                         rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
1302                         continue;
1303                 }
1304
1305                 opaque_fid = p_hwfn->hw_info.opaque_fid;
1306
1307                 rc = qed_sp_eth_filter_ucast(p_hwfn,
1308                                              opaque_fid,
1309                                              p_filter_cmd,
1310                                              comp_mode,
1311                                              p_comp_data);
1312                 if (rc != 0)
1313                         break;
1314         }
1315
1316         return rc;
1317 }
1318
1319 /* Statistics related code */
1320 static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
1321                                            u32 *p_addr,
1322                                            u32 *p_len, u16 statistics_bin)
1323 {
1324         if (IS_PF(p_hwfn->cdev)) {
1325                 *p_addr = BAR0_MAP_REG_PSDM_RAM +
1326                     PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1327                 *p_len = sizeof(struct eth_pstorm_per_queue_stat);
1328         } else {
1329                 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1330                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1331
1332                 *p_addr = p_resp->pfdev_info.stats_info.pstats.address;
1333                 *p_len = p_resp->pfdev_info.stats_info.pstats.len;
1334         }
1335 }
1336
1337 static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
1338                                    struct qed_ptt *p_ptt,
1339                                    struct qed_eth_stats *p_stats,
1340                                    u16 statistics_bin)
1341 {
1342         struct eth_pstorm_per_queue_stat pstats;
1343         u32 pstats_addr = 0, pstats_len = 0;
1344
1345         __qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
1346                                        statistics_bin);
1347
1348         memset(&pstats, 0, sizeof(pstats));
1349         qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
1350
1351         p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
1352         p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
1353         p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
1354         p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
1355         p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
1356         p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
1357         p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts);
1358 }
1359
1360 static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
1361                                    struct qed_ptt *p_ptt,
1362                                    struct qed_eth_stats *p_stats,
1363                                    u16 statistics_bin)
1364 {
1365         struct tstorm_per_port_stat tstats;
1366         u32 tstats_addr, tstats_len;
1367
1368         if (IS_PF(p_hwfn->cdev)) {
1369                 tstats_addr = BAR0_MAP_REG_TSDM_RAM +
1370                     TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
1371                 tstats_len = sizeof(struct tstorm_per_port_stat);
1372         } else {
1373                 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1374                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1375
1376                 tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
1377                 tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
1378         }
1379
1380         memset(&tstats, 0, sizeof(tstats));
1381         qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
1382
1383         p_stats->mftag_filter_discards +=
1384                 HILO_64_REGPAIR(tstats.mftag_filter_discard);
1385         p_stats->mac_filter_discards +=
1386                 HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
1387 }
1388
1389 static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
1390                                            u32 *p_addr,
1391                                            u32 *p_len, u16 statistics_bin)
1392 {
1393         if (IS_PF(p_hwfn->cdev)) {
1394                 *p_addr = BAR0_MAP_REG_USDM_RAM +
1395                     USTORM_QUEUE_STAT_OFFSET(statistics_bin);
1396                 *p_len = sizeof(struct eth_ustorm_per_queue_stat);
1397         } else {
1398                 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1399                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1400
1401                 *p_addr = p_resp->pfdev_info.stats_info.ustats.address;
1402                 *p_len = p_resp->pfdev_info.stats_info.ustats.len;
1403         }
1404 }
1405
1406 static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
1407                                    struct qed_ptt *p_ptt,
1408                                    struct qed_eth_stats *p_stats,
1409                                    u16 statistics_bin)
1410 {
1411         struct eth_ustorm_per_queue_stat ustats;
1412         u32 ustats_addr = 0, ustats_len = 0;
1413
1414         __qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
1415                                        statistics_bin);
1416
1417         memset(&ustats, 0, sizeof(ustats));
1418         qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
1419
1420         p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
1421         p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
1422         p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
1423         p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
1424         p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
1425         p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
1426 }
1427
1428 static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
1429                                            u32 *p_addr,
1430                                            u32 *p_len, u16 statistics_bin)
1431 {
1432         if (IS_PF(p_hwfn->cdev)) {
1433                 *p_addr = BAR0_MAP_REG_MSDM_RAM +
1434                     MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1435                 *p_len = sizeof(struct eth_mstorm_per_queue_stat);
1436         } else {
1437                 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1438                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1439
1440                 *p_addr = p_resp->pfdev_info.stats_info.mstats.address;
1441                 *p_len = p_resp->pfdev_info.stats_info.mstats.len;
1442         }
1443 }
1444
1445 static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
1446                                    struct qed_ptt *p_ptt,
1447                                    struct qed_eth_stats *p_stats,
1448                                    u16 statistics_bin)
1449 {
1450         struct eth_mstorm_per_queue_stat mstats;
1451         u32 mstats_addr = 0, mstats_len = 0;
1452
1453         __qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
1454                                        statistics_bin);
1455
1456         memset(&mstats, 0, sizeof(mstats));
1457         qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
1458
1459         p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard);
1460         p_stats->packet_too_big_discard +=
1461                 HILO_64_REGPAIR(mstats.packet_too_big_discard);
1462         p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
1463         p_stats->tpa_coalesced_pkts +=
1464                 HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
1465         p_stats->tpa_coalesced_events +=
1466                 HILO_64_REGPAIR(mstats.tpa_coalesced_events);
1467         p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num);
1468         p_stats->tpa_coalesced_bytes +=
1469                 HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
1470 }
1471
1472 static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
1473                                        struct qed_ptt *p_ptt,
1474                                        struct qed_eth_stats *p_stats)
1475 {
1476         struct port_stats port_stats;
1477         int j;
1478
1479         memset(&port_stats, 0, sizeof(port_stats));
1480
1481         qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
1482                         p_hwfn->mcp_info->port_addr +
1483                         offsetof(struct public_port, stats),
1484                         sizeof(port_stats));
1485
1486         p_stats->rx_64_byte_packets             += port_stats.pmm.r64;
1487         p_stats->rx_65_to_127_byte_packets      += port_stats.pmm.r127;
1488         p_stats->rx_128_to_255_byte_packets     += port_stats.pmm.r255;
1489         p_stats->rx_256_to_511_byte_packets     += port_stats.pmm.r511;
1490         p_stats->rx_512_to_1023_byte_packets    += port_stats.pmm.r1023;
1491         p_stats->rx_1024_to_1518_byte_packets   += port_stats.pmm.r1518;
1492         p_stats->rx_1519_to_1522_byte_packets   += port_stats.pmm.r1522;
1493         p_stats->rx_1519_to_2047_byte_packets   += port_stats.pmm.r2047;
1494         p_stats->rx_2048_to_4095_byte_packets   += port_stats.pmm.r4095;
1495         p_stats->rx_4096_to_9216_byte_packets   += port_stats.pmm.r9216;
1496         p_stats->rx_9217_to_16383_byte_packets  += port_stats.pmm.r16383;
1497         p_stats->rx_crc_errors                  += port_stats.pmm.rfcs;
1498         p_stats->rx_mac_crtl_frames             += port_stats.pmm.rxcf;
1499         p_stats->rx_pause_frames                += port_stats.pmm.rxpf;
1500         p_stats->rx_pfc_frames                  += port_stats.pmm.rxpp;
1501         p_stats->rx_align_errors                += port_stats.pmm.raln;
1502         p_stats->rx_carrier_errors              += port_stats.pmm.rfcr;
1503         p_stats->rx_oversize_packets            += port_stats.pmm.rovr;
1504         p_stats->rx_jabbers                     += port_stats.pmm.rjbr;
1505         p_stats->rx_undersize_packets           += port_stats.pmm.rund;
1506         p_stats->rx_fragments                   += port_stats.pmm.rfrg;
1507         p_stats->tx_64_byte_packets             += port_stats.pmm.t64;
1508         p_stats->tx_65_to_127_byte_packets      += port_stats.pmm.t127;
1509         p_stats->tx_128_to_255_byte_packets     += port_stats.pmm.t255;
1510         p_stats->tx_256_to_511_byte_packets     += port_stats.pmm.t511;
1511         p_stats->tx_512_to_1023_byte_packets    += port_stats.pmm.t1023;
1512         p_stats->tx_1024_to_1518_byte_packets   += port_stats.pmm.t1518;
1513         p_stats->tx_1519_to_2047_byte_packets   += port_stats.pmm.t2047;
1514         p_stats->tx_2048_to_4095_byte_packets   += port_stats.pmm.t4095;
1515         p_stats->tx_4096_to_9216_byte_packets   += port_stats.pmm.t9216;
1516         p_stats->tx_9217_to_16383_byte_packets  += port_stats.pmm.t16383;
1517         p_stats->tx_pause_frames                += port_stats.pmm.txpf;
1518         p_stats->tx_pfc_frames                  += port_stats.pmm.txpp;
1519         p_stats->tx_lpi_entry_count             += port_stats.pmm.tlpiec;
1520         p_stats->tx_total_collisions            += port_stats.pmm.tncl;
1521         p_stats->rx_mac_bytes                   += port_stats.pmm.rbyte;
1522         p_stats->rx_mac_uc_packets              += port_stats.pmm.rxuca;
1523         p_stats->rx_mac_mc_packets              += port_stats.pmm.rxmca;
1524         p_stats->rx_mac_bc_packets              += port_stats.pmm.rxbca;
1525         p_stats->rx_mac_frames_ok               += port_stats.pmm.rxpok;
1526         p_stats->tx_mac_bytes                   += port_stats.pmm.tbyte;
1527         p_stats->tx_mac_uc_packets              += port_stats.pmm.txuca;
1528         p_stats->tx_mac_mc_packets              += port_stats.pmm.txmca;
1529         p_stats->tx_mac_bc_packets              += port_stats.pmm.txbca;
1530         p_stats->tx_mac_ctrl_frames             += port_stats.pmm.txcf;
1531         for (j = 0; j < 8; j++) {
1532                 p_stats->brb_truncates  += port_stats.brb.brb_truncate[j];
1533                 p_stats->brb_discards   += port_stats.brb.brb_discard[j];
1534         }
1535 }
1536
1537 static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
1538                                   struct qed_ptt *p_ptt,
1539                                   struct qed_eth_stats *stats,
1540                                   u16 statistics_bin, bool b_get_port_stats)
1541 {
1542         __qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
1543         __qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
1544         __qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
1545         __qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
1546
1547         if (b_get_port_stats && p_hwfn->mcp_info)
1548                 __qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
1549 }
1550
1551 static void _qed_get_vport_stats(struct qed_dev *cdev,
1552                                  struct qed_eth_stats *stats)
1553 {
1554         u8 fw_vport = 0;
1555         int i;
1556
1557         memset(stats, 0, sizeof(*stats));
1558
1559         for_each_hwfn(cdev, i) {
1560                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1561                 struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
1562                                                     :  NULL;
1563
1564                 if (IS_PF(cdev)) {
1565                         /* The main vport index is relative first */
1566                         if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
1567                                 DP_ERR(p_hwfn, "No vport available!\n");
1568                                 goto out;
1569                         }
1570                 }
1571
1572                 if (IS_PF(cdev) && !p_ptt) {
1573                         DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1574                         continue;
1575                 }
1576
1577                 __qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
1578                                       IS_PF(cdev) ? true : false);
1579
1580 out:
1581                 if (IS_PF(cdev) && p_ptt)
1582                         qed_ptt_release(p_hwfn, p_ptt);
1583         }
1584 }
1585
1586 void qed_get_vport_stats(struct qed_dev *cdev,
1587                          struct qed_eth_stats *stats)
1588 {
1589         u32 i;
1590
1591         if (!cdev) {
1592                 memset(stats, 0, sizeof(*stats));
1593                 return;
1594         }
1595
1596         _qed_get_vport_stats(cdev, stats);
1597
1598         if (!cdev->reset_stats)
1599                 return;
1600
1601         /* Reduce the statistics baseline */
1602         for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
1603                 ((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
1604 }
1605
/* zeroes V-PORT specific portion of stats (Port stats remains untouched) */
void qed_reset_vport_stats(struct qed_dev *cdev)
{
        int i;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
                struct eth_mstorm_per_queue_stat mstats;
                struct eth_ustorm_per_queue_stat ustats;
                struct eth_pstorm_per_queue_stat pstats;
                /* Only PFs acquire a ptt window; VFs pass NULL through to
                 * the copy helpers.
                 */
                struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
                                                    : NULL;
                u32 addr = 0, len = 0;

                if (IS_PF(cdev) && !p_ptt) {
                        DP_ERR(p_hwfn, "Failed to acquire ptt\n");
                        continue;
                }

                /* Clear the storm statistics by writing zeroed structures
                 * over the device's M/U/P-storm areas for bin 0.
                 */
                memset(&mstats, 0, sizeof(mstats));
                __qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
                qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);

                memset(&ustats, 0, sizeof(ustats));
                __qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
                qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);

                memset(&pstats, 0, sizeof(pstats));
                __qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
                qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);

                if (IS_PF(cdev))
                        qed_ptt_release(p_hwfn, p_ptt);
        }

        /* PORT statistics are not necessarily reset, so we need to
         * read and create a baseline for future statistics.
         */
        if (!cdev->reset_stats)
                DP_INFO(cdev, "Reset stats not allocated\n");
        else
                _qed_get_vport_stats(cdev, cdev->reset_stats);
}
1649
1650 static int qed_fill_eth_dev_info(struct qed_dev *cdev,
1651                                  struct qed_dev_eth_info *info)
1652 {
1653         int i;
1654
1655         memset(info, 0, sizeof(*info));
1656
1657         info->num_tc = 1;
1658
1659         if (IS_PF(cdev)) {
1660                 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
1661                         for_each_hwfn(cdev, i)
1662                             info->num_queues +=
1663                             FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
1664                         if (cdev->int_params.fp_msix_cnt)
1665                                 info->num_queues =
1666                                     min_t(u8, info->num_queues,
1667                                           cdev->int_params.fp_msix_cnt);
1668                 } else {
1669                         info->num_queues = cdev->num_hwfns;
1670                 }
1671
1672                 info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN);
1673                 ether_addr_copy(info->port_mac,
1674                                 cdev->hwfns[0].hw_info.hw_mac_addr);
1675         } else {
1676                 qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), &info->num_queues);
1677                 if (cdev->num_hwfns > 1) {
1678                         u8 queues = 0;
1679
1680                         qed_vf_get_num_rxqs(&cdev->hwfns[1], &queues);
1681                         info->num_queues += queues;
1682                 }
1683
1684                 qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
1685                                             &info->num_vlan_filters);
1686                 qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);
1687         }
1688
1689         qed_fill_dev_info(cdev, &info->common);
1690
1691         if (IS_VF(cdev))
1692                 memset(info->common.hw_mac, 0, ETH_ALEN);
1693
1694         return 0;
1695 }
1696
1697 static void qed_register_eth_ops(struct qed_dev *cdev,
1698                                  struct qed_eth_cb_ops *ops, void *cookie)
1699 {
1700         cdev->protocol_ops.eth = ops;
1701         cdev->ops_cookie = cookie;
1702
1703         /* For VF, we start bulletin reading */
1704         if (IS_VF(cdev))
1705                 qed_vf_start_iov_wq(cdev);
1706 }
1707
1708 static bool qed_check_mac(struct qed_dev *cdev, u8 *mac)
1709 {
1710         if (IS_PF(cdev))
1711                 return true;
1712
1713         return qed_vf_check_mac(&cdev->hwfns[0], mac);
1714 }
1715
1716 static int qed_start_vport(struct qed_dev *cdev,
1717                            struct qed_start_vport_params *params)
1718 {
1719         int rc, i;
1720
1721         for_each_hwfn(cdev, i) {
1722                 struct qed_sp_vport_start_params start = { 0 };
1723                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1724
1725                 start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
1726                                                         QED_TPA_MODE_NONE;
1727                 start.remove_inner_vlan = params->remove_inner_vlan;
1728                 start.only_untagged = true;     /* untagged only */
1729                 start.drop_ttl0 = params->drop_ttl0;
1730                 start.opaque_fid = p_hwfn->hw_info.opaque_fid;
1731                 start.concrete_fid = p_hwfn->hw_info.concrete_fid;
1732                 start.vport_id = params->vport_id;
1733                 start.max_buffers_per_cqe = 16;
1734                 start.mtu = params->mtu;
1735
1736                 rc = qed_sp_vport_start(p_hwfn, &start);
1737                 if (rc) {
1738                         DP_ERR(cdev, "Failed to start VPORT\n");
1739                         return rc;
1740                 }
1741
1742                 qed_hw_start_fastpath(p_hwfn);
1743
1744                 DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
1745                            "Started V-PORT %d with MTU %d\n",
1746                            start.vport_id, start.mtu);
1747         }
1748
1749         qed_reset_vport_stats(cdev);
1750
1751         return 0;
1752 }
1753
1754 static int qed_stop_vport(struct qed_dev *cdev,
1755                           u8 vport_id)
1756 {
1757         int rc, i;
1758
1759         for_each_hwfn(cdev, i) {
1760                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1761
1762                 rc = qed_sp_vport_stop(p_hwfn,
1763                                        p_hwfn->hw_info.opaque_fid,
1764                                        vport_id);
1765
1766                 if (rc) {
1767                         DP_ERR(cdev, "Failed to stop VPORT\n");
1768                         return rc;
1769                 }
1770         }
1771         return 0;
1772 }
1773
/* Callback: apply run-time V-PORT configuration changes (activation state,
 * accept-any-vlan, RSS) on every hwfn of the device.
 *
 * For CMT (multiple hwfns), the caller's RSS indirection table is rewritten
 * in place so each entry maps into a per-engine queue range, or RSS is
 * disabled entirely when only one queue exists per hwfn.
 *
 * Returns 0 on success or the failing hwfn's error code.
 */
static int qed_update_vport(struct qed_dev *cdev,
                            struct qed_update_vport_params *params)
{
        struct qed_sp_vport_update_params sp_params;
        struct qed_rss_params sp_rss_params;
        int rc, i;

        if (!cdev)
                return -ENODEV;

        memset(&sp_params, 0, sizeof(sp_params));
        memset(&sp_rss_params, 0, sizeof(sp_rss_params));

        /* Translate protocol params into sp params */
        sp_params.vport_id = params->vport_id;
        sp_params.update_vport_active_rx_flg =
                params->update_vport_active_flg;
        sp_params.update_vport_active_tx_flg =
                params->update_vport_active_flg;
        sp_params.vport_active_rx_flg = params->vport_active_flg;
        sp_params.vport_active_tx_flg = params->vport_active_flg;
        sp_params.accept_any_vlan = params->accept_any_vlan;
        sp_params.update_accept_any_vlan_flg =
                params->update_accept_any_vlan_flg;

        /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
         * We need to re-fix the rss values per engine for CMT.
         */
        if (cdev->num_hwfns > 1 && params->update_rss_flg) {
                struct qed_update_vport_rss_params *rss =
                        &params->rss_params;
                int k, max = 0;

                /* Find largest entry, since it's possible RSS needs to
                 * be disabled [in case only 1 queue per-hwfn]
                 */
                for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
                        max = (max > rss->rss_ind_table[k]) ?
                                max : rss->rss_ind_table[k];

                /* Either fix RSS values or disable RSS */
                if (cdev->num_hwfns < max + 1) {
                        /* Round up so every table entry stays in range */
                        int divisor = (max + cdev->num_hwfns - 1) /
                                cdev->num_hwfns;

                        DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
                                   "CMT - fixing RSS values (modulo %02x)\n",
                                   divisor);

                        /* NOTE: this mutates the caller's indirection table */
                        for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
                                rss->rss_ind_table[k] =
                                        rss->rss_ind_table[k] % divisor;
                } else {
                        DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
                                   "CMT - 1 queue per-hwfn; Disabling RSS\n");
                        params->update_rss_flg = 0;
                }
        }

        /* Now, update the RSS configuration for actual configuration */
        if (params->update_rss_flg) {
                sp_rss_params.update_rss_config = 1;
                sp_rss_params.rss_enable = 1;
                sp_rss_params.update_rss_capabilities = 1;
                sp_rss_params.update_rss_ind_table = 1;
                sp_rss_params.update_rss_key = 1;
                sp_rss_params.rss_caps = params->rss_params.rss_caps;
                sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
                memcpy(sp_rss_params.rss_ind_table,
                       params->rss_params.rss_ind_table,
                       QED_RSS_IND_TABLE_SIZE * sizeof(u16));
                memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
                       QED_RSS_KEY_SIZE * sizeof(u32));
        }
        sp_params.rss_params = &sp_rss_params;

        /* Issue the vport-update ramrod on each hwfn */
        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
                rc = qed_sp_vport_update(p_hwfn, &sp_params,
                                         QED_SPQ_MODE_EBLOCK,
                                         NULL);
                if (rc) {
                        DP_ERR(cdev, "Failed to update VPORT\n");
                        return rc;
                }

                DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
                           "Updated V-PORT %d: active_flag %d [update %d]\n",
                           params->vport_id, params->vport_active_flg,
                           params->update_vport_active_flg);
        }

        return 0;
}
1870
/* Callback: open an Rx queue on the hwfn that owns @params->rss_id.
 *
 * NOTE(review): @params->queue_id is divided in place to map the global
 * queue index to a per-hwfn one (100g/CMT), so the caller's structure is
 * modified - confirm callers expect this.
 *
 * On success, *pp_prod presumably points at the queue's producer update
 * address - verify against qed_sp_eth_rx_queue_start().
 */
static int qed_start_rxq(struct qed_dev *cdev,
                         struct qed_queue_start_common_params *params,
                         u16 bd_max_bytes,
                         dma_addr_t bd_chain_phys_addr,
                         dma_addr_t cqe_pbl_addr,
                         u16 cqe_pbl_size,
                         void __iomem **pp_prod)
{
        int rc, hwfn_index;
        struct qed_hwfn *p_hwfn;

        /* Queues are spread across hwfns by rss_id */
        hwfn_index = params->rss_id % cdev->num_hwfns;
        p_hwfn = &cdev->hwfns[hwfn_index];

        /* Fix queue ID in 100g mode */
        params->queue_id /= cdev->num_hwfns;

        rc = qed_sp_eth_rx_queue_start(p_hwfn,
                                       p_hwfn->hw_info.opaque_fid,
                                       params,
                                       bd_max_bytes,
                                       bd_chain_phys_addr,
                                       cqe_pbl_addr,
                                       cqe_pbl_size,
                                       pp_prod);

        if (rc) {
                DP_ERR(cdev, "Failed to start RXQ#%d\n", params->queue_id);
                return rc;
        }

        DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
                   "Started RX-Q %d [rss %d] on V-PORT %d and SB %d\n",
                   params->queue_id, params->rss_id, params->vport_id,
                   params->sb);

        return 0;
}
1909
1910 static int qed_stop_rxq(struct qed_dev *cdev,
1911                         struct qed_stop_rxq_params *params)
1912 {
1913         int rc, hwfn_index;
1914         struct qed_hwfn *p_hwfn;
1915
1916         hwfn_index      = params->rss_id % cdev->num_hwfns;
1917         p_hwfn          = &cdev->hwfns[hwfn_index];
1918
1919         rc = qed_sp_eth_rx_queue_stop(p_hwfn,
1920                                       params->rx_queue_id / cdev->num_hwfns,
1921                                       params->eq_completion_only,
1922                                       false);
1923         if (rc) {
1924                 DP_ERR(cdev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
1925                 return rc;
1926         }
1927
1928         return 0;
1929 }
1930
1931 static int qed_start_txq(struct qed_dev *cdev,
1932                          struct qed_queue_start_common_params *p_params,
1933                          dma_addr_t pbl_addr,
1934                          u16 pbl_size,
1935                          void __iomem **pp_doorbell)
1936 {
1937         struct qed_hwfn *p_hwfn;
1938         int rc, hwfn_index;
1939
1940         hwfn_index      = p_params->rss_id % cdev->num_hwfns;
1941         p_hwfn          = &cdev->hwfns[hwfn_index];
1942
1943         /* Fix queue ID in 100g mode */
1944         p_params->queue_id /= cdev->num_hwfns;
1945
1946         rc = qed_sp_eth_tx_queue_start(p_hwfn,
1947                                        p_hwfn->hw_info.opaque_fid,
1948                                        p_params,
1949                                        pbl_addr,
1950                                        pbl_size,
1951                                        pp_doorbell);
1952
1953         if (rc) {
1954                 DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
1955                 return rc;
1956         }
1957
1958         DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
1959                    "Started TX-Q %d [rss %d] on V-PORT %d and SB %d\n",
1960                    p_params->queue_id, p_params->rss_id, p_params->vport_id,
1961                    p_params->sb);
1962
1963         return 0;
1964 }
1965
/* NOTE(review): not referenced by the code visible in this file -
 * confirm remaining users before removing.
 */
#define QED_HW_STOP_RETRY_LIMIT (10)

/* Callback: quiesce the device's fastpath. Always returns 0. */
static int qed_fastpath_stop(struct qed_dev *cdev)
{
        qed_hw_stop_fastpath(cdev);

        return 0;
}
1973
1974 static int qed_stop_txq(struct qed_dev *cdev,
1975                         struct qed_stop_txq_params *params)
1976 {
1977         struct qed_hwfn *p_hwfn;
1978         int rc, hwfn_index;
1979
1980         hwfn_index      = params->rss_id % cdev->num_hwfns;
1981         p_hwfn          = &cdev->hwfns[hwfn_index];
1982
1983         rc = qed_sp_eth_tx_queue_stop(p_hwfn,
1984                                       params->tx_queue_id / cdev->num_hwfns);
1985         if (rc) {
1986                 DP_ERR(cdev, "Failed to stop TXQ#%d\n", params->tx_queue_id);
1987                 return rc;
1988         }
1989
1990         return 0;
1991 }
1992
1993 static int qed_tunn_configure(struct qed_dev *cdev,
1994                               struct qed_tunn_params *tunn_params)
1995 {
1996         struct qed_tunn_update_params tunn_info;
1997         int i, rc;
1998
1999         if (IS_VF(cdev))
2000                 return 0;
2001
2002         memset(&tunn_info, 0, sizeof(tunn_info));
2003         if (tunn_params->update_vxlan_port == 1) {
2004                 tunn_info.update_vxlan_udp_port = 1;
2005                 tunn_info.vxlan_udp_port = tunn_params->vxlan_port;
2006         }
2007
2008         if (tunn_params->update_geneve_port == 1) {
2009                 tunn_info.update_geneve_udp_port = 1;
2010                 tunn_info.geneve_udp_port = tunn_params->geneve_port;
2011         }
2012
2013         for_each_hwfn(cdev, i) {
2014                 struct qed_hwfn *hwfn = &cdev->hwfns[i];
2015
2016                 rc = qed_sp_pf_update_tunn_cfg(hwfn, &tunn_info,
2017                                                QED_SPQ_MODE_EBLOCK, NULL);
2018
2019                 if (rc)
2020                         return rc;
2021         }
2022
2023         return 0;
2024 }
2025
2026 static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
2027                                         enum qed_filter_rx_mode_type type)
2028 {
2029         struct qed_filter_accept_flags accept_flags;
2030
2031         memset(&accept_flags, 0, sizeof(accept_flags));
2032
2033         accept_flags.update_rx_mode_config      = 1;
2034         accept_flags.update_tx_mode_config      = 1;
2035         accept_flags.rx_accept_filter           = QED_ACCEPT_UCAST_MATCHED |
2036                                                   QED_ACCEPT_MCAST_MATCHED |
2037                                                   QED_ACCEPT_BCAST;
2038         accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
2039                                         QED_ACCEPT_MCAST_MATCHED |
2040                                         QED_ACCEPT_BCAST;
2041
2042         if (type == QED_FILTER_RX_MODE_TYPE_PROMISC)
2043                 accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
2044                                                  QED_ACCEPT_MCAST_UNMATCHED;
2045         else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC)
2046                 accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
2047
2048         return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false,
2049                                      QED_SPQ_MODE_CB, NULL);
2050 }
2051
2052 static int qed_configure_filter_ucast(struct qed_dev *cdev,
2053                                       struct qed_filter_ucast_params *params)
2054 {
2055         struct qed_filter_ucast ucast;
2056
2057         if (!params->vlan_valid && !params->mac_valid) {
2058                 DP_NOTICE(
2059                         cdev,
2060                         "Tried configuring a unicast filter, but both MAC and VLAN are not set\n");
2061                 return -EINVAL;
2062         }
2063
2064         memset(&ucast, 0, sizeof(ucast));
2065         switch (params->type) {
2066         case QED_FILTER_XCAST_TYPE_ADD:
2067                 ucast.opcode = QED_FILTER_ADD;
2068                 break;
2069         case QED_FILTER_XCAST_TYPE_DEL:
2070                 ucast.opcode = QED_FILTER_REMOVE;
2071                 break;
2072         case QED_FILTER_XCAST_TYPE_REPLACE:
2073                 ucast.opcode = QED_FILTER_REPLACE;
2074                 break;
2075         default:
2076                 DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
2077                           params->type);
2078         }
2079
2080         if (params->vlan_valid && params->mac_valid) {
2081                 ucast.type = QED_FILTER_MAC_VLAN;
2082                 ether_addr_copy(ucast.mac, params->mac);
2083                 ucast.vlan = params->vlan;
2084         } else if (params->mac_valid) {
2085                 ucast.type = QED_FILTER_MAC;
2086                 ether_addr_copy(ucast.mac, params->mac);
2087         } else {
2088                 ucast.type = QED_FILTER_VLAN;
2089                 ucast.vlan = params->vlan;
2090         }
2091
2092         ucast.is_rx_filter = true;
2093         ucast.is_tx_filter = true;
2094
2095         return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
2096 }
2097
2098 static int qed_configure_filter_mcast(struct qed_dev *cdev,
2099                                       struct qed_filter_mcast_params *params)
2100 {
2101         struct qed_filter_mcast mcast;
2102         int i;
2103
2104         memset(&mcast, 0, sizeof(mcast));
2105         switch (params->type) {
2106         case QED_FILTER_XCAST_TYPE_ADD:
2107                 mcast.opcode = QED_FILTER_ADD;
2108                 break;
2109         case QED_FILTER_XCAST_TYPE_DEL:
2110                 mcast.opcode = QED_FILTER_REMOVE;
2111                 break;
2112         default:
2113                 DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
2114                           params->type);
2115         }
2116
2117         mcast.num_mc_addrs = params->num;
2118         for (i = 0; i < mcast.num_mc_addrs; i++)
2119                 ether_addr_copy(mcast.mac[i], params->mac[i]);
2120
2121         return qed_filter_mcast_cmd(cdev, &mcast,
2122                                     QED_SPQ_MODE_CB, NULL);
2123 }
2124
2125 static int qed_configure_filter(struct qed_dev *cdev,
2126                                 struct qed_filter_params *params)
2127 {
2128         enum qed_filter_rx_mode_type accept_flags;
2129
2130         switch (params->type) {
2131         case QED_FILTER_TYPE_UCAST:
2132                 return qed_configure_filter_ucast(cdev, &params->filter.ucast);
2133         case QED_FILTER_TYPE_MCAST:
2134                 return qed_configure_filter_mcast(cdev, &params->filter.mcast);
2135         case QED_FILTER_TYPE_RX_MODE:
2136                 accept_flags = params->filter.accept_flags;
2137                 return qed_configure_filter_rx_mode(cdev, accept_flags);
2138         default:
2139                 DP_NOTICE(cdev, "Unknown filter type %d\n",
2140                           (int)params->type);
2141                 return -EINVAL;
2142         }
2143 }
2144
2145 static int qed_fp_cqe_completion(struct qed_dev *dev,
2146                                  u8 rss_id,
2147                                  struct eth_slow_path_rx_cqe *cqe)
2148 {
2149         return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
2150                                       cqe);
2151 }
2152
#ifdef CONFIG_QED_SRIOV
extern const struct qed_iov_hv_ops qed_iov_ops_pass;
#endif

/* L2 operations table handed to the protocol driver via qed_get_eth_ops() */
static const struct qed_eth_ops qed_eth_ops_pass = {
        .common = &qed_common_ops_pass,
#ifdef CONFIG_QED_SRIOV
        .iov = &qed_iov_ops_pass,
#endif
        .fill_dev_info = &qed_fill_eth_dev_info,
        .register_ops = &qed_register_eth_ops,
        .check_mac = &qed_check_mac,
        .vport_start = &qed_start_vport,
        .vport_stop = &qed_stop_vport,
        .vport_update = &qed_update_vport,
        .q_rx_start = &qed_start_rxq,
        .q_rx_stop = &qed_stop_rxq,
        .q_tx_start = &qed_start_txq,
        .q_tx_stop = &qed_stop_txq,
        .filter_config = &qed_configure_filter,
        .fastpath_stop = &qed_fastpath_stop,
        .eth_cqe_completion = &qed_fp_cqe_completion,
        .get_vport_stats = &qed_get_vport_stats,
        .tunn_config = &qed_tunn_configure,
};
2178
/* Expose the L2 operations table to the protocol (qede) driver. */
const struct qed_eth_ops *qed_get_eth_ops(void)
{
        return &qed_eth_ops_pass;
}
EXPORT_SYMBOL(qed_get_eth_ops);
2184
/* Counterpart of qed_get_eth_ops(); currently a no-op. */
void qed_put_eth_ops(void)
{
        /* TODO - reference count for module? */
}
EXPORT_SYMBOL(qed_put_eth_ops);