/* QLogic qed NIC Driver
 *
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#ifndef _QED_IF_H
#define _QED_IF_H

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/qed/common_hsi.h>
#include <linux/qed/qed_chain.h>

enum dcbx_protocol_type {
        DCBX_PROTOCOL_ISCSI,
        DCBX_PROTOCOL_FCOE,
        DCBX_PROTOCOL_ROCE,
        DCBX_PROTOCOL_ROCE_V2,
        DCBX_PROTOCOL_ETH,
        DCBX_MAX_PROTOCOL_TYPE
};

#define QED_ROCE_PROTOCOL_INDEX (3)

#ifdef CONFIG_DCB
#define QED_LLDP_CHASSIS_ID_STAT_LEN 4
#define QED_LLDP_PORT_ID_STAT_LEN 4
#define QED_DCBX_MAX_APP_PROTOCOL 32
#define QED_MAX_PFC_PRIORITIES 8
#define QED_DCBX_DSCP_SIZE 64

struct qed_dcbx_lldp_remote {
        u32 peer_chassis_id[QED_LLDP_CHASSIS_ID_STAT_LEN];
        u32 peer_port_id[QED_LLDP_PORT_ID_STAT_LEN];
        bool enable_rx;
        bool enable_tx;
        u32 tx_interval;
        u32 max_credit;
};

struct qed_dcbx_lldp_local {
        u32 local_chassis_id[QED_LLDP_CHASSIS_ID_STAT_LEN];
        u32 local_port_id[QED_LLDP_PORT_ID_STAT_LEN];
};

struct qed_dcbx_app_prio {
        u8 roce;
        u8 roce_v2;
        u8 fcoe;
        u8 iscsi;
        u8 eth;
};

struct qed_dbcx_pfc_params {
        bool willing;
        bool enabled;
        u8 prio[QED_MAX_PFC_PRIORITIES];
        u8 max_tc;
};

enum qed_dcbx_sf_ieee_type {
        QED_DCBX_SF_IEEE_ETHTYPE,
        QED_DCBX_SF_IEEE_TCP_PORT,
        QED_DCBX_SF_IEEE_UDP_PORT,
        QED_DCBX_SF_IEEE_TCP_UDP_PORT
};

struct qed_app_entry {
        bool ethtype;
        enum qed_dcbx_sf_ieee_type sf_ieee;
        bool enabled;
        u8 prio;
        u16 proto_id;
        enum dcbx_protocol_type proto_type;
};

struct qed_dcbx_params {
        struct qed_app_entry app_entry[QED_DCBX_MAX_APP_PROTOCOL];
        u16 num_app_entries;
        bool app_willing;
        bool app_valid;
        bool app_error;
        bool ets_willing;
        bool ets_enabled;
        bool ets_cbs;
        bool valid;
        u8 ets_pri_tc_tbl[QED_MAX_PFC_PRIORITIES];
        u8 ets_tc_bw_tbl[QED_MAX_PFC_PRIORITIES];
        u8 ets_tc_tsa_tbl[QED_MAX_PFC_PRIORITIES];
        struct qed_dbcx_pfc_params pfc;
        u8 max_ets_tc;
};

struct qed_dcbx_admin_params {
        struct qed_dcbx_params params;
        bool valid;
};

struct qed_dcbx_remote_params {
        struct qed_dcbx_params params;
        bool valid;
};

struct qed_dcbx_operational_params {
        struct qed_dcbx_app_prio app_prio;
        struct qed_dcbx_params params;
        bool valid;
        bool enabled;
        bool ieee;
        bool cee;
        u32 err;
};

struct qed_dcbx_get {
        struct qed_dcbx_operational_params operational;
        struct qed_dcbx_lldp_remote lldp_remote;
        struct qed_dcbx_lldp_local lldp_local;
        struct qed_dcbx_remote_params remote;
        struct qed_dcbx_admin_params local;
};
#endif
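
/* Example (illustrative, not part of this API): with CONFIG_DCB enabled, a
 * protocol driver that has obtained a filled struct qed_dcbx_get (via the
 * DCB query path, which lives outside this header) could look up the
 * negotiated priority for a protocol roughly like this:
 *
 *	static u8 example_dcbx_app_prio(struct qed_dcbx_get *dcbx,
 *					enum dcbx_protocol_type type)
 *	{
 *		int i;
 *
 *		for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) {
 *			struct qed_app_entry *entry =
 *				&dcbx->operational.params.app_entry[i];
 *
 *			if (entry->enabled && entry->proto_type == type)
 *				return entry->prio;
 *		}
 *
 *		return 0;
 *	}
 */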

enum qed_led_mode {
        QED_LED_MODE_OFF,
        QED_LED_MODE_ON,
        QED_LED_MODE_RESTORE
};

#define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \
                                            (void __iomem *)(reg_addr))

#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))

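/* Example (illustrative): DIRECT_REG_WR/DIRECT_REG_RD wrap writel()/readl()
 * and perform the __iomem cast themselves, so callers pass a CPU-visible
 * register address, e.g. one derived from an ioremap()ed BAR:
 *
 *	u32 val;
 *
 *	val = DIRECT_REG_RD(reg_addr);
 *	DIRECT_REG_WR(reg_addr, val | 0x1);
 */
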
#define QED_COALESCE_MAX 0xFF
#define QED_DEFAULT_RX_USECS 12

/* forward */
struct qed_dev;

struct qed_eth_pf_params {
        /* The following parameters are used during HW-init and need to be
         * passed as arguments to the update_pf_params routine invoked before
         * slowpath start.
         */
        u16 num_cons;
};

/* Most of the parameters below are described in the FW iSCSI / TCP HSI */
struct qed_iscsi_pf_params {
        u64 glbl_q_params_addr;
        u64 bdq_pbl_base_addr[2];
        u32 max_cwnd;
        u16 cq_num_entries;
        u16 cmdq_num_entries;
        u16 dup_ack_threshold;
        u16 tx_sws_timer;
        u16 min_rto;
        u16 min_rto_rt;
        u16 max_rto;

        /* The following parameters are used during HW-init and need to be
         * passed as arguments to the update_pf_params routine invoked before
         * slowpath start.
         */
        u16 num_cons;
        u16 num_tasks;

        /* The following parameters are used during protocol-init */
        u16 half_way_close_timeout;
        u16 bdq_xoff_threshold[2];
        u16 bdq_xon_threshold[2];
        u16 cmdq_xoff_threshold;
        u16 cmdq_xon_threshold;
        u16 rq_buffer_size;

        u8 num_sq_pages_in_ring;
        u8 num_r2tq_pages_in_ring;
        u8 num_uhq_pages_in_ring;
        u8 num_queues;
        u8 log_page_size;
        u8 rqe_log_size;
        u8 max_fin_rt;
        u8 gl_rq_pi;
        u8 gl_cmd_pi;
        u8 debug_mode;
        u8 ll2_ooo_queue_id;
        u8 ooo_enable;

        u8 is_target;
        u8 bdq_pbl_num_entries[2];
};

struct qed_rdma_pf_params {
        /* Supplied to QED during resource allocation (may affect the ILT and
         * the doorbell BAR).
         */
        u32 min_dpis;           /* number of requested DPIs */
        u32 num_mrs;            /* number of requested memory regions */
        u32 num_qps;            /* number of requested Queue Pairs */
        u32 num_srqs;           /* number of requested SRQs */
        u8 roce_edpm_mode;      /* see QED_ROCE_EDPM_MODE_ENABLE */
        u8 gl_pi;               /* protocol index */

        /* Will allocate rate limiters to be used with QPs */
        u8 enable_dcqcn;
};

struct qed_pf_params {
        struct qed_eth_pf_params eth_pf_params;
        struct qed_iscsi_pf_params iscsi_pf_params;
        struct qed_rdma_pf_params rdma_pf_params;
};
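
/* Example (illustrative): a protocol driver typically zeroes a qed_pf_params,
 * fills only its own sub-structure and hands it to the update_pf_params()
 * callback (declared in struct qed_common_ops below) before starting the
 * slowpath. "common_ops" and "cdev" stand for the ops/device handles obtained
 * at probe time, and 64 is a made-up value:
 *
 *	struct qed_pf_params pf_params;
 *
 *	memset(&pf_params, 0, sizeof(pf_params));
 *	pf_params.eth_pf_params.num_cons = 64;
 *	common_ops->update_pf_params(cdev, &pf_params);
 */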

enum qed_int_mode {
        QED_INT_MODE_INTA,
        QED_INT_MODE_MSIX,
        QED_INT_MODE_MSI,
        QED_INT_MODE_POLL,
};

struct qed_sb_info {
        struct status_block     *sb_virt;
        dma_addr_t              sb_phys;
        u32                     sb_ack; /* Last given ack */
        u16                     igu_sb_id;
        void __iomem            *igu_addr;
        u8                      flags;
#define QED_SB_INFO_INIT        0x1
#define QED_SB_INFO_SETUP       0x2

        struct qed_dev          *cdev;
};

struct qed_dev_info {
        unsigned long   pci_mem_start;
        unsigned long   pci_mem_end;
        unsigned int    pci_irq;
        u8              num_hwfns;

        u8              hw_mac[ETH_ALEN];
        bool            is_mf_default;

        /* FW version */
        u16             fw_major;
        u16             fw_minor;
        u16             fw_rev;
        u16             fw_eng;

        /* MFW version */
        u32             mfw_rev;

        u32             flash_size;
        u8              mf_mode;
        bool            tx_switching;
        bool            rdma_supported;
};

enum qed_sb_type {
        QED_SB_TYPE_L2_QUEUE,
        QED_SB_TYPE_CNQ,
};

enum qed_protocol {
        QED_PROTOCOL_ETH,
        QED_PROTOCOL_ISCSI,
};

enum qed_link_mode_bits {
        QED_LM_FIBRE_BIT = BIT(0),
        QED_LM_Autoneg_BIT = BIT(1),
        QED_LM_Asym_Pause_BIT = BIT(2),
        QED_LM_Pause_BIT = BIT(3),
        QED_LM_1000baseT_Half_BIT = BIT(4),
        QED_LM_1000baseT_Full_BIT = BIT(5),
        QED_LM_10000baseKR_Full_BIT = BIT(6),
        QED_LM_25000baseKR_Full_BIT = BIT(7),
        QED_LM_40000baseLR4_Full_BIT = BIT(8),
        QED_LM_50000baseKR2_Full_BIT = BIT(9),
        QED_LM_100000baseKR4_Full_BIT = BIT(10),
        QED_LM_COUNT = 11
};

struct qed_link_params {
        bool    link_up;

#define QED_LINK_OVERRIDE_SPEED_AUTONEG         BIT(0)
#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS      BIT(1)
#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED    BIT(2)
#define QED_LINK_OVERRIDE_PAUSE_CONFIG          BIT(3)
#define QED_LINK_OVERRIDE_LOOPBACK_MODE         BIT(4)
        u32     override_flags;
        bool    autoneg;
        u32     adv_speeds;
        u32     forced_speed;
#define QED_LINK_PAUSE_AUTONEG_ENABLE           BIT(0)
#define QED_LINK_PAUSE_RX_ENABLE                BIT(1)
#define QED_LINK_PAUSE_TX_ENABLE                BIT(2)
        u32     pause_config;
#define QED_LINK_LOOPBACK_NONE                  BIT(0)
#define QED_LINK_LOOPBACK_INT_PHY               BIT(1)
#define QED_LINK_LOOPBACK_EXT_PHY               BIT(2)
#define QED_LINK_LOOPBACK_EXT                   BIT(3)
#define QED_LINK_LOOPBACK_MAC                   BIT(4)
        u32     loopback_mode;
};
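
/* Example (illustrative): forcing 10 Gb/s (forced_speed is in Mb/s) with
 * Rx/Tx flow control through the set_link() callback defined below, assuming
 * common_ops/cdev were obtained at probe time:
 *
 *	struct qed_link_params params;
 *	int rc;
 *
 *	memset(&params, 0, sizeof(params));
 *	params.override_flags = QED_LINK_OVERRIDE_SPEED_AUTONEG |
 *				QED_LINK_OVERRIDE_SPEED_FORCED_SPEED |
 *				QED_LINK_OVERRIDE_PAUSE_CONFIG;
 *	params.autoneg = false;
 *	params.forced_speed = 10000;
 *	params.pause_config = QED_LINK_PAUSE_RX_ENABLE |
 *			      QED_LINK_PAUSE_TX_ENABLE;
 *	params.link_up = true;
 *	rc = common_ops->set_link(cdev, &params);
 */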

struct qed_link_output {
        bool    link_up;

        /* In QED_LM_* defs */
        u32     supported_caps;
        u32     advertised_caps;
        u32     lp_caps;

        u32     speed;                  /* In Mb/s */
        u8      duplex;                 /* In DUPLEX defs */
        u8      port;                   /* In PORT defs */
        bool    autoneg;
        u32     pause_config;
};

struct qed_probe_params {
        enum qed_protocol protocol;
        u32 dp_module;
        u8 dp_level;
        bool is_vf;
};

#define QED_DRV_VER_STR_SIZE 12
struct qed_slowpath_params {
        u32     int_mode;
        u8      drv_major;
        u8      drv_minor;
        u8      drv_rev;
        u8      drv_eng;
        u8      name[QED_DRV_VER_STR_SIZE];
};
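
/* Example (illustrative): starting the slowpath with MSI-X and a short driver
 * name; the version numbers and "mydrv" are placeholders:
 *
 *	struct qed_slowpath_params sp_params;
 *	int rc;
 *
 *	memset(&sp_params, 0, sizeof(sp_params));
 *	sp_params.int_mode = QED_INT_MODE_MSIX;
 *	sp_params.drv_major = 1;
 *	sp_params.drv_minor = 0;
 *	sp_params.drv_rev = 0;
 *	sp_params.drv_eng = 0;
 *	strlcpy((char *)sp_params.name, "mydrv", QED_DRV_VER_STR_SIZE);
 *	rc = common_ops->slowpath_start(cdev, &sp_params);
 */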

#define ILT_PAGE_SIZE_TCFC 0x8000 /* 32KB */

struct qed_int_info {
        struct msix_entry       *msix;
        u8                      msix_cnt;

        /* This should be updated by the protocol driver */
        u8                      used_cnt;
};
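
/* Example (illustrative): requesting fastpath interrupts and retrieving the
 * MSI-X entries actually allocated, using the set_fp_int()/get_fp_int()
 * callbacks defined below; num_queues is a driver-chosen value:
 *
 *	struct qed_int_info int_info;
 *	int rc;
 *
 *	rc = common_ops->set_fp_int(cdev, num_queues);
 *	if (rc < 0)
 *		return rc;
 *
 *	rc = common_ops->get_fp_int(cdev, &int_info);
 *	if (rc)
 *		return rc;
 *
 *	(int_info.msix[i].vector is then valid for i < int_info.msix_cnt)
 */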

struct qed_common_cb_ops {
        void    (*link_update)(void                     *dev,
                               struct qed_link_output   *link);
};
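
/* Example (illustrative): a protocol driver supplies these callbacks when it
 * registers with the qed core (the registration call itself is declared in
 * the protocol-specific qed header, not here). A minimal link notification
 * handler might look like this, with mydrv_dev/ndev as driver-private names:
 *
 *	static void mydrv_link_update(void *dev, struct qed_link_output *link)
 *	{
 *		struct mydrv_dev *mdev = dev;
 *
 *		if (link->link_up)
 *			netif_carrier_on(mdev->ndev);
 *		else
 *			netif_carrier_off(mdev->ndev);
 *	}
 *
 *	static struct qed_common_cb_ops mydrv_cb_ops = {
 *		.link_update = mydrv_link_update,
 *	};
 */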

struct qed_selftest_ops {
/**
 * @brief selftest_interrupt - Perform interrupt test
 *
 * @param cdev
 *
 * @return 0 on success, error otherwise.
 */
        int (*selftest_interrupt)(struct qed_dev *cdev);

/**
 * @brief selftest_memory - Perform memory test
 *
 * @param cdev
 *
 * @return 0 on success, error otherwise.
 */
        int (*selftest_memory)(struct qed_dev *cdev);

/**
 * @brief selftest_register - Perform register test
 *
 * @param cdev
 *
 * @return 0 on success, error otherwise.
 */
        int (*selftest_register)(struct qed_dev *cdev);

/**
 * @brief selftest_clock - Perform clock test
 *
 * @param cdev
 *
 * @return 0 on success, error otherwise.
 */
        int (*selftest_clock)(struct qed_dev *cdev);
};

struct qed_common_ops {
        struct qed_selftest_ops *selftest;

        struct qed_dev* (*probe)(struct pci_dev *dev,
                                 struct qed_probe_params *params);

        void            (*remove)(struct qed_dev *cdev);

        int             (*set_power_state)(struct qed_dev *cdev,
                                           pci_power_t state);

        void            (*set_id)(struct qed_dev *cdev,
                                  char name[],
                                  char ver_str[]);

        /* Client drivers need to make this call before slowpath_start.
         * The PF params required for the call before slowpath_start are
         * documented within the qed_pf_params structure definition.
         */
        void            (*update_pf_params)(struct qed_dev *cdev,
                                            struct qed_pf_params *params);
        int             (*slowpath_start)(struct qed_dev *cdev,
                                          struct qed_slowpath_params *params);

        int             (*slowpath_stop)(struct qed_dev *cdev);

        /* Requests to use `cnt' interrupts for fastpath.
         * Upon success, returns the number of interrupts allocated for
         * fastpath.
         */
        int             (*set_fp_int)(struct qed_dev *cdev,
                                      u16 cnt);

        /* Fills `info' with pointers required for utilizing interrupts */
        int             (*get_fp_int)(struct qed_dev *cdev,
                                      struct qed_int_info *info);

        u32             (*sb_init)(struct qed_dev *cdev,
                                   struct qed_sb_info *sb_info,
                                   void *sb_virt_addr,
                                   dma_addr_t sb_phy_addr,
                                   u16 sb_id,
                                   enum qed_sb_type type);

        u32             (*sb_release)(struct qed_dev *cdev,
                                      struct qed_sb_info *sb_info,
                                      u16 sb_id);

        void            (*simd_handler_config)(struct qed_dev *cdev,
                                               void *token,
                                               int index,
                                               void (*handler)(void *));

        void            (*simd_handler_clean)(struct qed_dev *cdev,
                                              int index);

        int (*dbg_all_data) (struct qed_dev *cdev, void *buffer);

        int (*dbg_all_data_size) (struct qed_dev *cdev);

/**
 * @brief can_link_change - can the instance change the link or not
 *
 * @param cdev
 *
 * @return true if link-change is allowed, false otherwise.
 */
        bool (*can_link_change)(struct qed_dev *cdev);

/**
 * @brief set_link - set links according to params
 *
 * @param cdev
 * @param params - values used to override the default link configuration
 *
 * @return 0 on success, error otherwise.
 */
        int             (*set_link)(struct qed_dev *cdev,
                                    struct qed_link_params *params);

/**
 * @brief get_link - returns the current link state.
 *
 * @param cdev
 * @param if_link - structure to be filled with current link configuration.
 */
        void            (*get_link)(struct qed_dev *cdev,
                                    struct qed_link_output *if_link);

/**
 * @brief - drains chip in case Tx completions fail to arrive due to pause.
 *
 * @param cdev
 */
        int             (*drain)(struct qed_dev *cdev);

/**
 * @brief update_msglvl - update module debug level
 *
 * @param cdev
 * @param dp_module
 * @param dp_level
 */
        void            (*update_msglvl)(struct qed_dev *cdev,
                                         u32 dp_module,
                                         u8 dp_level);

        int             (*chain_alloc)(struct qed_dev *cdev,
                                       enum qed_chain_use_mode intended_use,
                                       enum qed_chain_mode mode,
                                       enum qed_chain_cnt_type cnt_type,
                                       u32 num_elems,
                                       size_t elem_size,
                                       struct qed_chain *p_chain);

        void            (*chain_free)(struct qed_dev *cdev,
                                      struct qed_chain *p_chain);

/**
 * @brief get_coalesce - Get coalesce parameters in usec
 *
 * @param cdev
 * @param rx_coal - Rx coalesce value in usec
 * @param tx_coal - Tx coalesce value in usec
 *
 */
        void (*get_coalesce)(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal);

/**
 * @brief set_coalesce - Configure Rx coalesce value in usec
 *
 * @param cdev
 * @param rx_coal - Rx coalesce value in usec
 * @param tx_coal - Tx coalesce value in usec
 * @param qid - Queue index
 * @param sb_id - Status Block Id
 *
 * @return 0 on success, error otherwise.
 */
        int (*set_coalesce)(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
                            u8 qid, u16 sb_id);

/**
 * @brief set_led - Configure LED mode
 *
 * @param cdev
 * @param mode - LED mode
 *
 * @return 0 on success, error otherwise.
 */
        int (*set_led)(struct qed_dev *cdev,
                       enum qed_led_mode mode);
};
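
/* Example (illustrative): the rough call order a protocol driver follows with
 * the callbacks above, error handling and fastpath details omitted:
 *
 *	struct qed_probe_params probe_params = {
 *		.protocol = QED_PROTOCOL_ETH,
 *	};
 *
 *	cdev = common_ops->probe(pdev, &probe_params);
 *	common_ops->update_pf_params(cdev, &pf_params);
 *	common_ops->slowpath_start(cdev, &sp_params);
 *
 *	(fastpath setup: set_fp_int(), get_fp_int(), sb_init(), ...)
 *
 *	common_ops->slowpath_stop(cdev);
 *	common_ops->remove(cdev);
 */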

#define MASK_FIELD(_name, _value) \
        ((_value) &= (_name ## _MASK))

#define FIELD_VALUE(_name, _value) \
        ((_value & _name ## _MASK) << _name ## _SHIFT)

#define SET_FIELD(value, name, flag)                           \
        do {                                                   \
                (value) &= ~(name ## _MASK << name ## _SHIFT); \
                (value) |= (((u64)flag) << (name ## _SHIFT));  \
        } while (0)

#define GET_FIELD(value, name) \
        (((value) >> (name ## _SHIFT)) & name ## _MASK)

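/* Example (illustrative): SET_FIELD()/GET_FIELD() expect _MASK/_SHIFT define
 * pairs, as used throughout the qed HSI headers. With the hypothetical pair
 *
 *	#define EX_FIELD_MASK	0xf
 *	#define EX_FIELD_SHIFT	4
 *
 * a caller can update and read back the 4-bit field at bits 7:4 of a word:
 *
 *	SET_FIELD(reg_val, EX_FIELD, 0x3);
 *	field = GET_FIELD(reg_val, EX_FIELD);
 */
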
/* Debug print definitions */
#define DP_ERR(cdev, fmt, ...)                                               \
                pr_err("[%s:%d(%s)]" fmt,                                    \
                       __func__, __LINE__,                                   \
                       DP_NAME(cdev) ? DP_NAME(cdev) : "",                   \
                       ## __VA_ARGS__)

#define DP_NOTICE(cdev, fmt, ...)                                     \
        do {                                                          \
                if (unlikely((cdev)->dp_level <= QED_LEVEL_NOTICE)) { \
                        pr_notice("[%s:%d(%s)]" fmt,                  \
                                  __func__, __LINE__,                 \
                                  DP_NAME(cdev) ? DP_NAME(cdev) : "", \
                                  ## __VA_ARGS__);                    \
                                                                      \
                }                                                     \
        } while (0)

#define DP_INFO(cdev, fmt, ...)                                       \
        do {                                                          \
                if (unlikely((cdev)->dp_level <= QED_LEVEL_INFO)) {   \
                        pr_notice("[%s:%d(%s)]" fmt,                  \
                                  __func__, __LINE__,                 \
                                  DP_NAME(cdev) ? DP_NAME(cdev) : "", \
                                  ## __VA_ARGS__);                    \
                }                                                     \
        } while (0)

#define DP_VERBOSE(cdev, module, fmt, ...)                              \
        do {                                                            \
                if (unlikely(((cdev)->dp_level <= QED_LEVEL_VERBOSE) && \
                             ((cdev)->dp_module & module))) {           \
                        pr_notice("[%s:%d(%s)]" fmt,                    \
                                  __func__, __LINE__,                   \
                                  DP_NAME(cdev) ? DP_NAME(cdev) : "",   \
                                  ## __VA_ARGS__);                      \
                }                                                       \
        } while (0)

enum DP_LEVEL {
        QED_LEVEL_VERBOSE       = 0x0,
        QED_LEVEL_INFO          = 0x1,
        QED_LEVEL_NOTICE        = 0x2,
        QED_LEVEL_ERR           = 0x3,
};

#define QED_LOG_LEVEL_SHIFT     (30)
#define QED_LOG_VERBOSE_MASK    (0x3fffffff)
#define QED_LOG_INFO_MASK       (0x40000000)
#define QED_LOG_NOTICE_MASK     (0x80000000)

enum DP_MODULE {
        QED_MSG_SPQ     = 0x10000,
        QED_MSG_STATS   = 0x20000,
        QED_MSG_DCB     = 0x40000,
        QED_MSG_IOV     = 0x80000,
        QED_MSG_SP      = 0x100000,
        QED_MSG_STORAGE = 0x200000,
        QED_MSG_CXT     = 0x800000,
        QED_MSG_LL2     = 0x1000000,
        QED_MSG_ILT     = 0x2000000,
        QED_MSG_RDMA    = 0x4000000,
        QED_MSG_DEBUG   = 0x8000000,
        /* to be added...up to 0x8000000 */
};

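/* Example (illustrative): the debug macros take the device as context;
 * DP_VERBOSE() prints are additionally gated on a DP_MODULE bit:
 *
 *	DP_INFO(cdev, "MFW rev 0x%08x\n", dev_info.mfw_rev);
 *	DP_VERBOSE(cdev, QED_MSG_SPQ, "completion on cid %d\n", cid);
 *	DP_ERR(cdev, "slowpath start failed, rc %d\n", rc);
 */
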
enum qed_mf_mode {
        QED_MF_DEFAULT,
        QED_MF_OVLAN,
        QED_MF_NPAR,
};

struct qed_eth_stats {
        u64     no_buff_discards;
        u64     packet_too_big_discard;
        u64     ttl0_discard;
        u64     rx_ucast_bytes;
        u64     rx_mcast_bytes;
        u64     rx_bcast_bytes;
        u64     rx_ucast_pkts;
        u64     rx_mcast_pkts;
        u64     rx_bcast_pkts;
        u64     mftag_filter_discards;
        u64     mac_filter_discards;
        u64     tx_ucast_bytes;
        u64     tx_mcast_bytes;
        u64     tx_bcast_bytes;
        u64     tx_ucast_pkts;
        u64     tx_mcast_pkts;
        u64     tx_bcast_pkts;
        u64     tx_err_drop_pkts;
        u64     tpa_coalesced_pkts;
        u64     tpa_coalesced_events;
        u64     tpa_aborts_num;
        u64     tpa_not_coalesced_pkts;
        u64     tpa_coalesced_bytes;

        /* port */
        u64     rx_64_byte_packets;
        u64     rx_65_to_127_byte_packets;
        u64     rx_128_to_255_byte_packets;
        u64     rx_256_to_511_byte_packets;
        u64     rx_512_to_1023_byte_packets;
        u64     rx_1024_to_1518_byte_packets;
        u64     rx_1519_to_1522_byte_packets;
        u64     rx_1519_to_2047_byte_packets;
        u64     rx_2048_to_4095_byte_packets;
        u64     rx_4096_to_9216_byte_packets;
        u64     rx_9217_to_16383_byte_packets;
        u64     rx_crc_errors;
        u64     rx_mac_crtl_frames;
        u64     rx_pause_frames;
        u64     rx_pfc_frames;
        u64     rx_align_errors;
        u64     rx_carrier_errors;
        u64     rx_oversize_packets;
        u64     rx_jabbers;
        u64     rx_undersize_packets;
        u64     rx_fragments;
        u64     tx_64_byte_packets;
        u64     tx_65_to_127_byte_packets;
        u64     tx_128_to_255_byte_packets;
        u64     tx_256_to_511_byte_packets;
        u64     tx_512_to_1023_byte_packets;
        u64     tx_1024_to_1518_byte_packets;
        u64     tx_1519_to_2047_byte_packets;
        u64     tx_2048_to_4095_byte_packets;
        u64     tx_4096_to_9216_byte_packets;
        u64     tx_9217_to_16383_byte_packets;
        u64     tx_pause_frames;
        u64     tx_pfc_frames;
        u64     tx_lpi_entry_count;
        u64     tx_total_collisions;
        u64     brb_truncates;
        u64     brb_discards;
        u64     rx_mac_bytes;
        u64     rx_mac_uc_packets;
        u64     rx_mac_mc_packets;
        u64     rx_mac_bc_packets;
        u64     rx_mac_frames_ok;
        u64     tx_mac_bytes;
        u64     tx_mac_uc_packets;
        u64     tx_mac_mc_packets;
        u64     tx_mac_bc_packets;
        u64     tx_mac_ctrl_frames;
};

#define QED_SB_IDX              0x0002

#define RX_PI           0
#define TX_PI(tc)       (RX_PI + 1 + tc)

struct qed_sb_cnt_info {
        int     sb_cnt;
        int     sb_iov_cnt;
        int     sb_free_blk;
};

static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
{
        u32 prod = 0;
        u16 rc = 0;

        prod = le32_to_cpu(sb_info->sb_virt->prod_index) &
               STATUS_BLOCK_PROD_INDEX_MASK;
        if (sb_info->sb_ack != prod) {
                sb_info->sb_ack = prod;
                rc |= QED_SB_IDX;
        }

        /* Let SB update */
        mmiowb();
        return rc;
}

/**
 *
 * @brief This function creates an update command for interrupts that is
 *        written to the IGU.
 *
 * @param sb_info       - This is the structure allocated and
 *                 initialized per status block. Assumption is
 *                 that it was initialized using qed_sb_init
 * @param int_cmd       - Enable/Disable/Nop
 * @param upd_flg       - whether igu consumer should be
 *                 updated.
 *
 * @return inline void
 */
static inline void qed_sb_ack(struct qed_sb_info *sb_info,
                              enum igu_int_cmd int_cmd,
                              u8 upd_flg)
{
        struct igu_prod_cons_update igu_ack = { 0 };

        igu_ack.sb_id_and_flags =
                ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
                 (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
                 (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
                 (IGU_SEG_ACCESS_REG <<
                  IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));

        DIRECT_REG_WR(sb_info->igu_addr, igu_ack.sb_id_and_flags);

        /* Both segments (interrupts & acks) are written to the same address;
         * need to guarantee all commands will be received (in-order) by HW.
         */
        mmiowb();
        barrier();
}

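/* Example (illustrative): a common fastpath pattern is to disable the status
 * block's interrupt when it fires and re-enable it (updating the IGU
 * consumer) once polling is done, e.g. at the end of a NAPI poll. The
 * IGU_INT_* commands come from the common HSI definitions included above:
 *
 *	qed_sb_ack(sb_info, IGU_INT_DISABLE, 0);
 *	(... process completions ...)
 *	qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
 */
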
static inline void __internal_ram_wr(void *p_hwfn,
                                     void __iomem *addr,
                                     int size,
                                     u32 *data)
{
        unsigned int i;

        for (i = 0; i < size / sizeof(*data); i++)
                DIRECT_REG_WR(&((u32 __iomem *)addr)[i], data[i]);
}

static inline void internal_ram_wr(void __iomem *addr,
                                   int size,
                                   u32 *data)
{
        __internal_ram_wr(NULL, addr, size, data);
}

enum qed_rss_caps {
        QED_RSS_IPV4            = 0x1,
        QED_RSS_IPV6            = 0x2,
        QED_RSS_IPV4_TCP        = 0x4,
        QED_RSS_IPV6_TCP        = 0x8,
        QED_RSS_IPV4_UDP        = 0x10,
        QED_RSS_IPV6_UDP        = 0x20,
};

#define QED_RSS_IND_TABLE_SIZE 128
#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */
#endif