Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma

diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 67cc741..dcdcd19 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -44,6 +44,7 @@
 #include <linux/types.h>
 #include <linux/mlx5/transobj.h>
 #include <rdma/ib_user_verbs.h>
+#include <rdma/mlx5-abi.h>
 
 #define mlx5_ib_dbg(dev, format, arg...)                               \
 pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,   \
@@ -142,6 +143,7 @@ struct mlx5_ib_pd {
 #define MLX5_IB_FLOW_LEFTOVERS_PRIO    (MLX5_IB_FLOW_MCAST_PRIO + 1)
 
 #define MLX5_IB_NUM_FLOW_FT            (MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
+#define MLX5_IB_NUM_SNIFFER_FTS                2
 struct mlx5_ib_flow_prio {
        struct mlx5_flow_table          *flow_table;
        unsigned int                    refcount;
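[Annotation: MLX5_IB_NUM_SNIFFER_FTS sizes the new sniffer[] array added to struct mlx5_ib_flow_db in the next hunk; two entries are needed presumably because sniffing is installed per direction, one receive-side and one transmit-side flow table. A hedged sketch of how such an array might be indexed (the egress flag is hypothetical, not part of this diff):

	/* hypothetical: one sniffer flow-table priority per direction */
	struct mlx5_ib_flow_prio *ft_prio;

	ft_prio = &dev->flow_db.sniffer[egress ? 1 : 0];
]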
@@ -150,12 +152,14 @@ struct mlx5_ib_flow_prio {
 struct mlx5_ib_flow_handler {
        struct list_head                list;
        struct ib_flow                  ibflow;
-       unsigned int                    prio;
+       struct mlx5_ib_flow_prio        *prio;
        struct mlx5_flow_rule   *rule;
 };
 
 struct mlx5_ib_flow_db {
        struct mlx5_ib_flow_prio        prios[MLX5_IB_NUM_FLOW_FT];
+       struct mlx5_ib_flow_prio        sniffer[MLX5_IB_NUM_SNIFFER_FTS];
+       struct mlx5_flow_table          *lag_demux_ft;
        /* Protect flow steering bypass flow tables
         * when add/del flow rules.
         * only single add/removal of flow steering rule could be done
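[Annotation: two changes land here. First, the handler's prio member becomes a back-pointer to its mlx5_ib_flow_prio instead of a priority index, so teardown can reach the flow table and its refcount directly rather than re-deriving them from the flow attributes. Second, the flow database grows the sniffer priorities plus a lag_demux_ft table, apparently used to demultiplex traffic across ports when LAG is active. A minimal sketch of the destroy path the back-pointer enables, taken under the flow_db.lock documented in the struct (an illustration, not the driver's exact routine):

	struct mlx5_ib_flow_prio *prio = handler->prio;

	mutex_lock(&dev->flow_db.lock);
	mlx5_del_flow_rule(handler->rule);	/* remove the steering rule */
	if (--prio->refcount == 0)		/* last rule on this table */
		mlx5_destroy_flow_table(prio->flow_table);
	mutex_unlock(&dev->flow_db.lock);
	kfree(handler);
]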
@@ -225,7 +229,7 @@ struct mlx5_ib_wq {
 
 struct mlx5_ib_rwq {
        struct ib_wq            ibwq;
-       u32                     rqn;
+       struct mlx5_core_qp     core_qp;
        u32                     rq_num_pas;
        u32                     log_rq_stride;
        u32                     log_rq_size;
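[Annotation: replacing the bare rqn with an embedded struct mlx5_core_qp lets the receive work queue participate in the core driver's QP resource tracking and event delivery (see the to_mibrwq() helper added further down). The raw RQ number remains available through the embedded structure:

	/* where the old rwq->rqn was used, the number now lives here */
	u32 rqn = rwq->core_qp.qpn;
]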
@@ -603,6 +607,7 @@ struct mlx5_roce {
        rwlock_t                netdev_lock;
        struct net_device       *netdev;
        struct notifier_block   nb;
+       atomic_t                next_port;
 };
 
 struct mlx5_ib_dev {
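[Annotation: next_port adds an atomic round-robin counter over the physical ports, useful for spreading transmit affinity across a LAG bond without locking. A hedged sketch of the kind of selection this enables (MLX5_MAX_PORTS is the core driver's port count; the surrounding code is hypothetical):

	/* pick ports 1, 2, 1, 2, ... in a race-free round-robin */
	unsigned int port = (unsigned int)atomic_add_return(1, &dev->roce.next_port) %
			    MLX5_MAX_PORTS + 1;
]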
@@ -663,6 +668,11 @@ static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
        return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
 }
 
+static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
+{
+       return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
+}
+
 static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mkey *mmkey)
 {
        return container_of(mmkey, struct mlx5_ib_mr, mmkey);
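[Annotation: to_mibrwq() is the inverse of the core_qp embedding above: when the core driver raises an event on the RQ's mlx5_core_qp, container_of() recovers the owning RWQ so the event can be forwarded to the uverbs consumer. A hedged sketch of such a handler (the event mapping is illustrative; a real handler would translate the hardware event type):

	static void mlx5_ib_wq_event(struct mlx5_core_qp *core_qp, int type)
	{
		struct mlx5_ib_rwq *rwq = to_mibrwq(core_qp);

		if (rwq->ibwq.event_handler) {
			struct ib_event event;

			event.device     = rwq->ibwq.device;
			event.element.wq = &rwq->ibwq;
			event.event      = IB_EVENT_WQ_FATAL; /* illustrative */
			rwq->ibwq.event_handler(&event, rwq->ibwq.wq_context);
		}
	}
]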
@@ -947,4 +957,40 @@ static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
 
        return 0;
 }
+
+static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
+                                   struct mlx5_ib_create_qp *ucmd,
+                                   int inlen,
+                                   u32 *user_index)
+{
+       u8 cqe_version = ucontext->cqe_version;
+
+       if (field_avail(struct mlx5_ib_create_qp, uidx, inlen) &&
+           !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
+               return 0;
+
+       if (!!(field_avail(struct mlx5_ib_create_qp, uidx, inlen) !=
+              !!cqe_version))
+               return -EINVAL;
+
+       return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
+}
+
+static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
+                                    struct mlx5_ib_create_srq *ucmd,
+                                    int inlen,
+                                    u32 *user_index)
+{
+       u8 cqe_version = ucontext->cqe_version;
+
+       if (field_avail(struct mlx5_ib_create_srq, uidx, inlen) &&
+           !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
+               return 0;
+
+       if (!!(field_avail(struct mlx5_ib_create_srq, uidx, inlen) !=
+              !!cqe_version))
+               return -EINVAL;
+
+       return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
+}
 #endif /* MLX5_IB_H */
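[Annotation: the two new helpers share one contract: the user command must carry the uidx field if and only if the context negotiated CQE versioning (cqe_version != 0), with one escape hatch; a caller on an unversioned context may still pass the field as long as it holds the MLX5_IB_DEFAULT_UIDX sentinel. The first branch accepts that case, the second rejects any presence/version mismatch (the outer !! is redundant, since != already yields 0 or 1), and the tail delegates range checking and assignment to verify_assign_uidx(). Both rely on the field_avail() macro defined earlier in this header, outside the hunks shown; for reference, its usual shape checks whether the command buffer copied from userspace is long enough to contain a given field:

	#define field_avail(typ, fld, sz) (offsetof(typ, fld) + \
					   sizeof(((typ *)0)->fld) <= (sz))
]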