mlx5_core: Add support for page faults events and low level handling
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 3fa075d..6b1d6f6 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -50,6 +50,9 @@
 #define MLX5_BSF_APPTAG_ESCAPE 0x1
 #define MLX5_BSF_APPREF_ESCAPE 0x2
 
+#define MLX5_QPN_BITS          24
+#define MLX5_QPN_MASK          ((1 << MLX5_QPN_BITS) - 1)
+
 enum mlx5_qp_optpar {
        MLX5_QP_OPTPAR_ALT_ADDR_PATH            = 1 << 0,
        MLX5_QP_OPTPAR_RRE                      = 1 << 1,
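
MLX5_QPN_BITS/MLX5_QPN_MASK describe the 24-bit QP number used by the firmware interface. A minimal sketch of how a QPN could be pulled out of a 32-bit word that also carries flags in its top byte (as the flags_qpn mailbox field added later in this patch does); extract_qpn() is an illustrative helper, not part of the patch:

	/*
	 * Illustrative only: QPNs are 24 bits wide, so the low 24 bits of a
	 * dword hold the QPN and the top byte is free for flags.
	 */
	static inline u32 extract_qpn(__be32 flags_qpn)
	{
		return be32_to_cpu(flags_qpn) & MLX5_QPN_MASK;
	}
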
@@ -189,6 +192,9 @@ struct mlx5_wqe_ctrl_seg {
        __be32                  imm;
 };
 
+#define MLX5_WQE_CTRL_DS_MASK 0x3f
+#define MLX5_WQE_DS_UNITS 16
+
 struct mlx5_wqe_xrc_seg {
        __be32                  xrc_srqn;
        u8                      rsvd[12];
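
MLX5_WQE_DS_UNITS records that the DS (data segment) count in the WQE ctrl segment is expressed in 16-byte units, and MLX5_WQE_CTRL_DS_MASK isolates that 6-bit count. A minimal sketch of converting the count to a WQE size in bytes, assuming the qpn_ds field of struct mlx5_wqe_ctrl_seg (not shown in this hunk) carries the DS count in its low bits:

	/* Sketch: low 6 bits of ctrl->qpn_ds = number of 16-byte units. */
	static inline int mlx5_wqe_size_bytes(struct mlx5_wqe_ctrl_seg *ctrl)
	{
		int ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;

		return ds * MLX5_WQE_DS_UNITS;
	}
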
@@ -360,9 +366,46 @@ struct mlx5_stride_block_ctrl_seg {
        __be16          num_entries;
 };
 
+enum mlx5_pagefault_flags {
+       MLX5_PFAULT_REQUESTOR = 1 << 0,
+       MLX5_PFAULT_WRITE     = 1 << 1,
+       MLX5_PFAULT_RDMA      = 1 << 2,
+};
+
+/* Contains the details of a pagefault. */
+struct mlx5_pagefault {
+       u32                     bytes_committed;
+       u8                      event_subtype;
+       enum mlx5_pagefault_flags flags;
+       union {
+               /* Initiator or send message responder pagefault details. */
+               struct {
+                       /* Received packet size, only valid for responders. */
+                       u32     packet_size;
+                       /*
+                        * WQE index. Refers to either the send queue or
+                        * receive queue, according to event_subtype.
+                        */
+                       u16     wqe_index;
+               } wqe;
+               /* RDMA responder pagefault details */
+               struct {
+                       u32     r_key;
+                       /*
+                        * Received packet size, minimal size page fault
+                        * resolution required for forward progress.
+                        */
+                       u32     packet_size;
+                       u32     rdma_op_len;
+                       u64     rdma_va;
+               } rdma;
+       };
+};
+
 struct mlx5_core_qp {
        struct mlx5_core_rsc_common     common; /* must be first */
        void (*event)           (struct mlx5_core_qp *, int);
+       void (*pfault_handler)(struct mlx5_core_qp *, struct mlx5_pagefault *);
        int                     qpn;
        struct mlx5_rsc_debug   *dbg;
        int                     pid;
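
The new pfault_handler callback is how a consumer is told about a fault on one of its QPs; struct mlx5_pagefault tells it whether the fault came from a WQE or from an RDMA responder access. A minimal sketch of a handler that dispatches on the flags; resolve_wqe_fault()/resolve_rdma_fault() are hypothetical helpers standing in for whatever actually pages the memory in:

	static void my_pfault_handler(struct mlx5_core_qp *qp,
				      struct mlx5_pagefault *pfault)
	{
		if (pfault->flags & MLX5_PFAULT_RDMA)
			/* RDMA responder fault: r_key/rdma_va/rdma_op_len
			 * describe the remote access that faulted. */
			resolve_rdma_fault(qp, pfault->rdma.r_key,
					   pfault->rdma.rdma_va,
					   pfault->rdma.rdma_op_len);
		else
			/* Requestor or responder WQE fault: wqe_index points
			 * into the send or receive queue per event_subtype. */
			resolve_wqe_fault(qp, pfault->wqe.wqe_index,
					  pfault->flags & MLX5_PFAULT_REQUESTOR);
	}
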
@@ -530,6 +573,17 @@ static inline struct mlx5_core_mr *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u
        return radix_tree_lookup(&dev->priv.mr_table.tree, key);
 }
 
+struct mlx5_page_fault_resume_mbox_in {
+       struct mlx5_inbox_hdr   hdr;
+       __be32                  flags_qpn;
+       u8                      reserved[4];
+};
+
+struct mlx5_page_fault_resume_mbox_out {
+       struct mlx5_outbox_hdr  hdr;
+       u8                      rsvd[8];
+};
+
 int mlx5_core_create_qp(struct mlx5_core_dev *dev,
                        struct mlx5_core_qp *qp,
                        struct mlx5_create_qp_mbox_in *in,
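
The PAGE_FAULT_RESUME mailbox packs its flags and the QPN into the single flags_qpn dword. A sketch of how that dword might be built; only the 24-bit QPN placement follows from MLX5_QPN_MASK above, while the positions of the context flags and the error bit are assumptions, not taken from this patch:

	static __be32 build_flags_qpn(u32 qpn, u8 context, int error)
	{
		u32 v = (qpn & MLX5_QPN_MASK) |
			((u32)(context & 0x7f) << MLX5_QPN_BITS); /* assumed position */

		if (error)
			v |= 1U << 31;	/* assumed: error flag in the top bit */

		return cpu_to_be32(v);
	}
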
@@ -549,6 +603,10 @@ void mlx5_init_qp_table(struct mlx5_core_dev *dev);
 void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev);
 int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
 void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
+                               u8 context, int error);
+#endif
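
Under CONFIG_INFINIBAND_ON_DEMAND_PAGING the consumer is expected to call mlx5_core_page_fault_resume() once it has handled (or given up on) a fault, so the device resumes the QP or moves it to error. A usage sketch; where dev comes from and what exactly belongs in the context argument are assumptions here (the event subtype is passed only as an illustration):

	#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	static void resume_after_fault(struct mlx5_core_dev *dev,
				       struct mlx5_core_qp *qp,
				       struct mlx5_pagefault *pfault, int error)
	{
		/* error != 0 asks the device to move the QP to the error
		 * state instead of retrying the faulted access. */
		if (mlx5_core_page_fault_resume(dev, qp->qpn,
						pfault->event_subtype, error))
			pr_warn("page fault resume failed for QP 0x%x\n",
				qp->qpn);
	}
	#endif
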
 
 static inline const char *mlx5_qp_type_str(int type)
 {