diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index b0ab12b..bd34d0b 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
 #define RVT_PROCESS_OR_FLUSH_SEND \
        (RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND)
 
+/*
+ * Internal send flags
+ */
+#define RVT_SEND_RESERVE_USED           IB_SEND_RESERVED_START
+#define RVT_SEND_COMPLETION_ONLY       (IB_SEND_RESERVED_START << 1)
+
 /*
  * Send work request queue entry.
  * The size of the sg_list is determined when the QP is created and stored
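The two new flags live in the provider-reserved region of enum ib_send_flags (IB_SEND_RESERVED_START and up, with RVT_SEND_COMPLETION_ONLY taking the next reserved bit), so they can travel in wr.send_flags without colliding with the IB_SEND_* bits a consumer may set. A minimal sketch of how driver code might test one of them; the helper below is hypothetical, while the flag and struct names come from this header:

/* Hypothetical helper; RVT_SEND_RESERVE_USED is defined above. */
static inline bool example_wqe_used_reserve(const struct rvt_swqe *wqe)
{
        /* Internal bits are never reported back to the consumer. */
        return !!(wqe->wr.send_flags & RVT_SEND_RESERVE_USED);
}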
@@ -216,14 +222,12 @@ struct rvt_mmap_info {
  * to send a RDMA read response or atomic operation.
  */
 struct rvt_ack_entry {
-       u8 opcode;
-       u8 sent;
+       struct rvt_sge rdma_sge;
+       u64 atomic_data;
        u32 psn;
        u32 lpsn;
-       union {
-               struct rvt_sge rdma_sge;
-               u64 atomic_data;
-       };
+       u8 opcode;
+       u8 sent;
 };
 
 #define        RC_QP_SCALING_INTERVAL  5
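With the union gone, an ack entry can hold an SGE and atomic data as independent members, and the reorder sorts members by size so the two u8 fields sit at the tail instead of forcing a padding hole before psn. A hedged sketch of a responder filling an entry for an atomic acknowledge, in the style of the qib/hfi1 RC code (the function itself is hypothetical; field names are from rvt_ack_entry):

/* Hypothetical responder snippet. */
static void example_queue_atomic_ack(struct rvt_ack_entry *e,
                                     u32 psn, u64 result)
{
        e->atomic_data = result;        /* no longer aliases rdma_sge */
        e->psn = psn;
        e->lpsn = psn;                  /* an atomic occupies a single PSN */
        e->opcode = IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE;
        e->sent = 0;
}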
@@ -232,6 +236,7 @@ struct rvt_ack_entry {
 #define RVT_OPERATION_ATOMIC      0x00000002
 #define RVT_OPERATION_ATOMIC_SGE  0x00000004
 #define RVT_OPERATION_LOCAL       0x00000008
+#define RVT_OPERATION_USE_RESERVE 0x00000010
 
 #define RVT_OPERATION_MAX (IB_WR_RESERVED10 + 1)
 
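RVT_OPERATION_USE_RESERVE is a per-opcode capability bit: drivers describe each supported work-request opcode in a table sized by RVT_OPERATION_MAX, and an opcode carrying this bit may consume one of the send queue's reserved slots. A hedged sketch of such a table entry, assuming the rvt_operation_params descriptor used elsewhere in rdmavt; the specific entry is illustrative, not taken from a real driver:

/* Illustrative table entry; real drivers fill one slot per opcode. */
static const struct rvt_operation_params example_post_parms[RVT_OPERATION_MAX] = {
        [IB_WR_SEND] = {
                .length = sizeof(struct ib_send_wr),
                .qpt_support = BIT(IB_QPT_RC),
                .flags = RVT_OPERATION_USE_RESERVE,
        },
};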
@@ -328,6 +333,7 @@ struct rvt_qp {
        u32 s_next_psn;         /* PSN for next request */
        u32 s_avail;            /* number of entries avail */
        u32 s_ssn;              /* SSN of tail entry */
+       atomic_t s_reserved_used; /* reserved entries in use */
 
        spinlock_t s_lock ____cacheline_aligned_in_smp;
        u32 s_flags;
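s_reserved_used is an atomic counter, so the unreserve side can pair its decrement with a barrier and the post-send side can observe a stable count. A hedged, simplified sketch of the availability check a post-send path might make (the real accounting also walks the s_head/s_last ring; this helper and its arguments are hypothetical):

/* Hypothetical, simplified availability check. */
static int example_qp_is_avail(struct rvt_qp *qp, u32 reserved_ops,
                               bool reserved_op)
{
        if (reserved_op) {
                /* Reserved requests compete only for the reserved pool. */
                if (atomic_read(&qp->s_reserved_used) >= reserved_ops)
                        return -ENOMEM;
                return 0;
        }
        /* Normal requests use the regular s_avail accounting. */
        if (!qp->s_avail)
                return -ENOMEM;
        return 0;
}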
@@ -459,6 +465,49 @@ static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
                  rq->max_sge * sizeof(struct ib_sge)) * n);
 }
 
+/**
+ * rvt_qp_wqe_reserve - reserve operation
+ * @qp: the rvt qp
+ * @wqe: the send wqe
+ *
+ * This routine is used in the post-send path to record
+ * that a wqe consumes a reserved operation slot.
+ */
+static inline void rvt_qp_wqe_reserve(
+       struct rvt_qp *qp,
+       struct rvt_swqe *wqe)
+{
+       wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
+       atomic_inc(&qp->s_reserved_used);
+}
+
+/**
+ * rvt_qp_wqe_unreserve - release a reserved operation
+ * @qp: the rvt qp
+ * @wqe: the send wqe
+ *
+ * This decrements the reserve use count.
+ *
+ * This call MUST precede the change to
+ * s_last to ensure that post send sees a stable
+ * s_avail.
+ *
+ * An smp_mb__after_atomic() is used to ensure
+ * the compiler does not reorder the update of the s_last
+ * ring index with the decrement of s_reserved_used.
+ */
+static inline void rvt_qp_wqe_unreserve(
+       struct rvt_qp *qp,
+       struct rvt_swqe *wqe)
+{
+       if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
+               wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED;
+               atomic_dec(&qp->s_reserved_used);
+               /* ensure no reordering against the s_last change */
+               smp_mb__after_atomic();
+       }
+}
+
 extern const int  ib_rvt_state_ops[];
 
 struct rvt_dev_info;
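Taken together, the helpers bracket a reserved operation's lifetime: rvt_qp_wqe_reserve() runs at post-send time for opcodes flagged RVT_OPERATION_USE_RESERVE, and rvt_qp_wqe_unreserve() runs on completion, strictly before the s_last ring index advances. A hedged sketch of the completion side (the helpers are from this diff; the function around them is hypothetical):

/* Hypothetical completion-path snippet showing the required ordering. */
static void example_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
        /*
         * Decrement s_reserved_used first; the smp_mb__after_atomic()
         * inside rvt_qp_wqe_unreserve() keeps the s_last update below
         * from being reordered ahead of it.
         */
        rvt_qp_wqe_unreserve(qp, wqe);
        if (++qp->s_last >= qp->s_size)
                qp->s_last = 0;
}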