Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland...
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index c48cf6f..251ae2f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -101,15 +101,21 @@ static void eq_set_ci(struct mlx4_eq *eq, int req_not)
        mb();
 }
 
-static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry)
+static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor)
 {
-       unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE;
-       return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
+       /* (entry & (eq->nent - 1)) gives us a cyclic array */
+       unsigned long offset = (entry & (eq->nent - 1)) * (MLX4_EQ_ENTRY_SIZE << eqe_factor);
+       /* CX3 is capable of extending the EQE from 32 to 64 bytes.
+        * When this feature is enabled, the first (in the lower addresses)
+        * 32 bytes in the 64-byte EQE are reserved and the next 32 bytes
+        * contain the legacy EQE information.
+        */
+       return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE;
 }
 
-static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
+static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor)
 {
-       struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index);
+       struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor);
        return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
 }
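
Worked example: the two helpers above combine a stride computation with an ownership parity test. Below is a standalone userspace sketch of both (the page size, entry count, and all demo_/main scaffolding are assumptions for illustration, not driver code): the stride doubles from 32 to 64 bytes when eqe_factor is set, the legacy payload sits in the upper 32 bytes of an extended EQE, and an entry is software-owned when the owner bit matches the parity of the current pass over the ring (the XOR in next_eqe_sw() evaluating to 0).

    #include <stdio.h>

    #define MLX4_EQ_ENTRY_SIZE 32           /* legacy 32-byte EQE, as in the driver */
    #define DEMO_PAGE_SIZE 4096UL           /* assumed page size */

    /* Byte offset of `entry` in a cyclic EQ of `nent` (power-of-two) entries. */
    static unsigned long eqe_offset(unsigned int entry, unsigned int nent,
                                    unsigned char eqe_factor)
    {
            unsigned long off = (entry & (nent - 1)) *
                                (MLX4_EQ_ENTRY_SIZE << eqe_factor);

            /* with 64-byte EQEs, skip the reserved low 32 bytes */
            return off + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0);
    }

    /* Software owns the EQE when the owner bit equals the pass parity. */
    static int eqe_is_sw_owned(unsigned char owner, unsigned int cons_index,
                               unsigned int nent)
    {
            return !(!!(owner & 0x80) ^ !!(cons_index & nent));
    }

    int main(void)
    {
            unsigned long off = eqe_offset(5, 512, 1);  /* entry 5, 512-entry EQ, 64B EQEs */

            printf("page %lu, offset-in-page %lu\n",
                   off / DEMO_PAGE_SIZE, off % DEMO_PAGE_SIZE);
            printf("pass 0, owner bit 0: sw-owned = %d\n", eqe_is_sw_owned(0x00, 5, 512));
            printf("pass 1, owner bit 0: sw-owned = %d\n", eqe_is_sw_owned(0x00, 512 + 5, 512));
            return 0;
    }
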
 
@@ -177,7 +183,7 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
                return;
        }
 
-       memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
+       memcpy(s_eqe, eqe, dev->caps.eqe_size - 1);
        s_eqe->slave_id = slave;
        /* ensure all information is written before setting the ownership bit */
        wmb();
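
The copy above deliberately stops one byte short: the final byte of the EQE carries the ownership bit, which must only be flipped after every other field is globally visible, hence the wmb() before it. A minimal standalone sketch of this publish pattern (the struct layout and barrier macro are stand-ins, not the driver's types):

    #include <string.h>

    /* Stand-in for a 32-byte EQE whose last byte carries the owner bit. */
    struct demo_eqe {
            unsigned char data[31];
            unsigned char owner;            /* bit 7: who owns the entry */
    };

    #define demo_wmb() __sync_synchronize() /* stand-in for the kernel's wmb() */

    static void demo_publish(struct demo_eqe *dst, const struct demo_eqe *src,
                             unsigned char owner)
    {
            memcpy(dst, src, sizeof(*dst) - 1);     /* everything but the owner byte */
            demo_wmb();                             /* payload becomes visible first... */
            dst->owner = owner;                     /* ...then the entry is handed over */
    }
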
@@ -401,6 +407,7 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
        struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
        int i;
        int err;
+       unsigned long flags;
 
        mlx4_dbg(dev, "mlx4_handle_slave_flr\n");
 
@@ -412,10 +419,10 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
 
                        mlx4_delete_all_resources_for_slave(dev, i);
                        /* return the slave to running mode */
-                       spin_lock(&priv->mfunc.master.slave_state_lock);
+                       spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
                        slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
                        slave_state[i].is_slave_going_down = 0;
-                       spin_unlock(&priv->mfunc.master.slave_state_lock);
+                       spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
                        /* notify the FW: */
                        err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
                                       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
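
The switch from plain spin_lock() matters because slave_state_lock is also taken in mlx4_eq_int(), which runs in hard-IRQ context: if the interrupt arrived on a CPU that already held the lock in process context, it would spin forever. A generic kernel-style sketch of the rule (all names hypothetical):

    #include <linux/spinlock.h>

    /* A lock shared with an IRQ handler must disable local interrupts
     * whenever it is taken outside that handler. */
    static DEFINE_SPINLOCK(demo_lock);
    static int demo_state;

    static void demo_update(int new_state)
    {
            unsigned long flags;

            spin_lock_irqsave(&demo_lock, flags);
            demo_state = new_state;
            spin_unlock_irqrestore(&demo_lock, flags);
    }

The _irqsave variant is used rather than _irq because it restores the caller's previous interrupt state, making it safe regardless of the context it is called from.
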
@@ -440,8 +447,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
        u8 update_slave_state;
        int i;
        enum slave_port_gen_event gen_event;
+       unsigned long flags;
 
-       while ((eqe = next_eqe_sw(eq))) {
+       while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor))) {
                /*
                 * Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
@@ -647,13 +655,13 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                        } else
                                update_slave_state = 1;
 
-                       spin_lock(&priv->mfunc.master.slave_state_lock);
+                       spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
                        if (update_slave_state) {
                                priv->mfunc.master.slave_state[flr_slave].active = false;
                                priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
                                priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
                        }
-                       spin_unlock(&priv->mfunc.master.slave_state_lock);
+                       spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
                        queue_work(priv->mfunc.master.comm_wq,
                                   &priv->mfunc.master.slave_flr_event_work);
                        break;
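
Note the division of labour in the FLR case: the interrupt path only flags the slave's state under the lock and queues work, while the sleepable cleanup (firmware commands, resource teardown) runs later in process context via mlx4_master_handle_slave_flr(). A kernel-style sketch of that deferral (all names hypothetical; assumes INIT_WORK() was done at setup):

    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    struct demo_master {
            spinlock_t lock;
            bool slave_going_down;
            struct work_struct flr_work;
    };

    static void demo_flr_work(struct work_struct *work)
    {
            struct demo_master *m = container_of(work, struct demo_master, flr_work);

            /* process context: safe to sleep, send FW commands, free resources */
            (void)m;
    }

    static void demo_irq_path(struct demo_master *m)
    {
            unsigned long flags;

            spin_lock_irqsave(&m->lock, flags);
            m->slave_going_down = true;
            spin_unlock_irqrestore(&m->lock, flags);

            schedule_work(&m->flr_work);    /* eq.c queues on its own comm_wq */
    }
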
@@ -864,7 +872,8 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
 
        eq->dev   = dev;
        eq->nent  = roundup_pow_of_two(max(nent, 2));
-       npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE;
+       /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes */
+       npages = PAGE_ALIGN(eq->nent * (MLX4_EQ_ENTRY_SIZE << dev->caps.eqe_factor)) / PAGE_SIZE;
 
        eq->page_list = kmalloc(npages * sizeof *eq->page_list,
                                GFP_KERNEL);
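
To see what eqe_factor costs in memory: with 4 KiB pages (an assumption; PAGE_SIZE varies by architecture), a 512-entry EQ needs 4 pages with legacy 32-byte EQEs and 8 pages once they are extended to 64 bytes. A quick standalone check of the same arithmetic:

    #include <stdio.h>

    #define MLX4_EQ_ENTRY_SIZE 32
    #define DEMO_PAGE_SIZE 4096UL           /* assumed 4 KiB pages */
    #define DEMO_PAGE_ALIGN(x) (((x) + DEMO_PAGE_SIZE - 1) & ~(DEMO_PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long nent = 512;
            int eqe_factor;

            for (eqe_factor = 0; eqe_factor <= 1; eqe_factor++)
                    printf("eqe_factor=%d -> %lu pages\n", eqe_factor,
                           DEMO_PAGE_ALIGN(nent * (MLX4_EQ_ENTRY_SIZE << eqe_factor)) /
                           DEMO_PAGE_SIZE);
            return 0;       /* prints 4 pages, then 8 */
    }
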
@@ -966,8 +975,9 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cmd_mailbox *mailbox;
        int err;
-       int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE;
        int i;
+       /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes */
+       int npages = PAGE_ALIGN((MLX4_EQ_ENTRY_SIZE << dev->caps.eqe_factor) * eq->nent) / PAGE_SIZE;
 
        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))