Merge branch 'mlx4'
author: David S. Miller <davem@davemloft.net>
Mon, 15 Dec 2014 16:35:00 +0000 (11:35 -0500)
committer: David S. Miller <davem@davemloft.net>
Mon, 15 Dec 2014 16:35:00 +0000 (11:35 -0500)
Or Gerlitz says:

====================
mlx4 driver fixes for 3.19-rc1

Just fixes for two small issues introduced in the 3.19 merge window
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/fw.h
drivers/net/ethernet/mellanox/mlx4/main.c

index ef3b95b..51807bb 100644 (file)
@@ -787,11 +787,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
                if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
                        field = 3;
                dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
-               mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
-                        dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
        } else {
                dev_cap->bf_reg_size = 0;
-               mlx4_dbg(dev, "BlueFlame not available\n");
        }
 
        MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
@@ -902,9 +899,6 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
                        goto out;
        }
 
-       mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
-                dev_cap->bmme_flags, dev_cap->reserved_lkey);
-
        /*
         * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
         * we can't use any EQs whose doorbell falls on that page,
@@ -916,6 +910,21 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        else
                dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SYS_EQS;
 
+out:
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+
+void mlx4_dev_cap_dump(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
+{
+       if (dev_cap->bf_reg_size > 0)
+               mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
+                        dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
+       else
+               mlx4_dbg(dev, "BlueFlame not available\n");
+
+       mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
+                dev_cap->bmme_flags, dev_cap->reserved_lkey);
        mlx4_dbg(dev, "Max ICM size %lld MB\n",
                 (unsigned long long) dev_cap->max_icm_sz >> 20);
        mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
@@ -949,13 +958,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
                 dev_cap->dmfs_high_rate_qpn_base);
        mlx4_dbg(dev, "DMFS high rate steer QPn range: %d\n",
                 dev_cap->dmfs_high_rate_qpn_range);
-
        dump_dev_cap_flags(dev, dev_cap->flags);
        dump_dev_cap_flags2(dev, dev_cap->flags2);
-
-out:
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       return err;
 }
 
 int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap)
index 794e282..62562b6 100644 (file)
@@ -224,6 +224,7 @@ struct mlx4_set_ib_param {
        u32 cap_mask;
 };
 
+void mlx4_dev_cap_dump(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap);
 int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap);
 int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap);
 int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
index e25436b..b935bf3 100644 (file)
@@ -305,6 +305,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
                mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
                return err;
        }
+       mlx4_dev_cap_dump(dev, dev_cap);
 
        if (dev_cap->min_page_sz > PAGE_SIZE) {
                mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
@@ -2488,41 +2489,42 @@ static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
                             u8 total_vfs, int existing_vfs)
 {
        u64 dev_flags = dev->flags;
+       int err = 0;
 
-       dev->dev_vfs = kzalloc(
-                       total_vfs * sizeof(*dev->dev_vfs),
-                       GFP_KERNEL);
+       atomic_inc(&pf_loading);
+       if (dev->flags &  MLX4_FLAG_SRIOV) {
+               if (existing_vfs != total_vfs) {
+                       mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n",
+                                existing_vfs, total_vfs);
+                       total_vfs = existing_vfs;
+               }
+       }
+
+       dev->dev_vfs = kzalloc(total_vfs * sizeof(*dev->dev_vfs), GFP_KERNEL);
        if (NULL == dev->dev_vfs) {
                mlx4_err(dev, "Failed to allocate memory for VFs\n");
                goto disable_sriov;
-       } else if (!(dev->flags &  MLX4_FLAG_SRIOV)) {
-               int err = 0;
-
-               atomic_inc(&pf_loading);
-               if (existing_vfs) {
-                       if (existing_vfs != total_vfs)
-                               mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n",
-                                        existing_vfs, total_vfs);
-               } else {
-                       mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
-                       err = pci_enable_sriov(pdev, total_vfs);
-               }
-               if (err) {
-                       mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
-                                err);
-                       atomic_dec(&pf_loading);
-                       goto disable_sriov;
-               } else {
-                       mlx4_warn(dev, "Running in master mode\n");
-                       dev_flags |= MLX4_FLAG_SRIOV |
-                               MLX4_FLAG_MASTER;
-                       dev_flags &= ~MLX4_FLAG_SLAVE;
-                       dev->num_vfs = total_vfs;
-               }
+       }
+
+       if (!(dev->flags &  MLX4_FLAG_SRIOV)) {
+               mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
+               err = pci_enable_sriov(pdev, total_vfs);
+       }
+       if (err) {
+               mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
+                        err);
+               goto disable_sriov;
+       } else {
+               mlx4_warn(dev, "Running in master mode\n");
+               dev_flags |= MLX4_FLAG_SRIOV |
+                       MLX4_FLAG_MASTER;
+               dev_flags &= ~MLX4_FLAG_SLAVE;
+               dev->num_vfs = total_vfs;
        }
        return dev_flags;
 
 disable_sriov:
+       atomic_dec(&pf_loading);
        dev->num_vfs = 0;
        kfree(dev->dev_vfs);
        return dev_flags & ~MLX4_FLAG_MASTER;
@@ -2606,8 +2608,10 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
                }
 
                if (total_vfs) {
-                       existing_vfs = pci_num_vf(pdev);
                        dev->flags = MLX4_FLAG_MASTER;
+                       existing_vfs = pci_num_vf(pdev);
+                       if (existing_vfs)
+                               dev->flags |= MLX4_FLAG_SRIOV;
                        dev->num_vfs = total_vfs;
                }
        }
@@ -2643,6 +2647,7 @@ slave_start:
        }
 
        if (mlx4_is_master(dev)) {
+               /* when we hit the goto slave_start below, dev_cap already initialized */
                if (!dev_cap) {
                        dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);
 
@@ -2849,6 +2854,7 @@ slave_start:
        if (mlx4_is_master(dev) && dev->num_vfs)
                atomic_dec(&pf_loading);
 
+       kfree(dev_cap);
        return 0;
 
 err_port: