IB/mlx5: Add support for re-registration of MRs
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index ec737e2..16f7d0b 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -504,6 +504,11 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
            (MLX5_CAP_ETH(dev->mdev, csum_cap)))
                        props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
 
+       if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
+               props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
+               props->device_cap_flags |= IB_DEVICE_UD_TSO;
+       }
+
        props->vendor_part_id      = mdev->pdev->device;
        props->hw_ver              = mdev->pdev->revision;
 
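Note on the hunk above: IB_DEVICE_UD_IP_CSUM and IB_DEVICE_UD_TSO tell ULPs that UD QPs on this device can offload IP checksums and TSO. A minimal consumer-side sketch (illustration only, not part of the patch) of keying off these bits from the attributes filled in by mlx5_ib_query_device():

static bool ud_offloads_supported(const struct ib_device_attr *attr)
{
	/* Both bits are set together above when the firmware reports
	 * ipoib_basic_offloads, but test them separately anyway.
	 */
	return (attr->device_cap_flags & IB_DEVICE_UD_IP_CSUM) &&
	       (attr->device_cap_flags & IB_DEVICE_UD_TSO);
}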
@@ -844,6 +849,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
        int err;
        int i;
        size_t reqlen;
+       size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
+                                    max_cqe_version);
 
        if (!dev->ib_active)
                return ERR_PTR(-EAGAIN);
@@ -854,7 +861,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
        reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
        if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
                ver = 0;
-       else if (reqlen >= sizeof(struct mlx5_ib_alloc_ucontext_req_v2))
+       else if (reqlen >= min_req_v2)
                ver = 2;
        else
                return ERR_PTR(-EINVAL);
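The offsetof() bound implements the usual extensible-ABI rule: a v2 request is accepted as long as it carries at least the fields that precede max_cqe_version, so userspace built against an older, shorter mlx5_ib_alloc_ucontext_req_v2 layout still negotiates version 2. A sketch of the pattern with a hypothetical request struct (names invented for illustration):

struct req_v2 {
	__u64 flags;		/* part of the original v2 layout   */
	__u8  max_cqe_version;	/* field appended in a later kernel */
	__u8  reserved[7];
};

static int detect_version(size_t reqlen)
{
	/* Anything shorter than the pre-extension layout is malformed;
	 * anything at least that long is treated as v2.
	 */
	if (reqlen < offsetof(struct req_v2, max_cqe_version))
		return -EINVAL;
	return 2;
}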
@@ -1714,6 +1721,17 @@ static struct device_attribute *mlx5_class_attributes[] = {
        &dev_attr_reg_pages,
 };
 
+static void pkey_change_handler(struct work_struct *work)
+{
+       struct mlx5_ib_port_resources *ports =
+               container_of(work, struct mlx5_ib_port_resources,
+                            pkey_change_work);
+
+       mutex_lock(&ports->devr->mutex);
+       mlx5_ib_gsi_pkey_change(ports->gsi);
+       mutex_unlock(&ports->devr->mutex);
+}
+
 static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
                          enum mlx5_dev_event event, unsigned long param)
 {
@@ -1750,6 +1768,8 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
        case MLX5_DEV_EVENT_PKEY_CHANGE:
                ibev.event = IB_EVENT_PKEY_CHANGE;
                port = (u8)param;
+
+               schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
                break;
 
        case MLX5_DEV_EVENT_GUID_CHANGE:
@@ -1959,10 +1979,13 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
        struct ib_srq_init_attr attr;
        struct mlx5_ib_dev *dev;
        struct ib_cq_init_attr cq_attr = {.cqe = 1};
+       int port;
        int ret = 0;
 
        dev = container_of(devr, struct mlx5_ib_dev, devr);
 
+       mutex_init(&devr->mutex);
+
        devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
        if (IS_ERR(devr->p0)) {
                ret = PTR_ERR(devr->p0);
@@ -2050,6 +2073,12 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
        atomic_inc(&devr->p0->usecnt);
        atomic_set(&devr->s0->usecnt, 0);
 
+       for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) {
+               INIT_WORK(&devr->ports[port].pkey_change_work,
+                         pkey_change_handler);
+               devr->ports[port].devr = devr;
+       }
+
        return 0;
 
 error5:
@@ -2068,12 +2097,20 @@ error0:
 
 static void destroy_dev_resources(struct mlx5_ib_resources *devr)
 {
+       struct mlx5_ib_dev *dev =
+               container_of(devr, struct mlx5_ib_dev, devr);
+       int port;
+
        mlx5_ib_destroy_srq(devr->s1);
        mlx5_ib_destroy_srq(devr->s0);
        mlx5_ib_dealloc_xrcd(devr->x0);
        mlx5_ib_dealloc_xrcd(devr->x1);
        mlx5_ib_destroy_cq(devr->c0);
        mlx5_ib_dealloc_pd(devr->p0);
+
+       /* Make sure no change P_Key work items are still executing */
+       for (port = 0; port < dev->num_ports; ++port)
+               cancel_work_sync(&devr->ports[port].pkey_change_work);
 }
 
 static u32 get_core_cap_flags(struct ib_device *ibdev)
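Taken together, the P_Key hunks above form one deferred-work path; an editorial summary of the lifecycle (condensed, not literal code):

/*
 *   create_dev_resources()
 *       mutex_init(&devr->mutex);
 *       INIT_WORK(&devr->ports[i].pkey_change_work, pkey_change_handler);
 *       devr->ports[i].devr = devr;
 *
 *   mlx5_ib_event(), MLX5_DEV_EVENT_PKEY_CHANGE        - must not sleep
 *       schedule_work(&devr->ports[port - 1].pkey_change_work);
 *
 *   pkey_change_handler()                              - process context
 *       mutex_lock(&ports->devr->mutex);
 *       mlx5_ib_gsi_pkey_change(ports->gsi);
 *       mutex_unlock(&ports->devr->mutex);
 *
 *   destroy_dev_resources()
 *       cancel_work_sync(&devr->ports[i].pkey_change_work);
 */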
@@ -2196,6 +2233,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
                (1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
                (1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
                (1ull << IB_USER_VERBS_CMD_REG_MR)              |
+               (1ull << IB_USER_VERBS_CMD_REREG_MR)            |
                (1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
                (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
                (1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
@@ -2214,7 +2252,9 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
                (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)         |
                (1ull << IB_USER_VERBS_CMD_OPEN_QP);
        dev->ib_dev.uverbs_ex_cmd_mask =
-               (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
+               (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE)     |
+               (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ)        |
+               (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
 
        dev->ib_dev.query_device        = mlx5_ib_query_device;
        dev->ib_dev.query_port          = mlx5_ib_query_port;
@@ -2254,6 +2294,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
        dev->ib_dev.req_notify_cq       = mlx5_ib_arm_cq;
        dev->ib_dev.get_dma_mr          = mlx5_ib_get_dma_mr;
        dev->ib_dev.reg_user_mr         = mlx5_ib_reg_user_mr;
+       dev->ib_dev.rereg_user_mr       = mlx5_ib_rereg_user_mr;
        dev->ib_dev.dereg_mr            = mlx5_ib_dereg_mr;
        dev->ib_dev.attach_mcast        = mlx5_ib_mcg_attach;
        dev->ib_dev.detach_mcast        = mlx5_ib_mcg_detach;
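With IB_USER_VERBS_CMD_REREG_MR advertised and rereg_user_mr wired up above, userspace can re-register an existing MR instead of deregistering and registering a new one. A userspace sketch (assumes a libibverbs that exposes ibv_rereg_mr(); move_mr() is an invented helper name):

#include <infiniband/verbs.h>

/* Re-point an existing MR at a new buffer; only the translation is
 * changed, so the pd and access arguments are left untouched.
 */
static int move_mr(struct ibv_mr *mr, void *new_buf, size_t len)
{
	return ibv_rereg_mr(mr, IBV_REREG_MR_CHANGE_TRANSLATION,
			    NULL /* pd: unchanged */, new_buf, len,
			    0 /* access: unchanged */);
}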