Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
authorLinus Torvalds <torvalds@linux-foundation.org>
Tue, 23 Jul 2013 21:38:20 +0000 (14:38 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Tue, 23 Jul 2013 21:38:20 +0000 (14:38 -0700)
Pull vhost fixes from Michael Tsirkin:
 "vhost: more fixes for 3.11

  This includes some fixes for vhost net and scsi drivers.

  The test module has already been reworked to avoid rcu usage, but the
  necessary core changes are missing, we fixed this.

  Unlikely to affect any real-world users, but it's early in the cycle
  so, let's merge them"

(It was earlier when Michael originally sent the email, but it somehow
got missed in the flood, so here it is after -rc2)

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  vhost: Remove custom vhost rcu usage
  vhost-scsi: Always access vq->private_data under vq mutex
  vhost-net: Always access vq->private_data under vq mutex

1  2 
drivers/vhost/scsi.c

diff --combined drivers/vhost/scsi.c
@@@ -448,19 -448,7 +448,19 @@@ static u32 tcm_vhost_tpg_get_inst_index
  
  static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
  {
 -      return;
 +      struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
 +                              struct tcm_vhost_cmd, tvc_se_cmd);
 +
 +      if (tv_cmd->tvc_sgl_count) {
 +              u32 i;
 +              for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
 +                      put_page(sg_page(&tv_cmd->tvc_sgl[i]));
 +
 +              kfree(tv_cmd->tvc_sgl);
 +        }
 +
 +      tcm_vhost_put_inflight(tv_cmd->inflight);
 +      kfree(tv_cmd);
  }
  
  static int tcm_vhost_shutdown_session(struct se_session *se_sess)
@@@ -530,9 -518,9 +530,9 @@@ static int tcm_vhost_queue_status(struc
        return 0;
  }
  
 -static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
 +static void tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
  {
 -      return 0;
 +      return;
  }
  
  static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
@@@ -572,13 -560,19 +572,13 @@@ static void vhost_scsi_free_cmd(struct 
        struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
  
        /* TODO locking against target/backend threads? */
 -      transport_generic_free_cmd(se_cmd, 1);
 +      transport_generic_free_cmd(se_cmd, 0);
  
 -      if (cmd->tvc_sgl_count) {
 -              u32 i;
 -              for (i = 0; i < cmd->tvc_sgl_count; i++)
 -                      put_page(sg_page(&cmd->tvc_sgl[i]));
 -
 -              kfree(cmd->tvc_sgl);
 -      }
 -
 -      tcm_vhost_put_inflight(cmd->inflight);
 +}
  
 -      kfree(cmd);
 +static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
 +{
 +      return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
  }
  
  static void
@@@ -862,7 -856,7 +862,7 @@@ static void tcm_vhost_submission_work(s
                        cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
                        cmd->tvc_lun, cmd->tvc_exp_data_len,
                        cmd->tvc_task_attr, cmd->tvc_data_direction,
 -                      0, sg_ptr, cmd->tvc_sgl_count,
 +                      TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
                        sg_bidi_ptr, sg_no_bidi);
        if (rc < 0) {
                transport_send_check_condition_and_sense(se_cmd,
@@@ -902,19 -896,15 +902,15 @@@ vhost_scsi_handle_vq(struct vhost_scsi 
        int head, ret;
        u8 target;
  
+       mutex_lock(&vq->mutex);
        /*
         * We can handle the vq only after the endpoint is setup by calling the
         * VHOST_SCSI_SET_ENDPOINT ioctl.
-        *
-        * TODO: Check that we are running from vhost_worker which acts
-        * as read-side critical section for vhost kind of RCU.
-        * See the comments in struct vhost_virtqueue in drivers/vhost/vhost.h
         */
-       vs_tpg = rcu_dereference_check(vq->private_data, 1);
+       vs_tpg = vq->private_data;
        if (!vs_tpg)
-               return;
+               goto out;
  
-       mutex_lock(&vq->mutex);
        vhost_disable_notify(&vs->dev, vq);
  
        for (;;) {
@@@ -1064,6 -1054,7 +1060,7 @@@ err_free
        vhost_scsi_free_cmd(cmd);
  err_cmd:
        vhost_scsi_send_bad_target(vs, vq, head, out);
+ out:
        mutex_unlock(&vq->mutex);
  }
  
@@@ -1232,9 -1223,8 +1229,8 @@@ vhost_scsi_set_endpoint(struct vhost_sc
                       sizeof(vs->vs_vhost_wwpn));
                for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
                        vq = &vs->vqs[i].vq;
-                       /* Flushing the vhost_work acts as synchronize_rcu */
                        mutex_lock(&vq->mutex);
-                       rcu_assign_pointer(vq->private_data, vs_tpg);
+                       vq->private_data = vs_tpg;
                        vhost_init_used(vq);
                        mutex_unlock(&vq->mutex);
                }
@@@ -1313,9 -1303,8 +1309,8 @@@ vhost_scsi_clear_endpoint(struct vhost_
        if (match) {
                for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
                        vq = &vs->vqs[i].vq;
-                       /* Flushing the vhost_work acts as synchronize_rcu */
                        mutex_lock(&vq->mutex);
-                       rcu_assign_pointer(vq->private_data, NULL);
+                       vq->private_data = NULL;
                        mutex_unlock(&vq->mutex);
                }
        }
@@@ -2034,7 -2023,6 +2029,7 @@@ static struct target_core_fabric_ops tc
        .tpg_release_fabric_acl         = tcm_vhost_release_fabric_acl,
        .tpg_get_inst_index             = tcm_vhost_tpg_get_inst_index,
        .release_cmd                    = tcm_vhost_release_cmd,
 +      .check_stop_free                = vhost_scsi_check_stop_free,
        .shutdown_session               = tcm_vhost_shutdown_session,
        .close_session                  = tcm_vhost_close_session,
        .sess_get_index                 = tcm_vhost_sess_get_index,