Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target...
author Linus Torvalds <torvalds@linux-foundation.org>
Thu, 11 Jul 2013 19:57:19 +0000 (12:57 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 11 Jul 2013 19:57:19 +0000 (12:57 -0700)
Pull SCSI target updates from Nicholas Bellinger:
 "Lots of activity this round on performance improvements in target-core
  while benchmarking the prototype scsi-mq initiator code with
  vhost-scsi fabric ports, along with a number of iscsi/iser-target
  improvements and hardening fixes for exception path cases post v3.10
  merge.

  The highlights include:

   - Make persistent reservations APTPL buffer allocated on-demand, and
     drop per t10_reservation buffer.  (grover)
   - Make virtual LUN=0 a NULLIO device, and skip allocation of NULLIO
     device pages (grover)
   - Add transport_cmd_check_stop write_pending bit to avoid extra
     access of ->t_state_lock in WRITE I/O submission fast-path.  (nab)
   - Drop unnecessary CMD_T_DEV_ACTIVE check from
     transport_lun_remove_cmd to avoid extra access of ->t_state_lock in
     release fast-path.  (nab)
   - Avoid extra t_state_lock access in __target_execute_cmd fast-path
     (nab)
   - Drop unnecessary vhost-scsi wait_for_tasks=true usage +
     ->t_state_lock access in release fast-path.  (nab)
   - Convert vhost-scsi to use modern se_cmd->cmd_kref
     TARGET_SCF_ACK_KREF usage (nab)
   - Add tracepoints for SCSI commands being processed (roland)
   - Refactoring of iscsi-target handling of ISCSI_OP_NOOP +
     ISCSI_OP_TEXT to be transport independent (nab)
   - Add iscsi-target SendTargets=$IQN support for in-band discovery
     (nab)
   - Add iser-target support for in-band discovery (nab + Or)
   - Add iscsi-target demo-mode TPG authentication context support (nab)
   - Fix isert_put_reject payload buffer post (nab)
   - Fix iscsit_add_reject* usage for iser (nab)
   - Fix iscsit_sequence_cmd reject handling for iser (nab)
   - Fix ISCSI_OP_SCSI_TMFUNC handling for iser (nab)
   - Fix session reset bug with RDMA_CM_EVENT_DISCONNECTED (nab)

  The last five iscsi/iser-target items are CC'ed to stable, as they do
  address issues present in v3.10 code.  They are certainly larger than
  I'd like for a stable patch set, but are important to ensure proper
  REJECT exception handling in iser-target for 3.10.y"
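
One of the highlights above is demo-mode TPG authentication: when an
initiator logs in against a dynamically generated (demo-mode) node ACL,
the CHAP settings are now taken from a per-TPG authentication context
rather than a per-NodeACL one.  A condensed sketch of that selection,
following the iscsi_target_nego.c hunk further down (the helper name
pick_auth_context() is made up for illustration; the real logic sits
inline in iscsi_handle_authentication()):

static struct iscsi_node_auth *pick_auth_context(struct se_node_acl *se_nacl)
{
	struct iscsi_portal_group *iscsi_tpg;
	struct iscsi_node_acl *iscsi_nacl;

	if (se_nacl->dynamic_node_acl) {
		/* Demo-mode ACL: fall back to the TPG-wide auth context */
		iscsi_tpg = container_of(se_nacl->se_tpg,
				struct iscsi_portal_group, tpg_se_tpg);
		return &iscsi_tpg->tpg_demo_auth;
	}
	/* Explicit NodeACL: keep the per-initiator auth context */
	iscsi_nacl = container_of(se_nacl, struct iscsi_node_acl,
				  se_node_acl);
	return ISCSI_NODE_AUTH(iscsi_nacl);
}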

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (51 commits)
  iser-target: Ignore non TEXT + LOGOUT opcodes for discovery
  target: make queue_tm_rsp() return void
  target: remove unused codes from enum tcm_tmrsp_table
  iscsi-target: kstrtou* configfs attribute parameter cleanups
  iscsi-target: Fix tfc_tpg_auth_cit configfs length overflow
  iscsi-target: Fix tfc_tpg_nacl_auth_cit configfs length overflow
  iser-target: Add support for ISCSI_OP_TEXT opcode + payload handling
  iser-target: Rename sense_buf_[dma,len] to pdu_[dma,len]
  iser-target: Add vendor_err debug output
  target: Add (obsolete) checking for PMI/LBA fields in READ CAPACITY(10)
  target: Return correct sense data for IO past the end of a device
  target: Add tracepoints for SCSI commands being processed
  iser-target: Fix session reset bug with RDMA_CM_EVENT_DISCONNECTED
  iscsi-target: Fix ISCSI_OP_SCSI_TMFUNC handling for iser
  iscsi-target: Fix iscsit_sequence_cmd reject handling for iser
  iscsi-target: Fix iscsit_add_reject* usage for iser
  iser-target: Fix isert_put_reject payload buffer post
  iscsi-target: missing kfree() on error path
  iscsi-target: Drop left-over iscsi_conn->bad_hdr
  target: Make core_scsi3_update_and_write_aptpl return sense_reason_t
  ...
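
The kstrtou* cleanups listed above convert the iscsi-target configfs
store handlers from simple_strtoul() to checked parsing, so malformed
input now returns an error instead of being silently accepted.  A
minimal sketch of the pattern, with a made-up handler name (the real
conversions are visible in the iscsi_target_configfs.c hunks below):

/* Illustrative store handler; the name and configfs plumbing are not
 * from the series, only the kstrtou32() parse-and-validate step is. */
static ssize_t example_store_enable(const char *page, size_t count)
{
	u32 op;
	int ret;

	ret = kstrtou32(page, 0, &op);	/* fails on non-numeric input */
	if (ret)
		return ret;
	if ((op != 1) && (op != 0))
		return -EINVAL;

	/* ... act on op ... */
	return count;
}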

drivers/infiniband/ulp/isert/ib_isert.c
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/target/iscsi/iscsi_target_configfs.c
drivers/target/iscsi/iscsi_target_erl0.c
drivers/target/iscsi/iscsi_target_nego.c
drivers/vhost/scsi.c

diff --combined drivers/infiniband/ulp/isert/ib_isert.c
@@@ -388,6 -388,7 +388,7 @@@ isert_connect_request(struct rdma_cm_i
        init_waitqueue_head(&isert_conn->conn_wait_comp_err);
        kref_init(&isert_conn->conn_kref);
        kref_get(&isert_conn->conn_kref);
+       mutex_init(&isert_conn->conn_mutex);
  
        cma_id->context = isert_conn;
        isert_conn->conn_cm_id = cma_id;
@@@ -540,15 -541,32 +541,32 @@@ isert_disconnect_work(struct work_struc
                                struct isert_conn, conn_logout_work);
  
        pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
+       mutex_lock(&isert_conn->conn_mutex);
        isert_conn->state = ISER_CONN_DOWN;
  
        if (isert_conn->post_recv_buf_count == 0 &&
            atomic_read(&isert_conn->post_send_buf_count) == 0) {
                pr_debug("Calling wake_up(&isert_conn->conn_wait);\n");
-               wake_up(&isert_conn->conn_wait);
+               mutex_unlock(&isert_conn->conn_mutex);
+               goto wake_up;
+       }
+       if (!isert_conn->conn_cm_id) {
+               mutex_unlock(&isert_conn->conn_mutex);
+               isert_put_conn(isert_conn);
+               return;
        }
+       if (!isert_conn->logout_posted) {
+               pr_debug("Calling rdma_disconnect for !logout_posted from"
+                        " isert_disconnect_work\n");
+               rdma_disconnect(isert_conn->conn_cm_id);
+               mutex_unlock(&isert_conn->conn_mutex);
+               iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+               goto wake_up;
+       }
+       mutex_unlock(&isert_conn->conn_mutex);
  
+ wake_up:
+       wake_up(&isert_conn->conn_wait);
        isert_put_conn(isert_conn);
  }
  
@@@ -934,16 -952,11 +952,11 @@@ isert_handle_scsi_cmd(struct isert_con
        }
  
  sequence_cmd:
-       rc = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
+       rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
  
        if (!rc && dump_payload == false && unsol_data)
                iscsit_set_unsoliticed_dataout(cmd);
  
-       if (rc == CMDSN_ERROR_CANNOT_RECOVER)
-               return iscsit_add_reject_from_cmd(
-                          ISCSI_REASON_PROTOCOL_ERROR,
-                          1, 0, (unsigned char *)hdr, cmd);
        return 0;
  }
  
@@@ -1000,6 -1013,52 +1013,52 @@@ isert_handle_iscsi_dataout(struct isert
        return 0;
  }
  
+ static int
+ isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
+                    struct iser_rx_desc *rx_desc, unsigned char *buf)
+ {
+       struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
+       struct iscsi_conn *conn = isert_conn->conn;
+       struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
+       int rc;
+       rc = iscsit_setup_nop_out(conn, cmd, hdr);
+       if (rc < 0)
+               return rc;
+       /*
+        * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
+        */
+       return iscsit_process_nop_out(conn, cmd, hdr);
+ }
+ static int
+ isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
+                     struct iser_rx_desc *rx_desc, struct iscsi_text *hdr)
+ {
+       struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
+       struct iscsi_conn *conn = isert_conn->conn;
+       u32 payload_length = ntoh24(hdr->dlength);
+       int rc;
+       unsigned char *text_in;
+       rc = iscsit_setup_text_cmd(conn, cmd, hdr);
+       if (rc < 0)
+               return rc;
+       text_in = kzalloc(payload_length, GFP_KERNEL);
+       if (!text_in) {
+               pr_err("Unable to allocate text_in of payload_length: %u\n",
+                      payload_length);
+               return -ENOMEM;
+       }
+       cmd->text_in_ptr = text_in;
+       memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);
+       return iscsit_process_text_cmd(conn, cmd, hdr);
+ }
  static int
  isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
                uint32_t read_stag, uint64_t read_va,
  {
        struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
        struct iscsi_conn *conn = isert_conn->conn;
+       struct iscsi_session *sess = conn->sess;
        struct iscsi_cmd *cmd;
        struct isert_cmd *isert_cmd;
        int ret = -EINVAL;
        u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
  
+       if (sess->sess_ops->SessionType &&
+          (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
+               pr_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
+                      " ignoring\n", opcode);
+               return 0;
+       }
        switch (opcode) {
        case ISCSI_OP_SCSI_CMD:
                cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
                if (!cmd)
                        break;
  
-               ret = iscsit_handle_nop_out(conn, cmd, (unsigned char *)hdr);
+               isert_cmd = container_of(cmd, struct isert_cmd, iscsi_cmd);
+               ret = isert_handle_nop_out(isert_conn, isert_cmd,
+                                          rx_desc, (unsigned char *)hdr);
                break;
        case ISCSI_OP_SCSI_DATA_OUT:
                ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
                                                    SECONDS_FOR_LOGOUT_COMP *
                                                    HZ);
                break;
+       case ISCSI_OP_TEXT:
+               cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+               if (!cmd)
+                       break;
+               isert_cmd = container_of(cmd, struct isert_cmd, iscsi_cmd);
+               ret = isert_handle_text_cmd(isert_conn, isert_cmd,
+                                           rx_desc, (struct iscsi_text *)hdr);
+               break;
        default:
                pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
                dump_stack();
@@@ -1184,14 -1262,12 +1262,12 @@@ isert_put_cmd(struct isert_cmd *isert_c
  {
        struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
        struct isert_conn *isert_conn = isert_cmd->conn;
-       struct iscsi_conn *conn;
+       struct iscsi_conn *conn = isert_conn->conn;
  
        pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);
  
        switch (cmd->iscsi_opcode) {
        case ISCSI_OP_SCSI_CMD:
-               conn = isert_conn->conn;
                spin_lock_bh(&conn->cmd_lock);
                if (!list_empty(&cmd->i_conn_node))
                        list_del(&cmd->i_conn_node);
                        iscsit_stop_dataout_timer(cmd);
  
                isert_unmap_cmd(isert_cmd, isert_conn);
-               /*
-                * Fall-through
-                */
+               transport_generic_free_cmd(&cmd->se_cmd, 0);
+               break;
        case ISCSI_OP_SCSI_TMFUNC:
+               spin_lock_bh(&conn->cmd_lock);
+               if (!list_empty(&cmd->i_conn_node))
+                       list_del(&cmd->i_conn_node);
+               spin_unlock_bh(&conn->cmd_lock);
                transport_generic_free_cmd(&cmd->se_cmd, 0);
                break;
        case ISCSI_OP_REJECT:
        case ISCSI_OP_NOOP_OUT:
-               conn = isert_conn->conn;
+       case ISCSI_OP_TEXT:
                spin_lock_bh(&conn->cmd_lock);
                if (!list_empty(&cmd->i_conn_node))
                        list_del(&cmd->i_conn_node);
                 * associated cmd->se_cmd needs to be released.
                 */
                if (cmd->se_cmd.se_tfo != NULL) {
+                       pr_debug("Calling transport_generic_free_cmd from"
+                                " isert_put_cmd for 0x%02x\n",
+                                cmd->iscsi_opcode);
                        transport_generic_free_cmd(&cmd->se_cmd, 0);
                        break;
                }
@@@ -1249,11 -1331,11 +1331,11 @@@ static voi
  isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
                     struct ib_device *ib_dev)
  {
-       if (isert_cmd->sense_buf_dma != 0) {
-               pr_debug("Calling ib_dma_unmap_single for isert_cmd->sense_buf_dma\n");
-               ib_dma_unmap_single(ib_dev, isert_cmd->sense_buf_dma,
-                                   isert_cmd->sense_buf_len, DMA_TO_DEVICE);
-               isert_cmd->sense_buf_dma = 0;
+       if (isert_cmd->pdu_buf_dma != 0) {
+               pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
+               ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
+                                   isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
+               isert_cmd->pdu_buf_dma = 0;
        }
  
        isert_unmap_tx_desc(tx_desc, ib_dev);
@@@ -1318,8 -1400,8 +1400,8 @@@ isert_do_control_comp(struct work_struc
                atomic_dec(&isert_conn->post_send_buf_count);
  
                cmd->i_state = ISTATE_SENT_STATUS;
-               complete(&cmd->reject_comp);
                isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
+               break;
        case ISTATE_SEND_LOGOUTRSP:
                pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
                /*
                isert_conn->logout_posted = true;
                iscsit_logout_post_handler(cmd, cmd->conn);
                break;
+       case ISTATE_SEND_TEXTRSP:
+               atomic_dec(&isert_conn->post_send_buf_count);
+               cmd->i_state = ISTATE_SENT_STATUS;
+               isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
+               break;
        default:
                pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
                dump_stack();
@@@ -1345,7 -1432,9 +1432,9 @@@ isert_response_completion(struct iser_t
        struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
  
        if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
-           cmd->i_state == ISTATE_SEND_LOGOUTRSP) {
+           cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
+           cmd->i_state == ISTATE_SEND_REJECT ||
+           cmd->i_state == ISTATE_SEND_TEXTRSP) {
                isert_unmap_tx_desc(tx_desc, ib_dev);
  
                INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
@@@ -1419,7 -1508,11 +1508,11 @@@ isert_cq_comp_err(struct iser_tx_desc *
                pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
                pr_debug("Calling wake_up from isert_cq_comp_err\n");
  
-               isert_conn->state = ISER_CONN_TERMINATING;
+               mutex_lock(&isert_conn->conn_mutex);
+               if (isert_conn->state != ISER_CONN_DOWN)
+                       isert_conn->state = ISER_CONN_TERMINATING;
+               mutex_unlock(&isert_conn->conn_mutex);
                wake_up(&isert_conn->conn_wait_comp_err);
        }
  }
@@@ -1445,6 -1538,7 +1538,7 @@@ isert_cq_tx_work(struct work_struct *wo
                } else {
                        pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
                        pr_debug("TX wc.status: 0x%08x\n", wc.status);
+                       pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
                        atomic_dec(&isert_conn->post_send_buf_count);
                        isert_cq_comp_err(tx_desc, isert_conn);
                }
@@@ -1484,9 -1578,11 +1578,11 @@@ isert_cq_rx_work(struct work_struct *wo
                        isert_rx_completion(rx_desc, isert_conn, xfer_len);
                } else {
                        pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
-                       if (wc.status != IB_WC_WR_FLUSH_ERR)
+                       if (wc.status != IB_WC_WR_FLUSH_ERR) {
                                pr_debug("RX wc.status: 0x%08x\n", wc.status);
+                               pr_debug("RX wc.vendor_err: 0x%08x\n",
+                                        wc.vendor_err);
+                       }
                        isert_conn->post_recv_buf_count--;
                        isert_cq_comp_err(NULL, isert_conn);
                }
@@@ -1543,7 -1639,7 +1639,7 @@@ isert_put_response(struct iscsi_conn *c
            (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
                struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
                struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
-               u32 padding, sense_len;
+               u32 padding, pdu_len;
  
                put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
                                   cmd->sense_buffer);
  
                padding = -(cmd->se_cmd.scsi_sense_length) & 3;
                hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
-               sense_len = cmd->se_cmd.scsi_sense_length + padding;
+               pdu_len = cmd->se_cmd.scsi_sense_length + padding;
  
-               isert_cmd->sense_buf_dma = ib_dma_map_single(ib_dev,
-                               (void *)cmd->sense_buffer, sense_len,
+               isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
+                               (void *)cmd->sense_buffer, pdu_len,
                                DMA_TO_DEVICE);
  
-               isert_cmd->sense_buf_len = sense_len;
-               tx_dsg->addr    = isert_cmd->sense_buf_dma;
-               tx_dsg->length  = sense_len;
+               isert_cmd->pdu_buf_len = pdu_len;
+               tx_dsg->addr    = isert_cmd->pdu_buf_dma;
+               tx_dsg->length  = pdu_len;
                tx_dsg->lkey    = isert_conn->conn_mr->lkey;
                isert_cmd->tx_desc.num_sge = 2;
        }
@@@ -1587,7 -1683,7 +1683,7 @@@ isert_put_nopin(struct iscsi_cmd *cmd, 
        isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
        isert_init_send_wr(isert_cmd, send_wr);
  
 -      pr_debug("Posting NOPIN Reponse IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
 +      pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
  
        return isert_post_response(isert_conn, isert_cmd);
  }
@@@ -1637,11 -1733,25 +1733,25 @@@ isert_put_reject(struct iscsi_cmd *cmd
                                struct isert_cmd, iscsi_cmd);
        struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
        struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
+       struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+       struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
+       struct iscsi_reject *hdr =
+               (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;
  
        isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
-       iscsit_build_reject(cmd, conn, (struct iscsi_reject *)
-                               &isert_cmd->tx_desc.iscsi_header);
+       iscsit_build_reject(cmd, conn, hdr);
        isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
+       hton24(hdr->dlength, ISCSI_HDR_LEN);
+       isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
+                       (void *)cmd->buf_ptr, ISCSI_HDR_LEN,
+                       DMA_TO_DEVICE);
+       isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
+       tx_dsg->addr    = isert_cmd->pdu_buf_dma;
+       tx_dsg->length  = ISCSI_HDR_LEN;
+       tx_dsg->lkey    = isert_conn->conn_mr->lkey;
+       isert_cmd->tx_desc.num_sge = 2;
        isert_init_send_wr(isert_cmd, send_wr);
  
        pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
        return isert_post_response(isert_conn, isert_cmd);
  }
  
+ static int
+ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+ {
+       struct isert_cmd *isert_cmd = container_of(cmd,
+                               struct isert_cmd, iscsi_cmd);
+       struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+       struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
+       struct iscsi_text_rsp *hdr =
+               (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
+       u32 txt_rsp_len;
+       int rc;
+       isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
+       rc = iscsit_build_text_rsp(cmd, conn, hdr);
+       if (rc < 0)
+               return rc;
+       txt_rsp_len = rc;
+       isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
+       if (txt_rsp_len) {
+               struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+               struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
+               void *txt_rsp_buf = cmd->buf_ptr;
+               isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
+                               txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
+               isert_cmd->pdu_buf_len = txt_rsp_len;
+               tx_dsg->addr    = isert_cmd->pdu_buf_dma;
+               tx_dsg->length  = txt_rsp_len;
+               tx_dsg->lkey    = isert_conn->conn_mr->lkey;
+               isert_cmd->tx_desc.num_sge = 2;
+       }
+       isert_init_send_wr(isert_cmd, send_wr);
+       pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+       return isert_post_response(isert_conn, isert_cmd);
+ }
  static int
  isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
                    struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
@@@ -1947,6 -2098,9 +2098,9 @@@ isert_response_queue(struct iscsi_conn 
        case ISTATE_SEND_REJECT:
                ret = isert_put_reject(cmd, conn);
                break;
+       case ISTATE_SEND_TEXTRSP:
+               ret = isert_put_text_rsp(cmd, conn);
+               break;
        case ISTATE_SEND_STATUS:
                /*
                 * Special case for sending non GOOD SCSI status from TX thread
@@@ -2175,6 -2329,17 +2329,17 @@@ isert_free_np(struct iscsi_np *np
        kfree(isert_np);
  }
  
+ static int isert_check_state(struct isert_conn *isert_conn, int state)
+ {
+       int ret;
+       mutex_lock(&isert_conn->conn_mutex);
+       ret = (isert_conn->state == state);
+       mutex_unlock(&isert_conn->conn_mutex);
+       return ret;
+ }
  static void isert_free_conn(struct iscsi_conn *conn)
  {
        struct isert_conn *isert_conn = conn->context;
         * Decrement post_send_buf_count for special case when called
         * from isert_do_control_comp() -> iscsit_logout_post_handler()
         */
+       mutex_lock(&isert_conn->conn_mutex);
        if (isert_conn->logout_posted)
                atomic_dec(&isert_conn->post_send_buf_count);
  
-       if (isert_conn->conn_cm_id)
+       if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
+               pr_debug("Calling rdma_disconnect from isert_free_conn\n");
                rdma_disconnect(isert_conn->conn_cm_id);
+       }
        /*
         * Only wait for conn_wait_comp_err if the isert_conn made it
         * into full feature phase..
         */
-       if (isert_conn->state > ISER_CONN_INIT) {
+       if (isert_conn->state == ISER_CONN_UP) {
                pr_debug("isert_free_conn: Before wait_event comp_err %d\n",
                         isert_conn->state);
+               mutex_unlock(&isert_conn->conn_mutex);
                wait_event(isert_conn->conn_wait_comp_err,
-                          isert_conn->state == ISER_CONN_TERMINATING);
-               pr_debug("isert_free_conn: After wait_event #1 >>>>>>>>>>>>\n");
+                         (isert_check_state(isert_conn, ISER_CONN_TERMINATING)));
+               wait_event(isert_conn->conn_wait,
+                         (isert_check_state(isert_conn, ISER_CONN_DOWN)));
+               isert_put_conn(isert_conn);
+               return;
+       }
+       if (isert_conn->state == ISER_CONN_INIT) {
+               mutex_unlock(&isert_conn->conn_mutex);
+               isert_put_conn(isert_conn);
+               return;
        }
+       pr_debug("isert_free_conn: wait_event conn_wait %d\n",
+                isert_conn->state);
+       mutex_unlock(&isert_conn->conn_mutex);
  
-       pr_debug("isert_free_conn: wait_event conn_wait %d\n", isert_conn->state);
-       wait_event(isert_conn->conn_wait, isert_conn->state == ISER_CONN_DOWN);
-       pr_debug("isert_free_conn: After wait_event #2 >>>>>>>>>>>>>>>>>>>>\n");
+       wait_event(isert_conn->conn_wait,
+                 (isert_check_state(isert_conn, ISER_CONN_DOWN)));
  
        isert_put_conn(isert_conn);
  }
diff --combined drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@@ -688,12 -688,8 +688,12 @@@ static int tcm_qla2xxx_queue_status(str
                 * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen
                 * for qla_tgt_xmit_response LLD code
                 */
 +              if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
 +                      se_cmd->se_cmd_flags &= ~SCF_OVERFLOW_BIT;
 +                      se_cmd->residual_count = 0;
 +              }
                se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
 -              se_cmd->residual_count = se_cmd->data_length;
 +              se_cmd->residual_count += se_cmd->data_length;
  
                cmd->bufflen = 0;
        }
        return qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
  }
  
- static int tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
+ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
  {
        struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
        struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
         * CTIO response packet.
         */
        qlt_xmit_tm_rsp(mcmd);
-       return 0;
  }
  
  /* Local pointer to allocated TCM configfs fabric module */
@@@ -799,12 -793,14 +797,14 @@@ static void tcm_qla2xxx_put_session(str
  
  static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
  {
-       tcm_qla2xxx_put_session(sess->se_sess);
+       assert_spin_locked(&sess->vha->hw->hardware_lock);
+       kref_put(&sess->se_sess->sess_kref, tcm_qla2xxx_release_session);
  }
  
  static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess)
  {
-       tcm_qla2xxx_shutdown_session(sess->se_sess);
+       assert_spin_locked(&sess->vha->hw->hardware_lock);
+       target_sess_cmd_list_set_waiting(sess->se_sess);
  }
  
  static struct se_node_acl *tcm_qla2xxx_make_nodeacl(
diff --combined drivers/target/iscsi/iscsi_target_configfs.c
@@@ -20,6 -20,7 +20,7 @@@
   ****************************************************************************/
  
  #include <linux/configfs.h>
+ #include <linux/ctype.h>
  #include <linux/export.h>
  #include <linux/inet.h>
  #include <target/target_core_base.h>
@@@ -78,11 -79,12 +79,12 @@@ static ssize_t lio_target_np_store_sctp
        struct iscsi_tpg_np *tpg_np = container_of(se_tpg_np,
                                struct iscsi_tpg_np, se_tpg_np);
        struct iscsi_tpg_np *tpg_np_sctp = NULL;
-       char *endptr;
        u32 op;
        int ret;
  
-       op = simple_strtoul(page, &endptr, 0);
+       ret = kstrtou32(page, 0, &op);
+       if (ret)
+               return ret;
        if ((op != 1) && (op != 0)) {
                pr_err("Illegal value for tpg_enable: %u\n", op);
                return -EINVAL;
@@@ -155,7 -157,7 +157,7 @@@ static ssize_t lio_target_np_store_iser
        struct iscsi_tpg_np *tpg_np_iser = NULL;
        char *endptr;
        u32 op;
 -      int rc;
 +      int rc = 0;
  
        op = simple_strtoul(page, &endptr, 0);
        if ((op != 1) && (op != 0)) {
                return -EINVAL;
  
        if (op) {
 -              int rc = request_module("ib_isert");
 -              if (rc != 0)
 +              rc = request_module("ib_isert");
 +              if (rc != 0) {
                        pr_warn("Unable to request_module for ib_isert\n");
 +                      rc = 0;
 +              }
  
                tpg_np_iser = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
                                np->np_ip, tpg_np, ISCSI_INFINIBAND);
 -              if (!tpg_np_iser || IS_ERR(tpg_np_iser))
 +              if (IS_ERR(tpg_np_iser)) {
 +                      rc = PTR_ERR(tpg_np_iser);
                        goto out;
 +              }
        } else {
                tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND);
 -              if (!tpg_np_iser)
 -                      goto out;
 -
 -              rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser);
 -              if (rc < 0)
 -                      goto out;
 +              if (tpg_np_iser) {
 +                      rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser);
 +                      if (rc < 0)
 +                              goto out;
 +              }
        }
  
 -      printk("lio_target_np_store_iser() done, op: %d\n", op);
 -
        iscsit_put_tpg(tpg);
        return count;
  out:
        iscsit_put_tpg(tpg);
 -      return -EINVAL;
 +      return rc;
  }
  
  TF_NP_BASE_ATTR(lio_target, iser, S_IRUGO | S_IWUSR);
@@@ -382,11 -383,12 +384,12 @@@ static ssize_t iscsi_nacl_attrib_store_
  {                                                                     \
        struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \
                                        se_node_acl);                   \
-       char *endptr;                                                   \
        u32 val;                                                        \
        int ret;                                                        \
                                                                        \
-       val = simple_strtoul(page, &endptr, 0);                         \
+       ret = kstrtou32(page, 0, &val);                                 \
+       if (ret)                                                        \
+               return ret;                                             \
        ret = iscsit_na_##name(nacl, val);                              \
        if (ret < 0)                                                    \
                return ret;                                             \
@@@ -474,7 -476,7 +477,7 @@@ static ssize_t __iscsi_##prefix##_store
        if (!capable(CAP_SYS_ADMIN))                                    \
                return -EPERM;                                          \
                                                                        \
-       snprintf(auth->name, PAGE_SIZE, "%s", page);                    \
+       snprintf(auth->name, sizeof(auth->name), "%s", page);           \
        if (!strncmp("NULL", auth->name, 4))                            \
                auth->naf_flags &= ~flags;                              \
        else                                                            \
@@@ -789,11 -791,12 +792,12 @@@ static ssize_t lio_target_nacl_store_cm
        struct iscsi_portal_group *tpg = container_of(se_tpg,
                        struct iscsi_portal_group, tpg_se_tpg);
        struct config_item *acl_ci, *tpg_ci, *wwn_ci;
-       char *endptr;
        u32 cmdsn_depth = 0;
        int ret;
  
-       cmdsn_depth = simple_strtoul(page, &endptr, 0);
+       ret = kstrtou32(page, 0, &cmdsn_depth);
+       if (ret)
+               return ret;
        if (cmdsn_depth > TA_DEFAULT_CMDSN_DEPTH_MAX) {
                pr_err("Passed cmdsn_depth: %u exceeds"
                        " TA_DEFAULT_CMDSN_DEPTH_MAX: %u\n", cmdsn_depth,
@@@ -977,14 -980,15 +981,15 @@@ static ssize_t iscsi_tpg_attrib_store_#
  {                                                                     \
        struct iscsi_portal_group *tpg = container_of(se_tpg,           \
                        struct iscsi_portal_group, tpg_se_tpg); \
-       char *endptr;                                                   \
        u32 val;                                                        \
        int ret;                                                        \
                                                                        \
        if (iscsit_get_tpg(tpg) < 0)                                    \
                return -EINVAL;                                         \
                                                                        \
-       val = simple_strtoul(page, &endptr, 0);                         \
+       ret = kstrtou32(page, 0, &val);                                 \
+       if (ret)                                                        \
+               goto out;                                               \
        ret = iscsit_ta_##name(tpg, val);                               \
        if (ret < 0)                                                    \
                goto out;                                               \
@@@ -1053,6 -1057,131 +1058,131 @@@ static struct configfs_attribute *lio_t
  
  /* End items for lio_target_tpg_attrib_cit */
  
+ /* Start items for lio_target_tpg_auth_cit */
+ #define __DEF_TPG_AUTH_STR(prefix, name, flags)                                       \
+ static ssize_t __iscsi_##prefix##_show_##name(                                        \
+       struct se_portal_group *se_tpg,                                         \
+       char *page)                                                             \
+ {                                                                             \
+       struct iscsi_portal_group *tpg = container_of(se_tpg,                   \
+                               struct iscsi_portal_group, tpg_se_tpg);         \
+       struct iscsi_node_auth *auth = &tpg->tpg_demo_auth;                     \
+                                                                               \
+       if (!capable(CAP_SYS_ADMIN))                                            \
+               return -EPERM;                                                  \
+                                                                               \
+       return snprintf(page, PAGE_SIZE, "%s\n", auth->name);                   \
+ }                                                                             \
+                                                                               \
+ static ssize_t __iscsi_##prefix##_store_##name(                                       \
+       struct se_portal_group *se_tpg,                                         \
+       const char *page,                                                       \
+       size_t count)                                                           \
+ {                                                                             \
+       struct iscsi_portal_group *tpg = container_of(se_tpg,                   \
+                               struct iscsi_portal_group, tpg_se_tpg);         \
+       struct iscsi_node_auth *auth = &tpg->tpg_demo_auth;                     \
+                                                                               \
+       if (!capable(CAP_SYS_ADMIN))                                            \
+               return -EPERM;                                                  \
+                                                                               \
+       snprintf(auth->name, sizeof(auth->name), "%s", page);                   \
+       if (!(strncmp("NULL", auth->name, 4)))                                  \
+               auth->naf_flags &= ~flags;                                      \
+       else                                                                    \
+               auth->naf_flags |= flags;                                       \
+                                                                               \
+       if ((auth->naf_flags & NAF_USERID_IN_SET) &&                            \
+           (auth->naf_flags & NAF_PASSWORD_IN_SET))                            \
+               auth->authenticate_target = 1;                                  \
+       else                                                                    \
+               auth->authenticate_target = 0;                                  \
+                                                                               \
+       return count;                                                           \
+ }
+ #define __DEF_TPG_AUTH_INT(prefix, name)                                      \
+ static ssize_t __iscsi_##prefix##_show_##name(                                        \
+       struct se_portal_group *se_tpg,                                         \
+       char *page)                                                             \
+ {                                                                             \
+       struct iscsi_portal_group *tpg = container_of(se_tpg,                   \
+                               struct iscsi_portal_group, tpg_se_tpg);         \
+       struct iscsi_node_auth *auth = &tpg->tpg_demo_auth;                     \
+                                                                               \
+       if (!capable(CAP_SYS_ADMIN))                                            \
+               return -EPERM;                                                  \
+                                                                               \
+       return snprintf(page, PAGE_SIZE, "%d\n", auth->name);                   \
+ }
+ #define DEF_TPG_AUTH_STR(name, flags)                                         \
+       __DEF_TPG_AUTH_STR(tpg_auth, name, flags)                               \
+ static ssize_t iscsi_tpg_auth_show_##name(                                    \
+       struct se_portal_group *se_tpg,                                         \
+       char *page)                                                             \
+ {                                                                             \
+       return __iscsi_tpg_auth_show_##name(se_tpg, page);                      \
+ }                                                                             \
+                                                                               \
+ static ssize_t iscsi_tpg_auth_store_##name(                                   \
+       struct se_portal_group *se_tpg,                                         \
+       const char *page,                                                       \
+       size_t count)                                                           \
+ {                                                                             \
+       return __iscsi_tpg_auth_store_##name(se_tpg, page, count);              \
+ }
+ #define DEF_TPG_AUTH_INT(name)                                                        \
+       __DEF_TPG_AUTH_INT(tpg_auth, name)                                      \
+ static ssize_t iscsi_tpg_auth_show_##name(                                    \
+       struct se_portal_group *se_tpg,                                         \
+       char *page)                                                             \
+ {                                                                             \
+       return __iscsi_tpg_auth_show_##name(se_tpg, page);                      \
+ }
+ #define TPG_AUTH_ATTR(_name, _mode) TF_TPG_AUTH_ATTR(iscsi, _name, _mode);
+ #define TPG_AUTH_ATTR_RO(_name) TF_TPG_AUTH_ATTR_RO(iscsi, _name);
+ /*
+  *  * One-way authentication userid
+  *   */
+ DEF_TPG_AUTH_STR(userid, NAF_USERID_SET);
+ TPG_AUTH_ATTR(userid, S_IRUGO | S_IWUSR);
+ /*
+  *  * One-way authentication password
+  *   */
+ DEF_TPG_AUTH_STR(password, NAF_PASSWORD_SET);
+ TPG_AUTH_ATTR(password, S_IRUGO | S_IWUSR);
+ /*
+  *  * Enforce mutual authentication
+  *   */
+ DEF_TPG_AUTH_INT(authenticate_target);
+ TPG_AUTH_ATTR_RO(authenticate_target);
+ /*
+  *  * Mutual authentication userid
+  *   */
+ DEF_TPG_AUTH_STR(userid_mutual, NAF_USERID_IN_SET);
+ TPG_AUTH_ATTR(userid_mutual, S_IRUGO | S_IWUSR);
+ /*
+  *  * Mutual authentication password
+  *   */
+ DEF_TPG_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET);
+ TPG_AUTH_ATTR(password_mutual, S_IRUGO | S_IWUSR);
+ static struct configfs_attribute *lio_target_tpg_auth_attrs[] = {
+       &iscsi_tpg_auth_userid.attr,
+       &iscsi_tpg_auth_password.attr,
+       &iscsi_tpg_auth_authenticate_target.attr,
+       &iscsi_tpg_auth_userid_mutual.attr,
+       &iscsi_tpg_auth_password_mutual.attr,
+       NULL,
+ };
+ /* End items for lio_target_tpg_auth_cit */
  /* Start items for lio_target_tpg_param_cit */
  
  #define DEF_TPG_PARAM(name)                                           \
@@@ -1087,13 -1216,14 +1217,14 @@@ static ssize_t iscsi_tpg_param_store_##
        struct iscsi_portal_group *tpg = container_of(se_tpg,           \
                        struct iscsi_portal_group, tpg_se_tpg);         \
        char *buf;                                                      \
-       int ret;                                                        \
+       int ret, len;                                                   \
                                                                        \
        buf = kzalloc(PAGE_SIZE, GFP_KERNEL);                           \
        if (!buf)                                                       \
                return -ENOMEM;                                         \
-       snprintf(buf, PAGE_SIZE, "%s=%s", __stringify(name), page);     \
-       buf[strlen(buf)-1] = '\0'; /* Kill newline */                   \
+       len = snprintf(buf, PAGE_SIZE, "%s=%s", __stringify(name), page);       \
+       if (isspace(buf[len-1]))                                        \
+               buf[len-1] = '\0'; /* Kill newline */                   \
                                                                        \
        if (iscsit_get_tpg(tpg) < 0) {                                  \
                kfree(buf);                                             \
@@@ -1230,11 -1360,12 +1361,12 @@@ static ssize_t lio_target_tpg_store_ena
  {
        struct iscsi_portal_group *tpg = container_of(se_tpg,
                        struct iscsi_portal_group, tpg_se_tpg);
-       char *endptr;
        u32 op;
-       int ret = 0;
+       int ret;
  
-       op = simple_strtoul(page, &endptr, 0);
+       ret = kstrtou32(page, 0, &op);
+       if (ret)
+               return ret;
        if ((op != 1) && (op != 0)) {
                pr_err("Illegal value for tpg_enable: %u\n", op);
                return -EINVAL;
@@@ -1282,15 -1413,15 +1414,15 @@@ static struct se_portal_group *lio_targ
  {
        struct iscsi_portal_group *tpg;
        struct iscsi_tiqn *tiqn;
-       char *tpgt_str, *end_ptr;
-       int ret = 0;
-       unsigned short int tpgt;
+       char *tpgt_str;
+       int ret;
+       u16 tpgt;
  
        tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
        /*
         * Only tpgt_# directory groups can be created below
         * target/iscsi/iqn.superturodiskarry/
-       */
+        */
        tpgt_str = strstr(name, "tpgt_");
        if (!tpgt_str) {
                pr_err("Unable to locate \"tpgt_#\" directory"
                return NULL;
        }
        tpgt_str += 5; /* Skip ahead of "tpgt_" */
-       tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0);
+       ret = kstrtou16(tpgt_str, 0, &tpgt);
+       if (ret)
+               return NULL;
  
        tpg = iscsit_alloc_portal_group(tiqn, tpgt);
        if (!tpg)
@@@ -1506,10 -1639,12 +1640,12 @@@ static ssize_t iscsi_disc_store_enforce
  {
        struct iscsi_param *param;
        struct iscsi_portal_group *discovery_tpg = iscsit_global->discovery_tpg;
-       char *endptr;
        u32 op;
+       int err;
  
-       op = simple_strtoul(page, &endptr, 0);
+       err = kstrtou32(page, 0, &op);
+       if (err)
+               return -EINVAL;
        if ((op != 1) && (op != 0)) {
                pr_err("Illegal value for enforce_discovery_auth:"
                                " %u\n", op);
@@@ -1655,13 -1790,12 +1791,12 @@@ static int lio_queue_status(struct se_c
        return 0;
  }
  
- static int lio_queue_tm_rsp(struct se_cmd *se_cmd)
+ static void lio_queue_tm_rsp(struct se_cmd *se_cmd)
  {
        struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
  
        cmd->i_state = ISTATE_SEND_TASKMGTRSP;
        iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
-       return 0;
  }
  
  static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg)
@@@ -1866,6 -2000,7 +2001,7 @@@ int iscsi_target_register_configfs(void
        TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs;
        TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs;
        TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs;
+       TF_CIT_TMPL(fabric)->tfc_tpg_auth_cit.ct_attrs = lio_target_tpg_auth_attrs;
        TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs;
        TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs;
        TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs;
diff --combined drivers/target/iscsi/iscsi_target_erl0.c
@@@ -746,13 -746,12 +746,12 @@@ int iscsit_check_post_dataout
                if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
                        pr_err("Unable to recover from DataOUT CRC"
                                " failure while ERL=0, closing session.\n");
-                       iscsit_add_reject_from_cmd(ISCSI_REASON_DATA_DIGEST_ERROR,
-                                       1, 0, buf, cmd);
+                       iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR,
+                                         buf);
                        return DATAOUT_CANNOT_RECOVER;
                }
  
-               iscsit_add_reject_from_cmd(ISCSI_REASON_DATA_DIGEST_ERROR,
-                               0, 0, buf, cmd);
+               iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR, buf);
                return iscsit_dataout_post_crc_failed(cmd, buf);
        }
  }
@@@ -842,11 -841,11 +841,11 @@@ int iscsit_stop_time2retain_timer(struc
                return 0;
  
        sess->time2retain_timer_flags |= ISCSI_TF_STOP;
 -      spin_unlock_bh(&se_tpg->session_lock);
 +      spin_unlock(&se_tpg->session_lock);
  
        del_timer_sync(&sess->time2retain_timer);
  
 -      spin_lock_bh(&se_tpg->session_lock);
 +      spin_lock(&se_tpg->session_lock);
        sess->time2retain_timer_flags &= ~ISCSI_TF_RUNNING;
        pr_debug("Stopped Time2Retain Timer for SID: %u\n",
                        sess->sid);
@@@ -909,6 -908,7 +908,7 @@@ void iscsit_cause_connection_reinstatem
        wait_for_completion(&conn->conn_wait_comp);
        complete(&conn->conn_post_wait_comp);
  }
+ EXPORT_SYMBOL(iscsit_cause_connection_reinstatement);
  
  void iscsit_fall_back_to_erl0(struct iscsi_session *sess)
  {
diff --combined drivers/target/iscsi/iscsi_target_nego.c
@@@ -112,6 -112,7 +112,7 @@@ static u32 iscsi_handle_authentication
        struct iscsi_session *sess = conn->sess;
        struct iscsi_node_auth *auth;
        struct iscsi_node_acl *iscsi_nacl;
+       struct iscsi_portal_group *iscsi_tpg;
        struct se_node_acl *se_nacl;
  
        if (!sess->sess_ops->SessionType) {
                        return -1;
                }
  
-               auth = ISCSI_NODE_AUTH(iscsi_nacl);
+               if (se_nacl->dynamic_node_acl) {
+                       iscsi_tpg = container_of(se_nacl->se_tpg,
+                                       struct iscsi_portal_group, tpg_se_tpg);
+                       auth = &iscsi_tpg->tpg_demo_auth;
+               } else {
+                       iscsi_nacl = container_of(se_nacl, struct iscsi_node_acl,
+                                                 se_node_acl);
+                       auth = ISCSI_NODE_AUTH(iscsi_nacl);
+               }
        } else {
                /*
                 * For SessionType=Discovery
@@@ -721,6 -732,9 +732,6 @@@ int iscsi_target_locate_portal
  
                start += strlen(key) + strlen(value) + 2;
        }
 -
 -      printk("i_buf: %s, s_buf: %s, t_buf: %s\n", i_buf, s_buf, t_buf);
 -
        /*
         * See 5.3.  Login Phase.
         */
diff --combined drivers/vhost/scsi.c
@@@ -49,6 -49,7 +49,6 @@@
  #include <linux/llist.h>
  #include <linux/bitmap.h>
  
 -#include "vhost.c"
  #include "vhost.h"
  
  #define TCM_VHOST_VERSION  "v0.1"
@@@ -115,6 -116,7 +115,6 @@@ struct tcm_vhost_nacl 
        struct se_node_acl se_node_acl;
  };
  
 -struct vhost_scsi;
  struct tcm_vhost_tpg {
        /* Vhost port target portal group tag for TCM */
        u16 tport_tpgt;
@@@ -216,7 -218,7 +216,7 @@@ static int iov_num_pages(struct iovec *
               ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
  }
  
 -void tcm_vhost_done_inflight(struct kref *kref)
 +static void tcm_vhost_done_inflight(struct kref *kref)
  {
        struct vhost_scsi_inflight *inflight;
  
@@@ -327,12 -329,11 +327,12 @@@ static u32 tcm_vhost_get_default_depth(
        return 1;
  }
  
 -static u32 tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
 -      struct se_node_acl *se_nacl,
 -      struct t10_pr_registration *pr_reg,
 -      int *format_code,
 -      unsigned char *buf)
 +static u32
 +tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
 +                            struct se_node_acl *se_nacl,
 +                            struct t10_pr_registration *pr_reg,
 +                            int *format_code,
 +                            unsigned char *buf)
  {
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
                        format_code, buf);
  }
  
 -static u32 tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
 -      struct se_node_acl *se_nacl,
 -      struct t10_pr_registration *pr_reg,
 -      int *format_code)
 +static u32
 +tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
 +                                struct se_node_acl *se_nacl,
 +                                struct t10_pr_registration *pr_reg,
 +                                int *format_code)
  {
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
                        format_code);
  }
  
 -static char *tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
 -      const char *buf,
 -      u32 *out_tid_len,
 -      char **port_nexus_ptr)
 +static char *
 +tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
 +                                  const char *buf,
 +                                  u32 *out_tid_len,
 +                                  char **port_nexus_ptr)
  {
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
                        port_nexus_ptr);
  }
  
 -static struct se_node_acl *tcm_vhost_alloc_fabric_acl(
 -      struct se_portal_group *se_tpg)
 +static struct se_node_acl *
 +tcm_vhost_alloc_fabric_acl(struct se_portal_group *se_tpg)
  {
        struct tcm_vhost_nacl *nacl;
  
        return &nacl->se_node_acl;
  }
  
 -static void tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
 -      struct se_node_acl *se_nacl)
 +static void
 +tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
 +                           struct se_node_acl *se_nacl)
  {
        struct tcm_vhost_nacl *nacl = container_of(se_nacl,
                        struct tcm_vhost_nacl, se_node_acl);
@@@ -448,7 -446,19 +448,19 @@@ static u32 tcm_vhost_tpg_get_inst_index
  
  static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
  {
-       return;
+       struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
+                               struct tcm_vhost_cmd, tvc_se_cmd);
+       if (tv_cmd->tvc_sgl_count) {
+               u32 i;
+               for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
+                       put_page(sg_page(&tv_cmd->tvc_sgl[i]));
+               kfree(tv_cmd->tvc_sgl);
+         }
+       tcm_vhost_put_inflight(tv_cmd->inflight);
+       kfree(tv_cmd);
  }
  
  static int tcm_vhost_shutdown_session(struct se_session *se_sess)
@@@ -493,34 -503,34 +505,34 @@@ static int tcm_vhost_get_cmd_state(stru
        return 0;
  }
  
 -static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd)
 +static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd)
  {
 -      struct vhost_scsi *vs = tv_cmd->tvc_vhost;
 +      struct vhost_scsi *vs = cmd->tvc_vhost;
  
 -      llist_add(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
 +      llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
  
        vhost_work_queue(&vs->dev, &vs->vs_completion_work);
  }
  
  static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
  {
 -      struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
 +      struct tcm_vhost_cmd *cmd = container_of(se_cmd,
                                struct tcm_vhost_cmd, tvc_se_cmd);
 -      vhost_scsi_complete_cmd(tv_cmd);
 +      vhost_scsi_complete_cmd(cmd);
        return 0;
  }
  
  static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
  {
 -      struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
 +      struct tcm_vhost_cmd *cmd = container_of(se_cmd,
                                struct tcm_vhost_cmd, tvc_se_cmd);
 -      vhost_scsi_complete_cmd(tv_cmd);
 +      vhost_scsi_complete_cmd(cmd);
        return 0;
  }
  
- static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
+ static void tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
  {
-       return 0;
+       return;
  }
  
  static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
        kfree(evt);
  }
  
 -static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs,
 -      u32 event, u32 reason)
 +static struct tcm_vhost_evt *
 +tcm_vhost_allocate_evt(struct vhost_scsi *vs,
 +                     u32 event, u32 reason)
  {
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct tcm_vhost_evt *evt;
        return evt;
  }
  
 -static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
 +static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *cmd)
  {
 -      struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
 +      struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
  
        /* TODO locking against target/backend threads? */
-       transport_generic_free_cmd(se_cmd, 1);
+       transport_generic_free_cmd(se_cmd, 0);
  
-       if (cmd->tvc_sgl_count) {
-               u32 i;
-               for (i = 0; i < cmd->tvc_sgl_count; i++)
-                       put_page(sg_page(&cmd->tvc_sgl[i]));
-               kfree(cmd->tvc_sgl);
-       }
-       tcm_vhost_put_inflight(cmd->inflight);
+ }
  
-       kfree(cmd);
+ static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
+ {
+       return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
  }
  
 -static void tcm_vhost_do_evt_work(struct vhost_scsi *vs,
 -      struct tcm_vhost_evt *evt)
 +static void
 +tcm_vhost_do_evt_work(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
  {
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct virtio_scsi_event *event = &evt->event;
@@@ -655,7 -658,7 +661,7 @@@ static void vhost_scsi_complete_cmd_wor
                                        vs_completion_work);
        DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
        struct virtio_scsi_cmd_resp v_rsp;
 -      struct tcm_vhost_cmd *tv_cmd;
 +      struct tcm_vhost_cmd *cmd;
        struct llist_node *llnode;
        struct se_cmd *se_cmd;
        int ret, vq;
        bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
        llnode = llist_del_all(&vs->vs_completion_list);
        while (llnode) {
 -              tv_cmd = llist_entry(llnode, struct tcm_vhost_cmd,
 +              cmd = llist_entry(llnode, struct tcm_vhost_cmd,
                                     tvc_completion_list);
                llnode = llist_next(llnode);
 -              se_cmd = &tv_cmd->tvc_se_cmd;
 +              se_cmd = &cmd->tvc_se_cmd;
  
                pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
 -                      tv_cmd, se_cmd->residual_count, se_cmd->scsi_status);
 +                      cmd, se_cmd->residual_count, se_cmd->scsi_status);
  
                memset(&v_rsp, 0, sizeof(v_rsp));
                v_rsp.resid = se_cmd->residual_count;
                /* TODO is status_qualifier field needed? */
                v_rsp.status = se_cmd->scsi_status;
                v_rsp.sense_len = se_cmd->scsi_sense_length;
 -              memcpy(v_rsp.sense, tv_cmd->tvc_sense_buf,
 +              memcpy(v_rsp.sense, cmd->tvc_sense_buf,
                       v_rsp.sense_len);
 -              ret = copy_to_user(tv_cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
 +              ret = copy_to_user(cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
                if (likely(ret == 0)) {
                        struct vhost_scsi_virtqueue *q;
 -                      vhost_add_used(tv_cmd->tvc_vq, tv_cmd->tvc_vq_desc, 0);
 -                      q = container_of(tv_cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
 +                      vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
 +                      q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
                        vq = q - vs->vqs;
                        __set_bit(vq, signal);
                } else
                        pr_err("Faulted on virtio_scsi_cmd_resp\n");
  
 -              vhost_scsi_free_cmd(tv_cmd);
 +              vhost_scsi_free_cmd(cmd);
        }
  
        vq = -1;
                vhost_signal(&vs->dev, &vs->vqs[vq].vq);
  }
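
For orientation, the signalling step elided between the completion loop and the vhost_signal() call above walks the per-vq bitmap filled in with __set_bit(); a sketch assuming the usual find_next_bit() idiom (not quoted from this hunk):

       vq = -1;
       while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
                       < VHOST_SCSI_MAX_VQ)
               vhost_signal(&vs->dev, &vs->vqs[vq].vq);
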
  
 -static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
 -      struct vhost_virtqueue *vq,
 -      struct tcm_vhost_tpg *tv_tpg,
 -      struct virtio_scsi_cmd_req *v_req,
 -      u32 exp_data_len,
 -      int data_direction)
 +static struct tcm_vhost_cmd *
 +vhost_scsi_allocate_cmd(struct vhost_virtqueue *vq,
 +                      struct tcm_vhost_tpg *tpg,
 +                      struct virtio_scsi_cmd_req *v_req,
 +                      u32 exp_data_len,
 +                      int data_direction)
  {
 -      struct tcm_vhost_cmd *tv_cmd;
 +      struct tcm_vhost_cmd *cmd;
        struct tcm_vhost_nexus *tv_nexus;
  
 -      tv_nexus = tv_tpg->tpg_nexus;
 +      tv_nexus = tpg->tpg_nexus;
        if (!tv_nexus) {
                pr_err("Unable to locate active struct tcm_vhost_nexus\n");
                return ERR_PTR(-EIO);
        }
  
 -      tv_cmd = kzalloc(sizeof(struct tcm_vhost_cmd), GFP_ATOMIC);
 -      if (!tv_cmd) {
 +      cmd = kzalloc(sizeof(struct tcm_vhost_cmd), GFP_ATOMIC);
 +      if (!cmd) {
                pr_err("Unable to allocate struct tcm_vhost_cmd\n");
                return ERR_PTR(-ENOMEM);
        }
 -      tv_cmd->tvc_tag = v_req->tag;
 -      tv_cmd->tvc_task_attr = v_req->task_attr;
 -      tv_cmd->tvc_exp_data_len = exp_data_len;
 -      tv_cmd->tvc_data_direction = data_direction;
 -      tv_cmd->tvc_nexus = tv_nexus;
 -      tv_cmd->inflight = tcm_vhost_get_inflight(vq);
 +      cmd->tvc_tag = v_req->tag;
 +      cmd->tvc_task_attr = v_req->task_attr;
 +      cmd->tvc_exp_data_len = exp_data_len;
 +      cmd->tvc_data_direction = data_direction;
 +      cmd->tvc_nexus = tv_nexus;
 +      cmd->inflight = tcm_vhost_get_inflight(vq);
  
 -      return tv_cmd;
 +      return cmd;
  }
  
  /*
   *
   * Returns the number of scatterlist entries used or -errno on error.
   */
 -static int vhost_scsi_map_to_sgl(struct scatterlist *sgl,
 -      unsigned int sgl_count, struct iovec *iov, int write)
 +static int
 +vhost_scsi_map_to_sgl(struct scatterlist *sgl,
 +                    unsigned int sgl_count,
 +                    struct iovec *iov,
 +                    int write)
  {
        unsigned int npages = 0, pages_nr, offset, nbytes;
        struct scatterlist *sg = sgl;
@@@ -781,11 -781,8 +787,11 @@@ out
        return ret;
  }
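
Most of vhost_scsi_map_to_sgl() falls outside this hunk; its job is to pin the guest pages backing one iovec and point scatterlist entries at them. A self-contained sketch of that conversion, assuming the 3.x-era get_user_pages_fast()/sg_set_page() interfaces and the driver's existing includes (illustrative only; the driver's allocation context, error handling and partial-pin cleanup differ):

       static int example_iov_to_sg(struct scatterlist *sg, struct iovec *iov,
                                    int write)
       {
               unsigned long uaddr = (unsigned long)iov->iov_base;
               size_t len = iov->iov_len;
               unsigned int offset = uaddr & ~PAGE_MASK;
               int nr_pages = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
               struct page **pages;
               int i, got;

               /* Caller must provide at least nr_pages sg entries. */
               pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
               if (!pages)
                       return -ENOMEM;

               got = get_user_pages_fast(uaddr, nr_pages, write, pages);
               if (got < 0) {
                       kfree(pages);
                       return got;
               }

               for (i = 0; i < got; i++) {
                       unsigned int nbytes = min_t(size_t, PAGE_SIZE - offset, len);

                       sg_set_page(&sg[i], pages[i], nbytes, offset);
                       len -= nbytes;
                       offset = 0;
               }
               kfree(pages);
               return got;    /* number of sg entries used */
       }
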
  
 -static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
 -      struct iovec *iov, unsigned int niov, int write)
 +static int
 +vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
 +                        struct iovec *iov,
 +                        unsigned int niov,
 +                        int write)
  {
        int ret;
        unsigned int i;
  
        /* TODO overflow checking */
  
 -      sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);
 +      sg = kmalloc(sizeof(cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);
        if (!sg)
                return -ENOMEM;
        pr_debug("%s sg %p sgl_count %u is_err %d\n", __func__,
               sg, sgl_count, !sg);
        sg_init_table(sg, sgl_count);
  
 -      tv_cmd->tvc_sgl = sg;
 -      tv_cmd->tvc_sgl_count = sgl_count;
 +      cmd->tvc_sgl = sg;
 +      cmd->tvc_sgl_count = sgl_count;
  
        pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);
        for (i = 0; i < niov; i++) {
                ret = vhost_scsi_map_to_sgl(sg, sgl_count, &iov[i], write);
                if (ret < 0) {
 -                      for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
 -                              put_page(sg_page(&tv_cmd->tvc_sgl[i]));
 -                      kfree(tv_cmd->tvc_sgl);
 -                      tv_cmd->tvc_sgl = NULL;
 -                      tv_cmd->tvc_sgl_count = 0;
 +                      for (i = 0; i < cmd->tvc_sgl_count; i++)
 +                              put_page(sg_page(&cmd->tvc_sgl[i]));
 +                      kfree(cmd->tvc_sgl);
 +                      cmd->tvc_sgl = NULL;
 +                      cmd->tvc_sgl_count = 0;
                        return ret;
                }
  
  
  static void tcm_vhost_submission_work(struct work_struct *work)
  {
 -      struct tcm_vhost_cmd *tv_cmd =
 +      struct tcm_vhost_cmd *cmd =
                container_of(work, struct tcm_vhost_cmd, work);
        struct tcm_vhost_nexus *tv_nexus;
 -      struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
 +      struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
        struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL;
        int rc, sg_no_bidi = 0;
  
 -      if (tv_cmd->tvc_sgl_count) {
 -              sg_ptr = tv_cmd->tvc_sgl;
 +      if (cmd->tvc_sgl_count) {
 +              sg_ptr = cmd->tvc_sgl;
  /* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */
  #if 0
                if (se_cmd->se_cmd_flags & SCF_BIDI) {
        } else {
                sg_ptr = NULL;
        }
 -      tv_nexus = tv_cmd->tvc_nexus;
 +      tv_nexus = cmd->tvc_nexus;
  
        rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
 -                      tv_cmd->tvc_cdb, &tv_cmd->tvc_sense_buf[0],
 -                      tv_cmd->tvc_lun, tv_cmd->tvc_exp_data_len,
 -                      tv_cmd->tvc_task_attr, tv_cmd->tvc_data_direction,
 -                      TARGET_SCF_ACK_KREF, sg_ptr, tv_cmd->tvc_sgl_count,
 +                      cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
 +                      cmd->tvc_lun, cmd->tvc_exp_data_len,
 +                      cmd->tvc_task_attr, cmd->tvc_data_direction,
-                       0, sg_ptr, cmd->tvc_sgl_count,
++                      TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
                        sg_bidi_ptr, sg_no_bidi);
        if (rc < 0) {
                transport_send_check_condition_and_sense(se_cmd,
        }
  }
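
The flags argument resolved to TARGET_SCF_ACK_KREF above (one side of the merge had passed 0) is what ties this submission path to the check_stop_free/release_cmd split earlier in the diff. Roughly, and hedged as a description of standard target-core behaviour rather than code taken from this patch, the reference lifetime is:

       /*
        * target_submit_cmd_map_sgls(..., TARGET_SCF_ACK_KREF, ...)
        *     -> target core takes an extra se_cmd->cmd_kref reference
        * ... backend completes, vhost_scsi_complete_cmd_work() copies the
        *     virtio response to the guest, then calls vhost_scsi_free_cmd() ...
        * transport_generic_free_cmd(se_cmd, 0)
        *     -> ->check_stop_free() == vhost_scsi_check_stop_free()
        *         -> target_put_sess_cmd() drops that extra reference
        *             -> on the final put, ->release_cmd() frees the command
        */
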
  
 -static void vhost_scsi_send_bad_target(struct vhost_scsi *vs,
 -      struct vhost_virtqueue *vq, int head, unsigned out)
 +static void
 +vhost_scsi_send_bad_target(struct vhost_scsi *vs,
 +                         struct vhost_virtqueue *vq,
 +                         int head, unsigned out)
  {
        struct virtio_scsi_cmd_resp __user *resp;
        struct virtio_scsi_cmd_resp rsp;
                pr_err("Faulted on virtio_scsi_cmd_resp\n");
  }
  
 -static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 -      struct vhost_virtqueue *vq)
 +static void
 +vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
  {
        struct tcm_vhost_tpg **vs_tpg;
        struct virtio_scsi_cmd_req v_req;
 -      struct tcm_vhost_tpg *tv_tpg;
 -      struct tcm_vhost_cmd *tv_cmd;
 +      struct tcm_vhost_tpg *tpg;
 +      struct tcm_vhost_cmd *cmd;
        u32 exp_data_len, data_first, data_num, data_direction;
        unsigned out, in, i;
        int head, ret;
  
                /* Extract the tpgt */
                target = v_req.lun[1];
 -              tv_tpg = ACCESS_ONCE(vs_tpg[target]);
 +              tpg = ACCESS_ONCE(vs_tpg[target]);
  
                /* Target does not exist, fail the request */
 -              if (unlikely(!tv_tpg)) {
 +              if (unlikely(!tpg)) {
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }
                for (i = 0; i < data_num; i++)
                        exp_data_len += vq->iov[data_first + i].iov_len;
  
 -              tv_cmd = vhost_scsi_allocate_cmd(vq, tv_tpg, &v_req,
 +              cmd = vhost_scsi_allocate_cmd(vq, tpg, &v_req,
                                        exp_data_len, data_direction);
 -              if (IS_ERR(tv_cmd)) {
 +              if (IS_ERR(cmd)) {
                        vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n",
 -                                      PTR_ERR(tv_cmd));
 +                                      PTR_ERR(cmd));
                        goto err_cmd;
                }
                pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
 -                      ": %d\n", tv_cmd, exp_data_len, data_direction);
 +                      ": %d\n", cmd, exp_data_len, data_direction);
  
 -              tv_cmd->tvc_vhost = vs;
 -              tv_cmd->tvc_vq = vq;
 -              tv_cmd->tvc_resp = vq->iov[out].iov_base;
 +              cmd->tvc_vhost = vs;
 +              cmd->tvc_vq = vq;
 +              cmd->tvc_resp = vq->iov[out].iov_base;
  
                /*
 -               * Copy in the recieved CDB descriptor into tv_cmd->tvc_cdb
 +               * Copy in the received CDB descriptor into cmd->tvc_cdb
                 * that will be used by tcm_vhost_new_cmd_map() and down into
                 * target_setup_cmd_from_cdb()
                 */
 -              memcpy(tv_cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE);
 +              memcpy(cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE);
                /*
                 * Check that the received CDB size does not exceed our
                 * hardcoded max for tcm_vhost
                 */
                /* TODO what if cdb was too small for varlen cdb header? */
 -              if (unlikely(scsi_command_size(tv_cmd->tvc_cdb) >
 +              if (unlikely(scsi_command_size(cmd->tvc_cdb) >
                                        TCM_VHOST_MAX_CDB_SIZE)) {
                        vq_err(vq, "Received SCSI CDB with command_size: %d that"
                                " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
 -                              scsi_command_size(tv_cmd->tvc_cdb),
 +                              scsi_command_size(cmd->tvc_cdb),
                                TCM_VHOST_MAX_CDB_SIZE);
                        goto err_free;
                }
 -              tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
 +              cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
  
                pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
 -                      tv_cmd->tvc_cdb[0], tv_cmd->tvc_lun);
 +                      cmd->tvc_cdb[0], cmd->tvc_lun);
  
                if (data_direction != DMA_NONE) {
 -                      ret = vhost_scsi_map_iov_to_sgl(tv_cmd,
 +                      ret = vhost_scsi_map_iov_to_sgl(cmd,
                                        &vq->iov[data_first], data_num,
                                        data_direction == DMA_TO_DEVICE);
                        if (unlikely(ret)) {
                 * complete the virtio-scsi request in TCM callback context via
                 * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
                 */
 -              tv_cmd->tvc_vq_desc = head;
 +              cmd->tvc_vq_desc = head;
                /*
                 * Dispatch tv_cmd descriptor for cmwq execution in process
                 * context provided by tcm_vhost_workqueue.  This also ensures
                 * tv_cmd is executed on the same kworker CPU as this vhost
                 * thread to gain positive L2 cache locality effects.
                 */
 -              INIT_WORK(&tv_cmd->work, tcm_vhost_submission_work);
 -              queue_work(tcm_vhost_workqueue, &tv_cmd->work);
 +              INIT_WORK(&cmd->work, tcm_vhost_submission_work);
 +              queue_work(tcm_vhost_workqueue, &cmd->work);
        }
  
        mutex_unlock(&vq->mutex);
        return;
  
  err_free:
 -      vhost_scsi_free_cmd(tv_cmd);
 +      vhost_scsi_free_cmd(cmd);
  err_cmd:
        vhost_scsi_send_bad_target(vs, vq, head, out);
        mutex_unlock(&vq->mutex);
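
The LUN decode in this handler (((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF) follows the virtio-scsi single-level addressing convention: lun[0] = 1, lun[1] = target, lun[2..3] = 0x4000 | LUN, remaining bytes zero, so the 0x3FFF mask strips the 0x40 flag in byte 2. A worked example, assuming that encoding:

       /*
        * Guest addresses LUN 5 on target 2:
        *   v_req.lun = { 0x01, 0x02, 0x40, 0x05, 0, 0, 0, 0 }
        *   tvc_lun   = ((0x40 << 8) | 0x05) & 0x3FFF == 0x0005
        */
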
@@@ -1066,12 -1061,8 +1072,12 @@@ static void vhost_scsi_ctl_handle_kick(
        pr_debug("%s: The handling func for control queue.\n", __func__);
  }
  
 -static void tcm_vhost_send_evt(struct vhost_scsi *vs, struct tcm_vhost_tpg *tpg,
 -      struct se_lun *lun, u32 event, u32 reason)
 +static void
 +tcm_vhost_send_evt(struct vhost_scsi *vs,
 +                 struct tcm_vhost_tpg *tpg,
 +                 struct se_lun *lun,
 +                 u32 event,
 +                 u32 reason)
  {
        struct tcm_vhost_evt *evt;
  
@@@ -1161,12 -1152,12 +1167,12 @@@ static void vhost_scsi_flush(struct vho
   *  The lock nesting rule is:
   *    tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
   */
 -static int vhost_scsi_set_endpoint(
 -      struct vhost_scsi *vs,
 -      struct vhost_scsi_target *t)
 +static int
 +vhost_scsi_set_endpoint(struct vhost_scsi *vs,
 +                      struct vhost_scsi_target *t)
  {
        struct tcm_vhost_tport *tv_tport;
 -      struct tcm_vhost_tpg *tv_tpg;
 +      struct tcm_vhost_tpg *tpg;
        struct tcm_vhost_tpg **vs_tpg;
        struct vhost_virtqueue *vq;
        int index, ret, i, len;
        if (vs->vs_tpg)
                memcpy(vs_tpg, vs->vs_tpg, len);
  
 -      list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
 -              mutex_lock(&tv_tpg->tv_tpg_mutex);
 -              if (!tv_tpg->tpg_nexus) {
 -                      mutex_unlock(&tv_tpg->tv_tpg_mutex);
 +      list_for_each_entry(tpg, &tcm_vhost_list, tv_tpg_list) {
 +              mutex_lock(&tpg->tv_tpg_mutex);
 +              if (!tpg->tpg_nexus) {
 +                      mutex_unlock(&tpg->tv_tpg_mutex);
                        continue;
                }
 -              if (tv_tpg->tv_tpg_vhost_count != 0) {
 -                      mutex_unlock(&tv_tpg->tv_tpg_mutex);
 +              if (tpg->tv_tpg_vhost_count != 0) {
 +                      mutex_unlock(&tpg->tv_tpg_mutex);
                        continue;
                }
 -              tv_tport = tv_tpg->tport;
 +              tv_tport = tpg->tport;
  
                if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
 -                      if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) {
 +                      if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
                                kfree(vs_tpg);
 -                              mutex_unlock(&tv_tpg->tv_tpg_mutex);
 +                              mutex_unlock(&tpg->tv_tpg_mutex);
                                ret = -EEXIST;
                                goto out;
                        }
 -                      tv_tpg->tv_tpg_vhost_count++;
 -                      tv_tpg->vhost_scsi = vs;
 -                      vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
 +                      tpg->tv_tpg_vhost_count++;
 +                      tpg->vhost_scsi = vs;
 +                      vs_tpg[tpg->tport_tpgt] = tpg;
                        smp_mb__after_atomic_inc();
                        match = true;
                }
 -              mutex_unlock(&tv_tpg->tv_tpg_mutex);
 +              mutex_unlock(&tpg->tv_tpg_mutex);
        }
  
        if (match) {
        return ret;
  }
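
The lock nesting rule stated before vhost_scsi_set_endpoint() is easier to read flattened out of the diff; as a plain ordering sketch (illustrative, it mirrors the comment rather than any single call site):

       mutex_lock(&tcm_vhost_mutex);
       mutex_lock(&vs->dev.mutex);
       mutex_lock(&tpg->tv_tpg_mutex);
       mutex_lock(&vq->mutex);
       /* ... configure the endpoint ... */
       mutex_unlock(&vq->mutex);
       mutex_unlock(&tpg->tv_tpg_mutex);
       mutex_unlock(&vs->dev.mutex);
       mutex_unlock(&tcm_vhost_mutex);
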
  
 -static int vhost_scsi_clear_endpoint(
 -      struct vhost_scsi *vs,
 -      struct vhost_scsi_target *t)
 +static int
 +vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
 +                        struct vhost_scsi_target *t)
  {
        struct tcm_vhost_tport *tv_tport;
 -      struct tcm_vhost_tpg *tv_tpg;
 +      struct tcm_vhost_tpg *tpg;
        struct vhost_virtqueue *vq;
        bool match = false;
        int index, ret, i;
  
        for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
                target = i;
 -              tv_tpg = vs->vs_tpg[target];
 -              if (!tv_tpg)
 +              tpg = vs->vs_tpg[target];
 +              if (!tpg)
                        continue;
  
 -              mutex_lock(&tv_tpg->tv_tpg_mutex);
 -              tv_tport = tv_tpg->tport;
 +              mutex_lock(&tpg->tv_tpg_mutex);
 +              tv_tport = tpg->tport;
                if (!tv_tport) {
                        ret = -ENODEV;
                        goto err_tpg;
                }
  
                if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
 -                      pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu"
 +                      pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
                                " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
 -                              tv_tport->tport_name, tv_tpg->tport_tpgt,
 +                              tv_tport->tport_name, tpg->tport_tpgt,
                                t->vhost_wwpn, t->vhost_tpgt);
                        ret = -EINVAL;
                        goto err_tpg;
                }
 -              tv_tpg->tv_tpg_vhost_count--;
 -              tv_tpg->vhost_scsi = NULL;
 +              tpg->tv_tpg_vhost_count--;
 +              tpg->vhost_scsi = NULL;
                vs->vs_tpg[target] = NULL;
                match = true;
 -              mutex_unlock(&tv_tpg->tv_tpg_mutex);
 +              mutex_unlock(&tpg->tv_tpg_mutex);
        }
        if (match) {
                for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
        return 0;
  
  err_tpg:
 -      mutex_unlock(&tv_tpg->tv_tpg_mutex);
 +      mutex_unlock(&tpg->tv_tpg_mutex);
  err_dev:
        mutex_unlock(&vs->dev.mutex);
        mutex_unlock(&tcm_vhost_mutex);
@@@ -1353,70 -1344,68 +1359,70 @@@ static int vhost_scsi_set_features(stru
  
  static int vhost_scsi_open(struct inode *inode, struct file *f)
  {
 -      struct vhost_scsi *s;
 +      struct vhost_scsi *vs;
        struct vhost_virtqueue **vqs;
        int r, i;
  
 -      s = kzalloc(sizeof(*s), GFP_KERNEL);
 -      if (!s)
 +      vs = kzalloc(sizeof(*vs), GFP_KERNEL);
 +      if (!vs)
                return -ENOMEM;
  
        vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
        if (!vqs) {
 -              kfree(s);
 +              kfree(vs);
                return -ENOMEM;
        }
  
 -      vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
 -      vhost_work_init(&s->vs_event_work, tcm_vhost_evt_work);
 +      vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
 +      vhost_work_init(&vs->vs_event_work, tcm_vhost_evt_work);
  
 -      s->vs_events_nr = 0;
 -      s->vs_events_missed = false;
 +      vs->vs_events_nr = 0;
 +      vs->vs_events_missed = false;
  
 -      vqs[VHOST_SCSI_VQ_CTL] = &s->vqs[VHOST_SCSI_VQ_CTL].vq;
 -      vqs[VHOST_SCSI_VQ_EVT] = &s->vqs[VHOST_SCSI_VQ_EVT].vq;
 -      s->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
 -      s->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
 +      vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
 +      vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
 +      vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
 +      vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
        for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
 -              vqs[i] = &s->vqs[i].vq;
 -              s->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
 +              vqs[i] = &vs->vqs[i].vq;
 +              vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
        }
 -      r = vhost_dev_init(&s->dev, vqs, VHOST_SCSI_MAX_VQ);
 +      r = vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
  
 -      tcm_vhost_init_inflight(s, NULL);
 +      tcm_vhost_init_inflight(vs, NULL);
  
        if (r < 0) {
                kfree(vqs);
 -              kfree(s);
 +              kfree(vs);
                return r;
        }
  
 -      f->private_data = s;
 +      f->private_data = vs;
        return 0;
  }
  
  static int vhost_scsi_release(struct inode *inode, struct file *f)
  {
 -      struct vhost_scsi *s = f->private_data;
 +      struct vhost_scsi *vs = f->private_data;
        struct vhost_scsi_target t;
  
 -      mutex_lock(&s->dev.mutex);
 -      memcpy(t.vhost_wwpn, s->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
 -      mutex_unlock(&s->dev.mutex);
 -      vhost_scsi_clear_endpoint(s, &t);
 -      vhost_dev_stop(&s->dev);
 -      vhost_dev_cleanup(&s->dev, false);
 +      mutex_lock(&vs->dev.mutex);
 +      memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
 +      mutex_unlock(&vs->dev.mutex);
 +      vhost_scsi_clear_endpoint(vs, &t);
 +      vhost_dev_stop(&vs->dev);
 +      vhost_dev_cleanup(&vs->dev, false);
        /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
 -      vhost_scsi_flush(s);
 -      kfree(s->dev.vqs);
 -      kfree(s);
 +      vhost_scsi_flush(vs);
 +      kfree(vs->dev.vqs);
 +      kfree(vs);
        return 0;
  }
  
 -static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
 -                              unsigned long arg)
 +static long
 +vhost_scsi_ioctl(struct file *f,
 +               unsigned int ioctl,
 +               unsigned long arg)
  {
        struct vhost_scsi *vs = f->private_data;
        struct vhost_scsi_target backend;
@@@ -1532,9 -1521,8 +1538,9 @@@ static char *tcm_vhost_dump_proto_id(st
        return "Unknown";
  }
  
 -static void tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
 -      struct se_lun *lun, bool plug)
 +static void
 +tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
 +                struct se_lun *lun, bool plug)
  {
  
        struct vhost_scsi *vs = tpg->vhost_scsi;
@@@ -1574,18 -1562,18 +1580,18 @@@ static void tcm_vhost_hotunplug(struct 
  }
  
  static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
 -      struct se_lun *lun)
 +                             struct se_lun *lun)
  {
 -      struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
 +      struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
  
        mutex_lock(&tcm_vhost_mutex);
  
 -      mutex_lock(&tv_tpg->tv_tpg_mutex);
 -      tv_tpg->tv_tpg_port_count++;
 -      mutex_unlock(&tv_tpg->tv_tpg_mutex);
 +      mutex_lock(&tpg->tv_tpg_mutex);
 +      tpg->tv_tpg_port_count++;
 +      mutex_unlock(&tpg->tv_tpg_mutex);
  
 -      tcm_vhost_hotplug(tv_tpg, lun);
 +      tcm_vhost_hotplug(tpg, lun);
  
        mutex_unlock(&tcm_vhost_mutex);
  
  }
  
  static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
 -      struct se_lun *lun)
 +                                struct se_lun *lun)
  {
 -      struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
 +      struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
  
        mutex_lock(&tcm_vhost_mutex);
  
 -      mutex_lock(&tv_tpg->tv_tpg_mutex);
 -      tv_tpg->tv_tpg_port_count--;
 -      mutex_unlock(&tv_tpg->tv_tpg_mutex);
 +      mutex_lock(&tpg->tv_tpg_mutex);
 +      tpg->tv_tpg_port_count--;
 +      mutex_unlock(&tpg->tv_tpg_mutex);
  
 -      tcm_vhost_hotunplug(tv_tpg, lun);
 +      tcm_vhost_hotunplug(tpg, lun);
  
        mutex_unlock(&tcm_vhost_mutex);
  }
  
 -static struct se_node_acl *tcm_vhost_make_nodeacl(
 -      struct se_portal_group *se_tpg,
 -      struct config_group *group,
 -      const char *name)
 +static struct se_node_acl *
 +tcm_vhost_make_nodeacl(struct se_portal_group *se_tpg,
 +                     struct config_group *group,
 +                     const char *name)
  {
        struct se_node_acl *se_nacl, *se_nacl_new;
        struct tcm_vhost_nacl *nacl;
@@@ -1653,23 -1641,23 +1659,23 @@@ static void tcm_vhost_drop_nodeacl(stru
        kfree(nacl);
  }
  
 -static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tv_tpg,
 -      const char *name)
 +static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
 +                              const char *name)
  {
        struct se_portal_group *se_tpg;
        struct tcm_vhost_nexus *tv_nexus;
  
 -      mutex_lock(&tv_tpg->tv_tpg_mutex);
 -      if (tv_tpg->tpg_nexus) {
 -              mutex_unlock(&tv_tpg->tv_tpg_mutex);
 -              pr_debug("tv_tpg->tpg_nexus already exists\n");
 +      mutex_lock(&tpg->tv_tpg_mutex);
 +      if (tpg->tpg_nexus) {
 +              mutex_unlock(&tpg->tv_tpg_mutex);
 +              pr_debug("tpg->tpg_nexus already exists\n");
                return -EEXIST;
        }
 -      se_tpg = &tv_tpg->se_tpg;
 +      se_tpg = &tpg->se_tpg;
  
        tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
        if (!tv_nexus) {
 -              mutex_unlock(&tv_tpg->tv_tpg_mutex);
 +              mutex_unlock(&tpg->tv_tpg_mutex);
                pr_err("Unable to allocate struct tcm_vhost_nexus\n");
                return -ENOMEM;
        }
         */
        tv_nexus->tvn_se_sess = transport_init_session();
        if (IS_ERR(tv_nexus->tvn_se_sess)) {
 -              mutex_unlock(&tv_tpg->tv_tpg_mutex);
 +              mutex_unlock(&tpg->tv_tpg_mutex);
                kfree(tv_nexus);
                return -ENOMEM;
        }
        tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
                                se_tpg, (unsigned char *)name);
        if (!tv_nexus->tvn_se_sess->se_node_acl) {
 -              mutex_unlock(&tv_tpg->tv_tpg_mutex);
 +              mutex_unlock(&tpg->tv_tpg_mutex);
                pr_debug("core_tpg_check_initiator_node_acl() failed"
                                " for %s\n", name);
                transport_free_session(tv_nexus->tvn_se_sess);
         */
        __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
                        tv_nexus->tvn_se_sess, tv_nexus);
 -      tv_tpg->tpg_nexus = tv_nexus;
 +      tpg->tpg_nexus = tv_nexus;
  
 -      mutex_unlock(&tv_tpg->tv_tpg_mutex);
 +      mutex_unlock(&tpg->tv_tpg_mutex);
        return 0;
  }
  
@@@ -1758,40 -1746,40 +1764,40 @@@ static int tcm_vhost_drop_nexus(struct 
  }
  
  static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
 -      char *page)
 +                                      char *page)
  {
 -      struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
 +      struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_nexus *tv_nexus;
        ssize_t ret;
  
 -      mutex_lock(&tv_tpg->tv_tpg_mutex);
 -      tv_nexus = tv_tpg->tpg_nexus;
 +      mutex_lock(&tpg->tv_tpg_mutex);
 +      tv_nexus = tpg->tpg_nexus;
        if (!tv_nexus) {
 -              mutex_unlock(&tv_tpg->tv_tpg_mutex);
 +              mutex_unlock(&tpg->tv_tpg_mutex);
                return -ENODEV;
        }
        ret = snprintf(page, PAGE_SIZE, "%s\n",
                        tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
 -      mutex_unlock(&tv_tpg->tv_tpg_mutex);
 +      mutex_unlock(&tpg->tv_tpg_mutex);
  
        return ret;
  }
  
  static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
 -      const char *page,
 -      size_t count)
 +                                       const char *page,
 +                                       size_t count)
  {
 -      struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
 +      struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
 -      struct tcm_vhost_tport *tport_wwn = tv_tpg->tport;
 +      struct tcm_vhost_tport *tport_wwn = tpg->tport;
        unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr;
        int ret;
        /*
         * Shut down the active I_T nexus if 'NULL' is passed.
         */
        if (!strncmp(page, "NULL", 4)) {
 -              ret = tcm_vhost_drop_nexus(tv_tpg);
 +              ret = tcm_vhost_drop_nexus(tpg);
                return (!ret) ? count : ret;
        }
        /*
@@@ -1849,7 -1837,7 +1855,7 @@@ check_newline
        if (i_port[strlen(i_port)-1] == '\n')
                i_port[strlen(i_port)-1] = '\0';
  
 -      ret = tcm_vhost_make_nexus(tv_tpg, port_ptr);
 +      ret = tcm_vhost_make_nexus(tpg, port_ptr);
        if (ret < 0)
                return ret;
  
@@@ -1863,10 -1851,9 +1869,10 @@@ static struct configfs_attribute *tcm_v
        NULL,
  };
  
 -static struct se_portal_group *tcm_vhost_make_tpg(struct se_wwn *wwn,
 -      struct config_group *group,
 -      const char *name)
 +static struct se_portal_group *
 +tcm_vhost_make_tpg(struct se_wwn *wwn,
 +                 struct config_group *group,
 +                 const char *name)
  {
        struct tcm_vhost_tport *tport = container_of(wwn,
                        struct tcm_vhost_tport, tport_wwn);
@@@ -1922,10 -1909,9 +1928,10 @@@ static void tcm_vhost_drop_tpg(struct s
        kfree(tpg);
  }
  
 -static struct se_wwn *tcm_vhost_make_tport(struct target_fabric_configfs *tf,
 -      struct config_group *group,
 -      const char *name)
 +static struct se_wwn *
 +tcm_vhost_make_tport(struct target_fabric_configfs *tf,
 +                   struct config_group *group,
 +                   const char *name)
  {
        struct tcm_vhost_tport *tport;
        char *ptr;
@@@ -1995,9 -1981,9 +2001,9 @@@ static void tcm_vhost_drop_tport(struc
        kfree(tport);
  }
  
 -static ssize_t tcm_vhost_wwn_show_attr_version(
 -      struct target_fabric_configfs *tf,
 -      char *page)
 +static ssize_t
 +tcm_vhost_wwn_show_attr_version(struct target_fabric_configfs *tf,
 +                              char *page)
  {
        return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
                "on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
@@@ -2028,6 -2014,7 +2034,7 @@@ static struct target_core_fabric_ops tc
        .tpg_release_fabric_acl         = tcm_vhost_release_fabric_acl,
        .tpg_get_inst_index             = tcm_vhost_tpg_get_inst_index,
        .release_cmd                    = tcm_vhost_release_cmd,
+       .check_stop_free                = vhost_scsi_check_stop_free,
        .shutdown_session               = tcm_vhost_shutdown_session,
        .close_session                  = tcm_vhost_close_session,
        .sess_get_index                 = tcm_vhost_sess_get_index,