Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[deliverable/linux.git] / drivers / target / target_core_transport.c
index 7cfb519a83f9c932135680cc332755eaf54ffeff..0eaae23d12b576547fa51f25ed8f05579f6ec646 100644
@@ -66,15 +66,12 @@ struct kmem_cache *t10_alua_lu_gp_mem_cache;
 struct kmem_cache *t10_alua_tg_pt_gp_cache;
 struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
 
-static int transport_generic_write_pending(struct se_cmd *);
-static int transport_processing_thread(void *param);
-static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *);
 static void transport_complete_task_attr(struct se_cmd *cmd);
 static void transport_handle_queue_full(struct se_cmd *cmd,
                struct se_device *dev);
 static int transport_generic_get_mem(struct se_cmd *cmd);
+static int target_get_sess_cmd(struct se_session *, struct se_cmd *, bool);
 static void transport_put_cmd(struct se_cmd *cmd);
-static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
 static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
 static void target_complete_ok_work(struct work_struct *work);
 
@@ -195,14 +192,6 @@ u32 scsi_get_new_index(scsi_index_t type)
        return new_index;
 }
 
-static void transport_init_queue_obj(struct se_queue_obj *qobj)
-{
-       atomic_set(&qobj->queue_cnt, 0);
-       INIT_LIST_HEAD(&qobj->qobj_list);
-       init_waitqueue_head(&qobj->thread_wq);
-       spin_lock_init(&qobj->cmd_queue_lock);
-}
-
 void transport_subsystem_check_init(void)
 {
        int ret;
@@ -243,7 +232,6 @@ struct se_session *transport_init_session(void)
        INIT_LIST_HEAD(&se_sess->sess_list);
        INIT_LIST_HEAD(&se_sess->sess_acl_list);
        INIT_LIST_HEAD(&se_sess->sess_cmd_list);
-       INIT_LIST_HEAD(&se_sess->sess_wait_list);
        spin_lock_init(&se_sess->sess_cmd_lock);
        kref_init(&se_sess->sess_kref);
 
@@ -468,18 +456,7 @@ static void target_remove_from_state_list(struct se_cmd *cmd)
        spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 }
 
-/*     transport_cmd_check_stop():
- *
- *     'transport_off = 1' determines if CMD_T_ACTIVE should be cleared.
- *     'transport_off = 2' determines if task_dev_state should be removed.
- *
- *     A non-zero u8 t_state sets cmd->t_state.
- *     Returns 1 when command is stopped, else 0.
- */
-static int transport_cmd_check_stop(
-       struct se_cmd *cmd,
-       int transport_off,
-       u8 t_state)
+static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists)
 {
        unsigned long flags;
 
@@ -493,13 +470,23 @@ static int transport_cmd_check_stop(
                        __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
 
                cmd->transport_state &= ~CMD_T_ACTIVE;
-               if (transport_off == 2)
+               if (remove_from_lists)
                        target_remove_from_state_list(cmd);
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
                complete(&cmd->transport_lun_stop_comp);
                return 1;
        }
+
+       if (remove_from_lists) {
+               target_remove_from_state_list(cmd);
+
+               /*
+                * Clear struct se_cmd->se_lun before the handoff to FE.
+                */
+               cmd->se_lun = NULL;
+       }
+
        /*
         * Determine if frontend context caller is requesting the stopping of
         * this command for frontend exceptions.
@@ -509,58 +496,36 @@ static int transport_cmd_check_stop(
                        __func__, __LINE__,
                        cmd->se_tfo->get_task_tag(cmd));
 
-               if (transport_off == 2)
-                       target_remove_from_state_list(cmd);
-
-               /*
-                * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
-                * to FE.
-                */
-               if (transport_off == 2)
-                       cmd->se_lun = NULL;
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
                complete(&cmd->t_transport_stop_comp);
                return 1;
        }
-       if (transport_off) {
-               cmd->transport_state &= ~CMD_T_ACTIVE;
-               if (transport_off == 2) {
-                       target_remove_from_state_list(cmd);
-                       /*
-                        * Clear struct se_cmd->se_lun before the transport_off == 2
-                        * handoff to fabric module.
-                        */
-                       cmd->se_lun = NULL;
-                       /*
-                        * Some fabric modules like tcm_loop can release
-                        * their internally allocated I/O reference now and
-                        * struct se_cmd now.
-                        *
-                        * Fabric modules are expected to return '1' here if the
-                        * se_cmd being passed is released at this point,
-                        * or zero if not being released.
-                        */
-                       if (cmd->se_tfo->check_stop_free != NULL) {
-                               spin_unlock_irqrestore(
-                                       &cmd->t_state_lock, flags);
-
-                               return cmd->se_tfo->check_stop_free(cmd);
-                       }
+
+       cmd->transport_state &= ~CMD_T_ACTIVE;
+       if (remove_from_lists) {
+               /*
+                * Some fabric modules like tcm_loop can release their
+                * internally allocated I/O reference and the struct se_cmd
+                * itself now.
+                *
+                * Fabric modules are expected to return '1' here if the
+                * se_cmd being passed is released at this point,
+                * or zero if not being released.
+                */
+               if (cmd->se_tfo->check_stop_free != NULL) {
+                       spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+                       return cmd->se_tfo->check_stop_free(cmd);
                }
-               spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+       }
 
-               return 0;
-       } else if (t_state)
-               cmd->t_state = t_state;
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
        return 0;
 }
 
 static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
 {
-       return transport_cmd_check_stop(cmd, 2, 0);
+       return transport_cmd_check_stop(cmd, true);
 }
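
As the comment in transport_cmd_check_stop() above notes, a fabric's
->check_stop_free hook returns 1 when the passed se_cmd is released at
that point. A minimal sketch of such a hook under that contract; the
callback name below is illustrative, not taken from this patch:

	/* Hypothetical fabric hook for target_core_fabric_ops->check_stop_free */
	static int fabric_check_stop_free(struct se_cmd *se_cmd)
	{
		/*
		 * Drop the fabric's reference; target_put_sess_cmd()
		 * returns 1 when the final put released the command,
		 * matching the convention expected by target-core.
		 */
		return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
	}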
 
 static void transport_lun_remove_cmd(struct se_cmd *cmd)
@@ -591,79 +556,8 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
 
        if (transport_cmd_check_stop_to_fabric(cmd))
                return;
-       if (remove) {
-               transport_remove_cmd_from_queue(cmd);
+       if (remove)
                transport_put_cmd(cmd);
-       }
-}
-
-static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
-               bool at_head)
-{
-       struct se_device *dev = cmd->se_dev;
-       struct se_queue_obj *qobj = &dev->dev_queue_obj;
-       unsigned long flags;
-
-       if (t_state) {
-               spin_lock_irqsave(&cmd->t_state_lock, flags);
-               cmd->t_state = t_state;
-               cmd->transport_state |= CMD_T_ACTIVE;
-               spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-       }
-
-       spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
-
-       /* If the cmd is already on the list, remove it before we add it */
-       if (!list_empty(&cmd->se_queue_node))
-               list_del(&cmd->se_queue_node);
-       else
-               atomic_inc(&qobj->queue_cnt);
-
-       if (at_head)
-               list_add(&cmd->se_queue_node, &qobj->qobj_list);
-       else
-               list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
-       cmd->transport_state |= CMD_T_QUEUED;
-       spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
-
-       wake_up_interruptible(&qobj->thread_wq);
-}
-
-static struct se_cmd *
-transport_get_cmd_from_queue(struct se_queue_obj *qobj)
-{
-       struct se_cmd *cmd;
-       unsigned long flags;
-
-       spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
-       if (list_empty(&qobj->qobj_list)) {
-               spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
-               return NULL;
-       }
-       cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);
-
-       cmd->transport_state &= ~CMD_T_QUEUED;
-       list_del_init(&cmd->se_queue_node);
-       atomic_dec(&qobj->queue_cnt);
-       spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
-
-       return cmd;
-}
-
-static void transport_remove_cmd_from_queue(struct se_cmd *cmd)
-{
-       struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj;
-       unsigned long flags;
-
-       spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
-       if (!(cmd->transport_state & CMD_T_QUEUED)) {
-               spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
-               return;
-       }
-       cmd->transport_state &= ~CMD_T_QUEUED;
-       atomic_dec(&qobj->queue_cnt);
-       list_del_init(&cmd->se_queue_node);
-       spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 }
 
 static void target_complete_failure_work(struct work_struct *work)
@@ -742,68 +636,11 @@ static void target_add_to_state_list(struct se_cmd *cmd)
        spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 }
 
-static void __target_add_to_execute_list(struct se_cmd *cmd)
-{
-       struct se_device *dev = cmd->se_dev;
-       bool head_of_queue = false;
-
-       if (!list_empty(&cmd->execute_list))
-               return;
-
-       if (dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED &&
-           cmd->sam_task_attr == MSG_HEAD_TAG)
-               head_of_queue = true;
-
-       if (head_of_queue)
-               list_add(&cmd->execute_list, &dev->execute_list);
-       else
-               list_add_tail(&cmd->execute_list, &dev->execute_list);
-
-       atomic_inc(&dev->execute_tasks);
-
-       if (cmd->state_active)
-               return;
-
-       if (head_of_queue)
-               list_add(&cmd->state_list, &dev->state_list);
-       else
-               list_add_tail(&cmd->state_list, &dev->state_list);
-
-       cmd->state_active = true;
-}
-
-static void target_add_to_execute_list(struct se_cmd *cmd)
-{
-       unsigned long flags;
-       struct se_device *dev = cmd->se_dev;
-
-       spin_lock_irqsave(&dev->execute_task_lock, flags);
-       __target_add_to_execute_list(cmd);
-       spin_unlock_irqrestore(&dev->execute_task_lock, flags);
-}
-
-void __target_remove_from_execute_list(struct se_cmd *cmd)
-{
-       list_del_init(&cmd->execute_list);
-       atomic_dec(&cmd->se_dev->execute_tasks);
-}
-
-static void target_remove_from_execute_list(struct se_cmd *cmd)
-{
-       struct se_device *dev = cmd->se_dev;
-       unsigned long flags;
-
-       if (WARN_ON(list_empty(&cmd->execute_list)))
-               return;
-
-       spin_lock_irqsave(&dev->execute_task_lock, flags);
-       __target_remove_from_execute_list(cmd);
-       spin_unlock_irqrestore(&dev->execute_task_lock, flags);
-}
-
 /*
  * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
  */
+static void transport_write_pending_qf(struct se_cmd *cmd);
+static void transport_complete_qf(struct se_cmd *cmd);
 
 static void target_qf_do_work(struct work_struct *work)
 {
@@ -827,7 +664,10 @@ static void target_qf_do_work(struct work_struct *work)
                        (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
                        : "UNKNOWN");
 
-               transport_add_cmd_to_queue(cmd, cmd->t_state, true);
+               if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
+                       transport_write_pending_qf(cmd);
+               else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK)
+                       transport_complete_qf(cmd);
        }
 }
 
@@ -874,8 +714,7 @@ void transport_dump_dev_state(
                break;
        }
 
-       *bl += sprintf(b + *bl, "  Execute/Max Queue Depth: %d/%d",
-               atomic_read(&dev->execute_tasks), dev->queue_depth);
+       *bl += sprintf(b + *bl, "  Max Queue Depth: %d", dev->queue_depth);
        *bl += sprintf(b + *bl, "  SectorSize: %u  HwMaxSectors: %u\n",
                dev->se_sub_dev->se_dev_attrib.block_size,
                dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
@@ -1212,7 +1051,6 @@ struct se_device *transport_add_device_to_core_hba(
                return NULL;
        }
 
-       transport_init_queue_obj(&dev->dev_queue_obj);
        dev->dev_flags          = device_flags;
        dev->dev_status         |= TRANSPORT_DEVICE_DEACTIVATED;
        dev->dev_ptr            = transport_dev;
@@ -1222,7 +1060,6 @@ struct se_device *transport_add_device_to_core_hba(
        INIT_LIST_HEAD(&dev->dev_list);
        INIT_LIST_HEAD(&dev->dev_sep_list);
        INIT_LIST_HEAD(&dev->dev_tmr_list);
-       INIT_LIST_HEAD(&dev->execute_list);
        INIT_LIST_HEAD(&dev->delayed_cmd_list);
        INIT_LIST_HEAD(&dev->state_list);
        INIT_LIST_HEAD(&dev->qf_cmd_list);
@@ -1261,17 +1098,17 @@ struct se_device *transport_add_device_to_core_hba(
         * Setup the Asymmetric Logical Unit Assignment for struct se_device
         */
        if (core_setup_alua(dev, force_pt) < 0)
-               goto out;
+               goto err_dev_list;
 
        /*
         * Startup the struct se_device processing thread
         */
-       dev->process_thread = kthread_run(transport_processing_thread, dev,
-                                         "LIO_%s", dev->transport->name);
-       if (IS_ERR(dev->process_thread)) {
-               pr_err("Unable to create kthread: LIO_%s\n",
+       dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
+                                     dev->transport->name);
+       if (!dev->tmr_wq) {
+               pr_err("Unable to create tmr workqueue for %s\n",
                        dev->transport->name);
-               goto out;
+               goto err_dev_list;
        }
        /*
         * Setup work_queue for QUEUE_FULL
@@ -1289,7 +1126,7 @@ struct se_device *transport_add_device_to_core_hba(
                if (!inquiry_prod || !inquiry_rev) {
                        pr_err("All non TCM/pSCSI plugins require"
                                " INQUIRY consts\n");
-                       goto out;
+                       goto err_wq;
                }
 
                strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
@@ -1299,9 +1136,10 @@ struct se_device *transport_add_device_to_core_hba(
        scsi_dump_inquiry(dev);
 
        return dev;
-out:
-       kthread_stop(dev->process_thread);
 
+err_wq:
+       destroy_workqueue(dev->tmr_wq);
+err_dev_list:
        spin_lock(&hba->device_lock);
        list_del(&dev->dev_list);
        hba->dev_count--;
@@ -1380,9 +1218,7 @@ void transport_init_se_cmd(
        INIT_LIST_HEAD(&cmd->se_lun_node);
        INIT_LIST_HEAD(&cmd->se_delayed_node);
        INIT_LIST_HEAD(&cmd->se_qf_node);
-       INIT_LIST_HEAD(&cmd->se_queue_node);
        INIT_LIST_HEAD(&cmd->se_cmd_list);
-       INIT_LIST_HEAD(&cmd->execute_list);
        INIT_LIST_HEAD(&cmd->state_list);
        init_completion(&cmd->transport_lun_fe_stop_comp);
        init_completion(&cmd->transport_lun_stop_comp);
@@ -1576,10 +1412,9 @@ int transport_handle_cdb_direct(
                return -EINVAL;
        }
        /*
-        * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE following
-        * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
-        * in existing usage to ensure that outstanding descriptors are handled
-        * correctly during shutdown via transport_wait_for_tasks()
+        * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
+        * outstanding descriptors are handled correctly during shutdown via
+        * transport_wait_for_tasks()
         *
         * Also, we don't take cmd->t_state_lock here as we only expect
         * this to be called for initial descriptor submission.
@@ -1613,10 +1448,14 @@ EXPORT_SYMBOL(transport_handle_cdb_direct);
  * @data_dir: DMA data direction
  * @flags: flags for command submission from target_sc_flags_tables
  *
+ * Returns non-zero to signal active I/O shutdown failure.  All other
+ * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
+ * but still return zero here.
+ *
  * This may only be called from process context, and also currently
  * assumes internal allocation of fabric payload buffer by target-core.
  **/
-void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
+int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
                unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
                u32 data_length, int task_attr, int data_dir, int flags)
 {
@@ -1642,7 +1481,9 @@ void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
         * for fabrics using TARGET_SCF_ACK_KREF that expect a second
         * kref_put() to happen during fabric packet acknowledgement.
         */
-       target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
+       rc = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
+       if (rc)
+               return rc;
        /*
         * Signal bidirectional data payloads to target-core
         */
@@ -1655,13 +1496,13 @@ void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
                transport_send_check_condition_and_sense(se_cmd,
                                se_cmd->scsi_sense_reason, 0);
                target_put_sess_cmd(se_sess, se_cmd);
-               return;
+               return 0;
        }
 
        rc = target_setup_cmd_from_cdb(se_cmd, cdb);
        if (rc != 0) {
                transport_generic_request_failure(se_cmd);
-               return;
+               return 0;
        }
 
        /*
@@ -1670,14 +1511,8 @@ void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
         */
        core_alua_check_nonop_delay(se_cmd);
 
-       /*
-        * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend
-        * for immediate execution of READs, otherwise wait for
-        * transport_generic_handle_data() to be called for WRITEs
-        * when fabric has filled the incoming buffer.
-        */
        transport_handle_cdb_direct(se_cmd);
-       return;
+       return 0;
 }
 EXPORT_SYMBOL(target_submit_cmd);
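
target_submit_cmd() now returns int so that a fabric can detect active
I/O shutdown at submission time. A minimal caller sketch under the
signature above; the fabric command structure and cleanup helper are
illustrative assumptions:

	/*
	 * Hypothetical fabric receive path.  A non-zero return (e.g.
	 * -ESHUTDOWN once the session is tearing down) means target-core
	 * never took ownership, so the fabric unwinds its own state.
	 */
	rc = target_submit_cmd(&fcmd->se_cmd, sess->se_sess, cdb, sense_buf,
			       unpacked_lun, data_length, MSG_SIMPLE_TAG,
			       DMA_FROM_DEVICE, 0);
	if (rc)
		fabric_free_cmd(fcmd);	/* illustrative cleanup helper */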
 
@@ -1732,7 +1567,11 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
                se_cmd->se_tmr_req->ref_task_tag = tag;
 
        /* See target_submit_cmd for commentary */
-       target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
+       ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
+       if (ret) {
+               core_tmr_release_req(se_cmd->se_tmr_req);
+               return ret;
+       }
 
        ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
        if (ret) {
@@ -1749,67 +1588,6 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
 }
 EXPORT_SYMBOL(target_submit_tmr);
 
-/*
- * Used by fabric module frontends defining a TFO->new_cmd_map() caller
- * to  queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
- * complete setup in TCM process context w/ TFO->new_cmd_map().
- */
-int transport_generic_handle_cdb_map(
-       struct se_cmd *cmd)
-{
-       if (!cmd->se_lun) {
-               dump_stack();
-               pr_err("cmd->se_lun is NULL\n");
-               return -EINVAL;
-       }
-
-       transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false);
-       return 0;
-}
-EXPORT_SYMBOL(transport_generic_handle_cdb_map);
-
-/*     transport_generic_handle_data():
- *
- *
- */
-int transport_generic_handle_data(
-       struct se_cmd *cmd)
-{
-       /*
-        * For the software fabric case, then we assume the nexus is being
-        * failed/shutdown when signals are pending from the kthread context
-        * caller, so we return a failure.  For the HW target mode case running
-        * in interrupt code, the signal_pending() check is skipped.
-        */
-       if (!in_interrupt() && signal_pending(current))
-               return -EPERM;
-       /*
-        * If the received CDB has aleady been ABORTED by the generic
-        * target engine, we now call transport_check_aborted_status()
-        * to queue any delated TASK_ABORTED status for the received CDB to the
-        * fabric module as we are expecting no further incoming DATA OUT
-        * sequences at this point.
-        */
-       if (transport_check_aborted_status(cmd, 1) != 0)
-               return 0;
-
-       transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false);
-       return 0;
-}
-EXPORT_SYMBOL(transport_generic_handle_data);
-
-/*     transport_generic_handle_tmr():
- *
- *
- */
-int transport_generic_handle_tmr(
-       struct se_cmd *cmd)
-{
-       transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false);
-       return 0;
-}
-EXPORT_SYMBOL(transport_generic_handle_tmr);
-
 /*
  * If the cmd is active, request it to be stopped and sleep until it
  * has completed.
@@ -1867,6 +1645,7 @@ void transport_generic_request_failure(struct se_cmd *cmd)
        case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
        case TCM_UNKNOWN_MODE_PAGE:
        case TCM_WRITE_PROTECTED:
+       case TCM_ADDRESS_OUT_OF_RANGE:
        case TCM_CHECK_CONDITION_ABORT_CMD:
        case TCM_CHECK_CONDITION_UNIT_ATTENTION:
        case TCM_CHECK_CONDITION_NOT_READY:
@@ -1902,13 +1681,7 @@ void transport_generic_request_failure(struct se_cmd *cmd)
                cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
                break;
        }
-       /*
-        * If a fabric does not define a cmd->se_tfo->new_cmd_map caller,
-        * make the call to transport_send_check_condition_and_sense()
-        * directly.  Otherwise expect the fabric to make the call to
-        * transport_send_check_condition_and_sense() after handling
-        * possible unsoliticied write data payloads.
-        */
+
        ret = transport_send_check_condition_and_sense(cmd,
                        cmd->scsi_sense_reason, 0);
        if (ret == -EAGAIN || ret == -ENOMEM)
@@ -1926,153 +1699,123 @@ queue_full:
 }
 EXPORT_SYMBOL(transport_generic_request_failure);
 
-/*
- * Called from Fabric Module context from transport_execute_tasks()
- *
- * The return of this function determins if the tasks from struct se_cmd
- * get added to the execution queue in transport_execute_tasks(),
- * or are added to the delayed or ordered lists here.
- */
-static inline int transport_execute_task_attr(struct se_cmd *cmd)
+static void __target_execute_cmd(struct se_cmd *cmd)
 {
-       if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
-               return 1;
+       int error = 0;
+
+       spin_lock_irq(&cmd->t_state_lock);
+       cmd->transport_state |= (CMD_T_BUSY|CMD_T_SENT);
+       spin_unlock_irq(&cmd->t_state_lock);
+
+       if (cmd->execute_cmd)
+               error = cmd->execute_cmd(cmd);
+
+       if (error) {
+               spin_lock_irq(&cmd->t_state_lock);
+               cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
+               spin_unlock_irq(&cmd->t_state_lock);
+
+               transport_generic_request_failure(cmd);
+       }
+}
+
+void target_execute_cmd(struct se_cmd *cmd)
+{
+       struct se_device *dev = cmd->se_dev;
+
+       /*
+        * If the received CDB has already been aborted, stop processing it here.
+        */
+       if (transport_check_aborted_status(cmd, 1))
+               return;
+
+       /*
+        * Determine if IOCTL context caller is requesting the stopping of this
+        * command for LUN shutdown purposes.
+        */
+       spin_lock_irq(&cmd->t_state_lock);
+       if (cmd->transport_state & CMD_T_LUN_STOP) {
+               pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
+                       __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
+
+               cmd->transport_state &= ~CMD_T_ACTIVE;
+               spin_unlock_irq(&cmd->t_state_lock);
+               complete(&cmd->transport_lun_stop_comp);
+               return;
+       }
+       /*
+        * Determine if frontend context caller is requesting the stopping of
+        * this command for frontend exceptions.
+        */
+       if (cmd->transport_state & CMD_T_STOP) {
+               pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
+                       __func__, __LINE__,
+                       cmd->se_tfo->get_task_tag(cmd));
+
+               spin_unlock_irq(&cmd->t_state_lock);
+               complete(&cmd->t_transport_stop_comp);
+               return;
+       }
+
+       cmd->t_state = TRANSPORT_PROCESSING;
+       spin_unlock_irq(&cmd->t_state_lock);
+
+       if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
+               goto execute;
+
        /*
         * Check for the existence of HEAD_OF_QUEUE, and if true execute the
         * command immediately, ahead of any queued commands.
         */
-        if (cmd->sam_task_attr == MSG_HEAD_TAG) {
-               pr_debug("Added HEAD_OF_QUEUE for CDB:"
-                       " 0x%02x, se_ordered_id: %u\n",
-                       cmd->t_task_cdb[0],
-                       cmd->se_ordered_id);
-               return 1;
-       } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
-               atomic_inc(&cmd->se_dev->dev_ordered_sync);
+       switch (cmd->sam_task_attr) {
+       case MSG_HEAD_TAG:
+               pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, "
+                        "se_ordered_id: %u\n",
+                        cmd->t_task_cdb[0], cmd->se_ordered_id);
+               goto execute;
+       case MSG_ORDERED_TAG:
+               atomic_inc(&dev->dev_ordered_sync);
                smp_mb__after_atomic_inc();
 
-               pr_debug("Added ORDERED for CDB: 0x%02x to ordered"
-                               " list, se_ordered_id: %u\n",
-                               cmd->t_task_cdb[0],
-                               cmd->se_ordered_id);
+               pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, "
+                        "se_ordered_id: %u\n",
+                        cmd->t_task_cdb[0], cmd->se_ordered_id);
+
                /*
-                * Add ORDERED command to tail of execution queue if
-                * no other older commands exist that need to be
-                * completed first.
+                * Execute an ORDERED command if no other older commands
+                * exist that need to be completed first.
                 */
-               if (!atomic_read(&cmd->se_dev->simple_cmds))
-                       return 1;
-       } else {
+               if (!atomic_read(&dev->simple_cmds))
+                       goto execute;
+               break;
+       default:
                /*
                 * For SIMPLE and UNTAGGED Task Attribute commands
                 */
-               atomic_inc(&cmd->se_dev->simple_cmds);
+               atomic_inc(&dev->simple_cmds);
                smp_mb__after_atomic_inc();
+               break;
        }
-       /*
-        * Otherwise if one or more outstanding ORDERED task attribute exist,
-        * add the dormant task(s) built for the passed struct se_cmd to the
-        * execution queue and become in Active state for this struct se_device.
-        */
-       if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) {
-               /*
-                * Otherwise, add cmd w/ tasks to delayed cmd queue that
-                * will be drained upon completion of HEAD_OF_QUEUE task.
-                */
-               spin_lock(&cmd->se_dev->delayed_cmd_lock);
-               cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
-               list_add_tail(&cmd->se_delayed_node,
-                               &cmd->se_dev->delayed_cmd_list);
-               spin_unlock(&cmd->se_dev->delayed_cmd_lock);
+
+       if (atomic_read(&dev->dev_ordered_sync) != 0) {
+               spin_lock(&dev->delayed_cmd_lock);
+               list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
+               spin_unlock(&dev->delayed_cmd_lock);
 
                pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
                        " delayed CMD list, se_ordered_id: %u\n",
                        cmd->t_task_cdb[0], cmd->sam_task_attr,
                        cmd->se_ordered_id);
-               /*
-                * Return zero to let transport_execute_tasks() know
-                * not to add the delayed tasks to the execution list.
-                */
-               return 0;
+               return;
        }
+
+execute:
        /*
         * Otherwise, no ORDERED task attributes exist.
         */
-       return 1;
-}
-
-/*
- * Called from fabric module context in transport_generic_new_cmd() and
- * transport_generic_process_write()
- */
-static void transport_execute_tasks(struct se_cmd *cmd)
-{
-       int add_tasks;
-       struct se_device *se_dev = cmd->se_dev;
-       /*
-        * Call transport_cmd_check_stop() to see if a fabric exception
-        * has occurred that prevents execution.
-        */
-       if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) {
-               /*
-                * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
-                * attribute for the tasks of the received struct se_cmd CDB
-                */
-               add_tasks = transport_execute_task_attr(cmd);
-               if (add_tasks) {
-                       __transport_execute_tasks(se_dev, cmd);
-                       return;
-               }
-       }
-       __transport_execute_tasks(se_dev, NULL);
-}
-
-static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd)
-{
-       int error;
-       struct se_cmd *cmd = NULL;
-       unsigned long flags;
-
-check_depth:
-       spin_lock_irq(&dev->execute_task_lock);
-       if (new_cmd != NULL)
-               __target_add_to_execute_list(new_cmd);
-
-       if (list_empty(&dev->execute_list)) {
-               spin_unlock_irq(&dev->execute_task_lock);
-               return 0;
-       }
-       cmd = list_first_entry(&dev->execute_list, struct se_cmd, execute_list);
-       __target_remove_from_execute_list(cmd);
-       spin_unlock_irq(&dev->execute_task_lock);
-
-       spin_lock_irqsave(&cmd->t_state_lock, flags);
-       cmd->transport_state |= CMD_T_BUSY;
-       cmd->transport_state |= CMD_T_SENT;
-
-       spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
-       if (cmd->execute_cmd)
-               error = cmd->execute_cmd(cmd);
-       else {
-               error = dev->transport->execute_cmd(cmd, cmd->t_data_sg,
-                               cmd->t_data_nents, cmd->data_direction);
-       }
-
-       if (error != 0) {
-               spin_lock_irqsave(&cmd->t_state_lock, flags);
-               cmd->transport_state &= ~CMD_T_BUSY;
-               cmd->transport_state &= ~CMD_T_SENT;
-               spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
-               transport_generic_request_failure(cmd);
-       }
-
-       new_cmd = NULL;
-       goto check_depth;
-
-       return 0;
+       __target_execute_cmd(cmd);
 }
+EXPORT_SYMBOL(target_execute_cmd);
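
target_execute_cmd() replaces the removed transport_generic_handle_data()
entry point: the fabric calls it directly once the WRITE payload has
arrived, instead of queueing TRANSPORT_PROCESS_WRITE to a processing
thread. A sketch of a data-out completion path; the callback name is
illustrative:

	/* Hypothetical fabric callback, run once all data-out has landed. */
	static void fabric_handle_data_out(struct se_cmd *se_cmd)
	{
		/*
		 * Aborted-status and stop checks now happen inside
		 * target_execute_cmd() itself, so the command can be
		 * handed straight to the backend from this context.
		 */
		target_execute_cmd(se_cmd);
	}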
 
 /*
  * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
@@ -2129,6 +1872,33 @@ out:
        return -1;
 }
 
+/*
+ * Process all commands up to the last received ORDERED task attribute which
+ * requires another blocking boundary
+ */
+static void target_restart_delayed_cmds(struct se_device *dev)
+{
+       for (;;) {
+               struct se_cmd *cmd;
+
+               spin_lock(&dev->delayed_cmd_lock);
+               if (list_empty(&dev->delayed_cmd_list)) {
+                       spin_unlock(&dev->delayed_cmd_lock);
+                       break;
+               }
+
+               cmd = list_entry(dev->delayed_cmd_list.next,
+                                struct se_cmd, se_delayed_node);
+               list_del(&cmd->se_delayed_node);
+               spin_unlock(&dev->delayed_cmd_lock);
+
+               __target_execute_cmd(cmd);
+
+               if (cmd->sam_task_attr == MSG_ORDERED_TAG)
+                       break;
+       }
+}
+
 /*
  * Called from I/O completion to determine which dormant/delayed
  * and ordered cmds need to have their tasks added to the execution queue.
@@ -2136,8 +1906,6 @@ out:
 static void transport_complete_task_attr(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
-       struct se_cmd *cmd_p, *cmd_tmp;
-       int new_active_tasks = 0;
 
        if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
                atomic_dec(&dev->simple_cmds);
@@ -2159,38 +1927,8 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
                pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
                        " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
        }
-       /*
-        * Process all commands up to the last received
-        * ORDERED task attribute which requires another blocking
-        * boundary
-        */
-       spin_lock(&dev->delayed_cmd_lock);
-       list_for_each_entry_safe(cmd_p, cmd_tmp,
-                       &dev->delayed_cmd_list, se_delayed_node) {
 
-               list_del(&cmd_p->se_delayed_node);
-               spin_unlock(&dev->delayed_cmd_lock);
-
-               pr_debug("Calling add_tasks() for"
-                       " cmd_p: 0x%02x Task Attr: 0x%02x"
-                       " Dormant -> Active, se_ordered_id: %u\n",
-                       cmd_p->t_task_cdb[0],
-                       cmd_p->sam_task_attr, cmd_p->se_ordered_id);
-
-               target_add_to_execute_list(cmd_p);
-               new_active_tasks++;
-
-               spin_lock(&dev->delayed_cmd_lock);
-               if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
-                       break;
-       }
-       spin_unlock(&dev->delayed_cmd_lock);
-       /*
-        * If new tasks have become active, wake up the transport thread
-        * to do the processing of the Active tasks.
-        */
-       if (new_active_tasks != 0)
-               wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
+       target_restart_delayed_cmds(dev);
 }
 
 static void transport_complete_qf(struct se_cmd *cmd)
@@ -2608,38 +2346,42 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
        atomic_inc(&cmd->t_fe_count);
 
        /*
-        * For WRITEs, let the fabric know its buffer is ready.
-        *
-        * The command will be added to the execution queue after its write
-        * data has arrived.
+        * If this command is not a write we can execute it right here;
+        * for write buffers we need to notify the fabric driver first
+        * and let it call back once the write buffers are ready.
         */
-       if (cmd->data_direction == DMA_TO_DEVICE) {
-               target_add_to_state_list(cmd);
-               return transport_generic_write_pending(cmd);
+       target_add_to_state_list(cmd);
+       if (cmd->data_direction != DMA_TO_DEVICE) {
+               target_execute_cmd(cmd);
+               return 0;
        }
-       /*
-        * Everything else but a WRITE, add the command to the execution queue.
-        */
-       transport_execute_tasks(cmd);
-       return 0;
+
+       spin_lock_irq(&cmd->t_state_lock);
+       cmd->t_state = TRANSPORT_WRITE_PENDING;
+       spin_unlock_irq(&cmd->t_state_lock);
+
+       transport_cmd_check_stop(cmd, false);
+
+       ret = cmd->se_tfo->write_pending(cmd);
+       if (ret == -EAGAIN || ret == -ENOMEM)
+               goto queue_full;
+
+       if (ret < 0)
+               return ret;
+       return 1;
 
 out_fail:
        cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
        cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        return -EINVAL;
+queue_full:
+       pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
+       cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
+       transport_handle_queue_full(cmd, cmd->se_dev);
+       return 0;
 }
 EXPORT_SYMBOL(transport_generic_new_cmd);
 
-/*     transport_generic_process_write():
- *
- *
- */
-void transport_generic_process_write(struct se_cmd *cmd)
-{
-       transport_execute_tasks(cmd);
-}
-EXPORT_SYMBOL(transport_generic_process_write);
-
 static void transport_write_pending_qf(struct se_cmd *cmd)
 {
        int ret;
@@ -2652,43 +2394,6 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
        }
 }
 
-static int transport_generic_write_pending(struct se_cmd *cmd)
-{
-       unsigned long flags;
-       int ret;
-
-       spin_lock_irqsave(&cmd->t_state_lock, flags);
-       cmd->t_state = TRANSPORT_WRITE_PENDING;
-       spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
-       /*
-        * Clear the se_cmd for WRITE_PENDING status in order to set
-        * CMD_T_ACTIVE so that transport_generic_handle_data can be called
-        * from HW target mode interrupt code.  This is safe to be called
-        * with transport_off=1 before the cmd->se_tfo->write_pending
-        * because the se_cmd->se_lun pointer is not being cleared.
-        */
-       transport_cmd_check_stop(cmd, 1, 0);
-
-       /*
-        * Call the fabric write_pending function here to let the
-        * frontend know that WRITE buffers are ready.
-        */
-       ret = cmd->se_tfo->write_pending(cmd);
-       if (ret == -EAGAIN || ret == -ENOMEM)
-               goto queue_full;
-       else if (ret < 0)
-               return ret;
-
-       return 1;
-
-queue_full:
-       pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
-       cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
-       transport_handle_queue_full(cmd, cmd->se_dev);
-       return 0;
-}
-
 void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
 {
        if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
@@ -2715,10 +2420,11 @@ EXPORT_SYMBOL(transport_generic_free_cmd);
  * @se_cmd:    command descriptor to add
  * @ack_kref:  Signal that fabric will perform an ack target_put_sess_cmd()
  */
-void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
-                       bool ack_kref)
+static int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
+                              bool ack_kref)
 {
        unsigned long flags;
+       int ret = 0;
 
        kref_init(&se_cmd->cmd_kref);
        /*
@@ -2732,11 +2438,17 @@ void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
        }
 
        spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+       if (se_sess->sess_tearing_down) {
+               ret = -ESHUTDOWN;
+               goto out;
+       }
        list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
        se_cmd->check_release = 1;
+
+out:
        spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+       return ret;
 }
-EXPORT_SYMBOL(target_get_sess_cmd);
 
 static void target_release_cmd_kref(struct kref *kref)
 {
@@ -2771,28 +2483,27 @@ int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
 }
 EXPORT_SYMBOL(target_put_sess_cmd);
 
-/* target_splice_sess_cmd_list - Split active cmds into sess_wait_list
- * @se_sess:   session to split
+/* target_sess_cmd_list_set_waiting - Flag all commands in
+ *         sess_cmd_list to complete cmd_wait_comp.  Set
+ *         sess_tearing_down so no more commands are queued.
+ * @se_sess:   session to flag
  */
-void target_splice_sess_cmd_list(struct se_session *se_sess)
+void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
 {
        struct se_cmd *se_cmd;
        unsigned long flags;
 
-       WARN_ON(!list_empty(&se_sess->sess_wait_list));
-       INIT_LIST_HEAD(&se_sess->sess_wait_list);
-
        spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
-       se_sess->sess_tearing_down = 1;
 
-       list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
+       WARN_ON(se_sess->sess_tearing_down);
+       se_sess->sess_tearing_down = 1;
 
-       list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
+       list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list)
                se_cmd->cmd_wait_set = 1;
 
        spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 }
-EXPORT_SYMBOL(target_splice_sess_cmd_list);
+EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
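
With sess_wait_list gone, session shutdown drains sess_cmd_list itself
in two steps. A sketch of the expected fabric teardown sequence; the
wait_for_tasks argument value and the surrounding function are
assumptions:

	/* Hypothetical fabric session close path. */
	static void fabric_close_session(struct se_session *se_sess)
	{
		/* Stop new commands from joining sess_cmd_list ... */
		target_sess_cmd_list_set_waiting(se_sess);
		/* ... then block until each outstanding se_cmd completes. */
		target_wait_for_sess_cmds(se_sess, 0);
		transport_deregister_session(se_sess);
	}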
 
 /* target_wait_for_sess_cmds - Wait for outstanding descriptors
  * @se_sess:    session to wait for active I/O
@@ -2806,7 +2517,7 @@ void target_wait_for_sess_cmds(
        bool rc = false;
 
        list_for_each_entry_safe(se_cmd, tmp_cmd,
-                               &se_sess->sess_wait_list, se_cmd_list) {
+                               &se_sess->sess_cmd_list, se_cmd_list) {
                list_del(&se_cmd->se_cmd_list);
 
                pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
@@ -2858,26 +2569,20 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
                pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n",
                         cmd->se_tfo->get_task_tag(cmd));
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-               transport_cmd_check_stop(cmd, 1, 0);
+               transport_cmd_check_stop(cmd, false);
                return -EPERM;
        }
        cmd->transport_state |= CMD_T_LUN_FE_STOP;
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
-       wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
-
        // XXX: audit task_flags checks.
        spin_lock_irqsave(&cmd->t_state_lock, flags);
        if ((cmd->transport_state & CMD_T_BUSY) &&
            (cmd->transport_state & CMD_T_SENT)) {
                if (!target_stop_cmd(cmd, &flags))
                        ret++;
-               spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-       } else {
-               spin_unlock_irqrestore(&cmd->t_state_lock,
-                               flags);
-               target_remove_from_execute_list(cmd);
        }
+       spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
        pr_debug("ConfigFS: cmd: %p stop tasks ret:"
                        " %d\n", cmd, ret);
@@ -2888,7 +2593,6 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
                pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
                                cmd->se_tfo->get_task_tag(cmd));
        }
-       transport_remove_cmd_from_queue(cmd);
 
        return 0;
 }
@@ -2907,11 +2611,6 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
                       struct se_cmd, se_lun_node);
                list_del_init(&cmd->se_lun_node);
 
-               /*
-                * This will notify iscsi_target_transport.c:
-                * transport_cmd_check_stop() that a LUN shutdown is in
-                * progress for the iscsi_cmd_t.
-                */
                spin_lock(&cmd->t_state_lock);
                pr_debug("SE_LUN[%d] - Setting cmd->transport"
                        "_lun_stop for  ITT: 0x%08x\n",
@@ -2978,7 +2677,7 @@ check_cond:
 
                        spin_unlock_irqrestore(&cmd->t_state_lock,
                                        cmd_flags);
-                       transport_cmd_check_stop(cmd, 1, 0);
+                       transport_cmd_check_stop(cmd, false);
                        complete(&cmd->transport_lun_fe_stop_comp);
                        spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
                        continue;
@@ -3092,8 +2791,6 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
 
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
-       wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
-
        wait_for_completion(&cmd->t_transport_stop_comp);
 
        spin_lock_irqsave(&cmd->t_state_lock, flags);
@@ -3276,6 +2973,15 @@ int transport_send_check_condition_and_sense(
                /* WRITE PROTECTED */
                buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
                break;
+       case TCM_ADDRESS_OUT_OF_RANGE:
+               /* CURRENT ERROR */
+               buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+               /* ILLEGAL REQUEST */
+               buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+               /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
+               buffer[offset+SPC_ASC_KEY_OFFSET] = 0x21;
+               break;
        case TCM_CHECK_CONDITION_UNIT_ATTENTION:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
@@ -3376,8 +3082,9 @@ void transport_send_task_abort(struct se_cmd *cmd)
        cmd->se_tfo->queue_status(cmd);
 }
 
-static int transport_generic_do_tmr(struct se_cmd *cmd)
+static void target_tmr_work(struct work_struct *work)
 {
+       struct se_cmd *cmd = container_of(work, struct se_cmd, work);
        struct se_device *dev = cmd->se_dev;
        struct se_tmr_req *tmr = cmd->se_tmr_req;
        int ret;
@@ -3413,80 +3120,13 @@ static int transport_generic_do_tmr(struct se_cmd *cmd)
        cmd->se_tfo->queue_tm_rsp(cmd);
 
        transport_cmd_check_stop_to_fabric(cmd);
-       return 0;
 }
 
-/*     transport_processing_thread():
- *
- *
- */
-static int transport_processing_thread(void *param)
+int transport_generic_handle_tmr(
+       struct se_cmd *cmd)
 {
-       int ret;
-       struct se_cmd *cmd;
-       struct se_device *dev = param;
-
-       while (!kthread_should_stop()) {
-               ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
-                               atomic_read(&dev->dev_queue_obj.queue_cnt) ||
-                               kthread_should_stop());
-               if (ret < 0)
-                       goto out;
-
-get_cmd:
-               cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
-               if (!cmd)
-                       continue;
-
-               switch (cmd->t_state) {
-               case TRANSPORT_NEW_CMD:
-                       BUG();
-                       break;
-               case TRANSPORT_NEW_CMD_MAP:
-                       if (!cmd->se_tfo->new_cmd_map) {
-                               pr_err("cmd->se_tfo->new_cmd_map is"
-                                       " NULL for TRANSPORT_NEW_CMD_MAP\n");
-                               BUG();
-                       }
-                       ret = cmd->se_tfo->new_cmd_map(cmd);
-                       if (ret < 0) {
-                               transport_generic_request_failure(cmd);
-                               break;
-                       }
-                       ret = transport_generic_new_cmd(cmd);
-                       if (ret < 0) {
-                               transport_generic_request_failure(cmd);
-                               break;
-                       }
-                       break;
-               case TRANSPORT_PROCESS_WRITE:
-                       transport_generic_process_write(cmd);
-                       break;
-               case TRANSPORT_PROCESS_TMR:
-                       transport_generic_do_tmr(cmd);
-                       break;
-               case TRANSPORT_COMPLETE_QF_WP:
-                       transport_write_pending_qf(cmd);
-                       break;
-               case TRANSPORT_COMPLETE_QF_OK:
-                       transport_complete_qf(cmd);
-                       break;
-               default:
-                       pr_err("Unknown t_state: %d  for ITT: 0x%08x "
-                               "i_state: %d on SE LUN: %u\n",
-                               cmd->t_state,
-                               cmd->se_tfo->get_task_tag(cmd),
-                               cmd->se_tfo->get_cmd_state(cmd),
-                               cmd->se_lun->unpacked_lun);
-                       BUG();
-               }
-
-               goto get_cmd;
-       }
-
-out:
-       WARN_ON(!list_empty(&dev->state_list));
-       WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list));
-       dev->process_thread = NULL;
+       INIT_WORK(&cmd->work, target_tmr_work);
+       queue_work(cmd->se_dev->tmr_wq, &cmd->work);
        return 0;
 }
+EXPORT_SYMBOL(transport_generic_handle_tmr);
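
TMRs are now dispatched through the per-device tmr_wq created in
transport_add_device_to_core_hba() rather than the removed processing
kthread. The setup/teardown pairing assumed across these hunks, shown
together for reference (device release is presumed to mirror the error
path, which is outside this diff):

	/* Setup, from transport_add_device_to_core_hba() above: */
	dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
				      1, dev->transport->name);

	/* Teardown, as on the err_wq error path: */
	destroy_workqueue(dev->tmr_wq);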