static int sub_api_initialized;
+static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_cmd_cache;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_tmr_req_cache;
static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
struct se_device *dev);
-static void transport_direct_request_timeout(struct se_cmd *cmd);
static void transport_free_dev_tasks(struct se_cmd *cmd);
-static u32 transport_allocate_tasks(struct se_cmd *cmd,
- unsigned long long starting_lba,
- enum dma_data_direction data_direction,
- struct scatterlist *sgl, unsigned int nents);
static int transport_generic_get_mem(struct se_cmd *cmd);
static void transport_put_cmd(struct se_cmd *cmd);
static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
-static void transport_stop_all_task_timers(struct se_cmd *cmd);
+static void transport_generic_request_failure(struct se_cmd *, int, int);
+static void target_complete_ok_work(struct work_struct *work);
int init_se_kmem_caches(void)
{
if (!se_tmr_req_cache) {
pr_err("kmem_cache_create() for struct se_tmr_req"
" failed\n");
- goto out;
+ goto out_free_cmd_cache;
}
se_sess_cache = kmem_cache_create("se_sess_cache",
sizeof(struct se_session), __alignof__(struct se_session),
if (!se_sess_cache) {
pr_err("kmem_cache_create() for struct se_session"
" failed\n");
- goto out;
+ goto out_free_tmr_req_cache;
}
se_ua_cache = kmem_cache_create("se_ua_cache",
sizeof(struct se_ua), __alignof__(struct se_ua),
0, NULL);
if (!se_ua_cache) {
pr_err("kmem_cache_create() for struct se_ua failed\n");
- goto out;
+ goto out_free_sess_cache;
}
t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
sizeof(struct t10_pr_registration),
if (!t10_pr_reg_cache) {
pr_err("kmem_cache_create() for struct t10_pr_registration"
" failed\n");
- goto out;
+ goto out_free_ua_cache;
}
t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
if (!t10_alua_lu_gp_cache) {
pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
" failed\n");
- goto out;
+ goto out_free_pr_reg_cache;
}
t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
sizeof(struct t10_alua_lu_gp_member),
if (!t10_alua_lu_gp_mem_cache) {
pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
"cache failed\n");
- goto out;
+ goto out_free_lu_gp_cache;
}
t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
sizeof(struct t10_alua_tg_pt_gp),
if (!t10_alua_tg_pt_gp_cache) {
pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
"cache failed\n");
- goto out;
+ goto out_free_lu_gp_mem_cache;
}
t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
"t10_alua_tg_pt_gp_mem_cache",
if (!t10_alua_tg_pt_gp_mem_cache) {
pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
"mem_t failed\n");
- goto out;
+ goto out_free_tg_pt_gp_cache;
}
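+	/*
+	 * Completions may have to run to free memory, so give the
+	 * completion workqueue a rescuer via WQ_MEM_RECLAIM to guarantee
+	 * forward progress under memory pressure.
+	 */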
+ target_completion_wq = alloc_workqueue("target_completion",
+ WQ_MEM_RECLAIM, 0);
+ if (!target_completion_wq)
+ goto out_free_tg_pt_gp_mem_cache;
+
return 0;
+
+out_free_tg_pt_gp_mem_cache:
+ kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
+out_free_tg_pt_gp_cache:
+ kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
+out_free_lu_gp_mem_cache:
+ kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
+out_free_lu_gp_cache:
+ kmem_cache_destroy(t10_alua_lu_gp_cache);
+out_free_pr_reg_cache:
+ kmem_cache_destroy(t10_pr_reg_cache);
+out_free_ua_cache:
+ kmem_cache_destroy(se_ua_cache);
+out_free_sess_cache:
+ kmem_cache_destroy(se_sess_cache);
+out_free_tmr_req_cache:
+ kmem_cache_destroy(se_tmr_req_cache);
+out_free_cmd_cache:
+ kmem_cache_destroy(se_cmd_cache);
out:
- if (se_cmd_cache)
- kmem_cache_destroy(se_cmd_cache);
- if (se_tmr_req_cache)
- kmem_cache_destroy(se_tmr_req_cache);
- if (se_sess_cache)
- kmem_cache_destroy(se_sess_cache);
- if (se_ua_cache)
- kmem_cache_destroy(se_ua_cache);
- if (t10_pr_reg_cache)
- kmem_cache_destroy(t10_pr_reg_cache);
- if (t10_alua_lu_gp_cache)
- kmem_cache_destroy(t10_alua_lu_gp_cache);
- if (t10_alua_lu_gp_mem_cache)
- kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
- if (t10_alua_tg_pt_gp_cache)
- kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
- if (t10_alua_tg_pt_gp_mem_cache)
- kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
return -ENOMEM;
}
void release_se_kmem_caches(void)
{
+ destroy_workqueue(target_completion_wq);
kmem_cache_destroy(se_cmd_cache);
kmem_cache_destroy(se_tmr_req_cache);
kmem_cache_destroy(se_sess_cache);
}
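/*
 * A minimal sketch of the deferred-completion pattern introduced above,
 * using a hypothetical foo_cmd in place of struct se_cmd; the real code
 * queues cmd->work onto target_completion_wq from transport_complete_task().
 */
struct foo_cmd {
	int status;
	struct work_struct work;
};

static void foo_complete_work(struct work_struct *work)
{
	/* Runs in process context on the WQ_MEM_RECLAIM workqueue. */
	struct foo_cmd *cmd = container_of(work, struct foo_cmd, work);

	pr_debug("completing cmd %p, status %d\n", cmd, cmd->status);
}

static void foo_complete(struct foo_cmd *cmd)
{
	/* Safe to call from hard interrupt context; the work runs later. */
	INIT_WORK(&cmd->work, foo_complete_work);
	queue_work(target_completion_wq, &cmd->work);
}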
EXPORT_SYMBOL(transport_init_queue_obj);
-static int transport_subsystem_reqmods(void)
+void transport_subsystem_check_init(void)
{
int ret;
+ if (sub_api_initialized)
+ return;
+
ret = request_module("target_core_iblock");
if (ret != 0)
pr_err("Unable to load target_core_iblock\n");
ret = request_module("target_core_stgt");
if (ret != 0)
	pr_err("Unable to load target_core_stgt\n");
- return 0;
-}
-
-int transport_subsystem_check_init(void)
-{
- int ret;
-
- if (sub_api_initialized)
- return 0;
- /*
- * Request the loading of known TCM subsystem plugins..
- */
- ret = transport_subsystem_reqmods();
- if (ret < 0)
- return ret;
-
sub_api_initialized = 1;
- return 0;
}
struct se_session *transport_init_session(void)
}
INIT_LIST_HEAD(&se_sess->sess_list);
INIT_LIST_HEAD(&se_sess->sess_acl_list);
+ INIT_LIST_HEAD(&se_sess->sess_cmd_list);
+ INIT_LIST_HEAD(&se_sess->sess_wait_list);
+ spin_lock_init(&se_sess->sess_cmd_lock);
return se_sess;
}
" == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
cmd->se_tfo->get_task_tag(cmd));
- cmd->deferred_t_state = cmd->t_state;
- cmd->t_state = TRANSPORT_DEFERRED_CMD;
atomic_set(&cmd->t_transport_active, 0);
if (transport_off == 2)
transport_all_task_dev_remove_state(cmd);
" TRUE for ITT: 0x%08x\n", __func__, __LINE__,
cmd->se_tfo->get_task_tag(cmd));
- cmd->deferred_t_state = cmd->t_state;
- cmd->t_state = TRANSPORT_DEFERRED_CMD;
if (transport_off == 2)
transport_all_task_dev_remove_state(cmd);
* Some fabric modules like tcm_loop can release
* their internally allocated I/O reference and
* struct se_cmd now.
+ *
+ * Fabric modules are expected to return '1' here if the
+ * se_cmd being passed is released at this point,
+ * or zero if not being released.
*/
if (cmd->se_tfo->check_stop_free != NULL) {
spin_unlock_irqrestore(
&cmd->t_state_lock, flags);
- cmd->se_tfo->check_stop_free(cmd);
- return 1;
+ return cmd->se_tfo->check_stop_free(cmd);
}
}
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
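/*
 * A sketch of the ->check_stop_free() contract described above, using a
 * hypothetical fabric module; fabric_cmd_busy() and fabric_release_cmd()
 * are assumed fabric-private helpers. Return 1 when the fabric released
 * the descriptor, 0 when it did not.
 */
static int fabric_check_stop_free(struct se_cmd *se_cmd)
{
	if (fabric_cmd_busy(se_cmd))
		return 0;
	fabric_release_cmd(se_cmd);	/* hypothetical fabric release */
	return 1;
}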
EXPORT_SYMBOL(transport_complete_sync_cache);
+static void target_complete_failure_work(struct work_struct *work)
+{
+ struct se_cmd *cmd = container_of(work, struct se_cmd, work);
+
+ transport_generic_request_failure(cmd, 1, 1);
+}
+
/* transport_complete_task():
*
* Called from interrupt and non interrupt context depending
{
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
- int t_state;
unsigned long flags;
#if 0
pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task,
* to complete for an exception condition
*/
if (task->task_flags & TF_REQUEST_STOP) {
- /*
- * Decrement cmd->t_se_count if this task had
- * previously thrown its timeout exception handler.
- */
- if (task->task_flags & TF_TIMEOUT) {
- atomic_dec(&cmd->t_se_count);
- task->task_flags &= ~TF_TIMEOUT;
- }
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
complete(&task->task_stop_comp);
return;
}
- /*
- * If the task's timeout handler has fired, use the t_task_cdbs_timeout
- * left counter to determine when the struct se_cmd is ready to be queued to
- * the processing thread.
- */
- if (task->task_flags & TF_TIMEOUT) {
- if (!atomic_dec_and_test(
- &cmd->t_task_cdbs_timeout_left)) {
- spin_unlock_irqrestore(&cmd->t_state_lock,
- flags);
- return;
- }
- t_state = TRANSPORT_COMPLETE_TIMEOUT;
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- transport_add_cmd_to_queue(cmd, t_state, false);
- return;
- }
- atomic_dec(&cmd->t_task_cdbs_timeout_left);
+ if (!success)
+ cmd->t_tasks_failed = 1;
/*
* Decrement the outstanding t_task_cdbs_left count. The last
* device queue depending upon int success.
*/
if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
- if (!success)
- cmd->t_tasks_failed = 1;
-
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return;
}
- if (!success || cmd->t_tasks_failed) {
- t_state = TRANSPORT_COMPLETE_FAILURE;
+ if (cmd->t_tasks_failed) {
if (!task->task_error_status) {
task->task_error_status =
PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
cmd->transport_error_status =
PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
+ INIT_WORK(&cmd->work, target_complete_failure_work);
} else {
atomic_set(&cmd->t_transport_complete, 1);
- t_state = TRANSPORT_COMPLETE_OK;
+ INIT_WORK(&cmd->work, target_complete_ok_work);
}
+
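+	/*
+	 * Mark the command complete and active, then hand it off to the
+	 * completion workqueue; cmd->work runs in process context.
+	 */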
+ cmd->t_state = TRANSPORT_COMPLETE;
+ atomic_set(&cmd->t_transport_active, 1);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- transport_add_cmd_to_queue(cmd, t_state, false);
+ queue_work(target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(transport_complete_task);
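/*
 * The per-task accounting above follows a last-one-out scheme: every task
 * decrements t_task_cdbs_left, and only the final decrement selects a work
 * function and queues the command. A reduced sketch, with hypothetical
 * foo_cmd fields (lock, failed, tasks_left, work) and work handlers
 * standing in for the se_cmd equivalents:
 */
static void foo_task_done(struct foo_cmd *cmd, bool success)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->lock, flags);
	if (!success)
		cmd->failed = true;
	if (!atomic_dec_and_test(&cmd->tasks_left)) {
		/* Other tasks are still outstanding. */
		spin_unlock_irqrestore(&cmd->lock, flags);
		return;
	}
	INIT_WORK(&cmd->work,
		  cmd->failed ? foo_failure_work : foo_ok_work);
	spin_unlock_irqrestore(&cmd->lock, flags);
	queue_work(target_completion_wq, &cmd->work);
}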
}
/*
- * Handle QUEUE_FULL / -EAGAIN status
+ * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
*/
static void target_qf_do_work(struct work_struct *work)
INIT_LIST_HEAD(&cmd->se_ordered_node);
INIT_LIST_HEAD(&cmd->se_qf_node);
INIT_LIST_HEAD(&cmd->se_queue_node);
-
+ INIT_LIST_HEAD(&cmd->se_cmd_list);
INIT_LIST_HEAD(&cmd->t_task_list);
init_completion(&cmd->transport_lun_fe_stop_comp);
init_completion(&cmd->transport_lun_stop_comp);
init_completion(&cmd->t_transport_stop_comp);
+ init_completion(&cmd->cmd_wait_comp);
spin_lock_init(&cmd->t_state_lock);
atomic_set(&cmd->transport_dev_active, 1);
}
EXPORT_SYMBOL(transport_generic_allocate_tasks);
-static void transport_generic_request_failure(struct se_cmd *,
- struct se_device *, int, int);
/*
* Used by fabric module frontends to queue tasks directly.
* May only be used from process context,
* and call transport_generic_request_failure() if necessary.
*/
ret = transport_generic_new_cmd(cmd);
- if (ret == -EAGAIN)
- return 0;
- else if (ret < 0) {
+ if (ret < 0) {
cmd->transport_error_status = ret;
- transport_generic_request_failure(cmd, NULL, 0,
+ transport_generic_request_failure(cmd, 0,
(cmd->data_direction != DMA_TO_DEVICE));
}
return 0;
}
EXPORT_SYMBOL(transport_generic_handle_tmr);
-void transport_generic_free_cmd_intr(
- struct se_cmd *cmd)
+/*
+ * If the task is active, request it to be stopped and sleep until it
+ * has completed.
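+ * Called with cmd->t_state_lock held; the lock is dropped across the
+ * wait and re-acquired before returning.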
+ */
+bool target_stop_task(struct se_task *task, unsigned long *flags)
{
- transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR, false);
+ struct se_cmd *cmd = task->task_se_cmd;
+ bool was_active = false;
+
+ if (task->task_flags & TF_ACTIVE) {
+ task->task_flags |= TF_REQUEST_STOP;
+ spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
+
+ pr_debug("Task %p waiting to complete\n", task);
+ wait_for_completion(&task->task_stop_comp);
+ pr_debug("Task %p stopped successfully\n", task);
+
+ spin_lock_irqsave(&cmd->t_state_lock, *flags);
+ atomic_dec(&cmd->t_task_cdbs_left);
+ task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP);
+ was_active = true;
+ }
+
+ return was_active;
}
-EXPORT_SYMBOL(transport_generic_free_cmd_intr);
static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
{
continue;
}
- /*
- * If the struct se_task is active, sleep until it is returned
- * from the plugin.
- */
- if (task->task_flags & TF_ACTIVE) {
- task->task_flags |= TF_REQUEST_STOP;
- spin_unlock_irqrestore(&cmd->t_state_lock,
- flags);
-
- pr_debug("Task %p waiting to complete\n", task);
- wait_for_completion(&task->task_stop_comp);
- pr_debug("Task %p stopped successfully\n", task);
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- atomic_dec(&cmd->t_task_cdbs_left);
- task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP);
- } else {
+ if (!target_stop_task(task, &flags)) {
pr_debug("Task %p - did nothing\n", task);
ret++;
}
-
- __transport_stop_task_timer(task, &flags);
}
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
*/
static void transport_generic_request_failure(
struct se_cmd *cmd,
- struct se_device *dev,
int complete,
int sc)
{
pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
cmd->t_task_cdb[0]);
- pr_debug("-----[ i_state: %d t_state/def_t_state:"
- " %d/%d transport_error_status: %d\n",
+ pr_debug("-----[ i_state: %d t_state: %d transport_error_status: %d\n",
cmd->se_tfo->get_cmd_state(cmd),
- cmd->t_state, cmd->deferred_t_state,
+ cmd->t_state,
cmd->transport_error_status);
pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
atomic_read(&cmd->t_transport_stop),
atomic_read(&cmd->t_transport_sent));
- transport_stop_all_task_timers(cmd);
-
- if (dev)
- atomic_inc(&dev->depth_left);
/*
* For SAM Task Attribute emulation for failed struct se_cmd
*/
transport_complete_task_attr(cmd);
if (complete) {
- transport_direct_request_timeout(cmd);
cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
}
ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
ret = cmd->se_tfo->queue_status(cmd);
- if (ret == -EAGAIN)
+ if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
goto check_stop;
case PYX_TRANSPORT_USE_SENSE_REASON:
else {
ret = transport_send_check_condition_and_sense(cmd,
cmd->scsi_sense_reason, 0);
- if (ret == -EAGAIN)
+ if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
}
transport_handle_queue_full(cmd, cmd->se_dev);
}
-static void transport_direct_request_timeout(struct se_cmd *cmd)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (!atomic_read(&cmd->t_transport_timeout)) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return;
- }
- if (atomic_read(&cmd->t_task_cdbs_timeout_left)) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return;
- }
-
- atomic_sub(atomic_read(&cmd->t_transport_timeout),
- &cmd->t_se_count);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-}
-
-static void transport_generic_request_timeout(struct se_cmd *cmd)
-{
- unsigned long flags;
-
- /*
- * Reset cmd->t_se_count to allow transport_put_cmd()
- * to allow last call to free memory resources.
- */
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (atomic_read(&cmd->t_transport_timeout) > 1) {
- int tmp = (atomic_read(&cmd->t_transport_timeout) - 1);
-
- atomic_sub(tmp, &cmd->t_se_count);
- }
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- transport_put_cmd(cmd);
-}
-
static inline u32 transport_lba_21(unsigned char *cdb)
{
return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
}
-/*
- * Called from interrupt context.
- */
-static void transport_task_timeout_handler(unsigned long data)
-{
- struct se_task *task = (struct se_task *)data;
- struct se_cmd *cmd = task->task_se_cmd;
- unsigned long flags;
-
- pr_debug("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (task->task_flags & TF_TIMER_STOP) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return;
- }
- task->task_flags &= ~TF_TIMER_RUNNING;
-
- /*
- * Determine if transport_complete_task() has already been called.
- */
- if (!(task->task_flags & TF_ACTIVE)) {
- pr_debug("transport task: %p cmd: %p timeout !TF_ACTIVE\n",
- task, cmd);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return;
- }
-
- atomic_inc(&cmd->t_se_count);
- atomic_inc(&cmd->t_transport_timeout);
- cmd->t_tasks_failed = 1;
-
- task->task_flags |= TF_TIMEOUT;
- task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT;
- task->task_scsi_status = 1;
-
- if (task->task_flags & TF_REQUEST_STOP) {
- pr_debug("transport task: %p cmd: %p timeout TF_REQUEST_STOP"
- " == 1\n", task, cmd);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- complete(&task->task_stop_comp);
- return;
- }
-
- if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
- pr_debug("transport task: %p cmd: %p timeout non zero"
- " t_task_cdbs_left\n", task, cmd);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return;
- }
- pr_debug("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
- task, cmd);
-
- cmd->t_state = TRANSPORT_COMPLETE_FAILURE;
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE, false);
-}
-
-/*
- * Called with cmd->t_state_lock held.
- */
-static void transport_start_task_timer(struct se_task *task)
-{
- struct se_device *dev = task->task_se_cmd->se_dev;
- int timeout;
-
- if (task->task_flags & TF_TIMER_RUNNING)
- return;
- /*
- * If the task_timeout is disabled, exit now.
- */
- timeout = dev->se_sub_dev->se_dev_attrib.task_timeout;
- if (!timeout)
- return;
-
- init_timer(&task->task_timer);
- task->task_timer.expires = (get_jiffies_64() + timeout * HZ);
- task->task_timer.data = (unsigned long) task;
- task->task_timer.function = transport_task_timeout_handler;
-
- task->task_flags |= TF_TIMER_RUNNING;
- add_timer(&task->task_timer);
-#if 0
- pr_debug("Starting task timer for cmd: %p task: %p seconds:"
- " %d\n", task->task_se_cmd, task, timeout);
-#endif
-}
-
-/*
- * Called with spin_lock_irq(&cmd->t_state_lock) held.
- */
-void __transport_stop_task_timer(struct se_task *task, unsigned long *flags)
-{
- struct se_cmd *cmd = task->task_se_cmd;
-
- if (!(task->task_flags & TF_TIMER_RUNNING))
- return;
-
- task->task_flags |= TF_TIMER_STOP;
- spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
-
- del_timer_sync(&task->task_timer);
-
- spin_lock_irqsave(&cmd->t_state_lock, *flags);
- task->task_flags &= ~TF_TIMER_RUNNING;
- task->task_flags &= ~TF_TIMER_STOP;
-}
-
-static void transport_stop_all_task_timers(struct se_cmd *cmd)
-{
- struct se_task *task = NULL, *task_tmp;
- unsigned long flags;
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- list_for_each_entry_safe(task, task_tmp,
- &cmd->t_task_list, t_list)
- __transport_stop_task_timer(task, &flags);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-}
-
static inline int transport_tcq_window_closed(struct se_device *dev)
{
if (dev->dev_tcq_window_closed++ <
if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
- transport_generic_request_failure(cmd, NULL, 0, 1);
+ transport_generic_request_failure(cmd, 0, 1);
return 0;
}
if (atomic_read(&cmd->t_task_cdbs_sent) ==
cmd->t_task_list_num)
- atomic_set(&cmd->transport_sent, 1);
+ atomic_set(&cmd->t_transport_sent, 1);
- transport_start_task_timer(task);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
/*
- * The struct se_cmd->transport_emulate_cdb() function pointer is used
+ * The struct se_cmd->execute_task() function pointer is used
* to grab REPORT_LUNS and other CDBs we want to handle before they hit the
* struct se_subsystem_api->do_task() caller below.
*/
- if (cmd->transport_emulate_cdb) {
- error = cmd->transport_emulate_cdb(cmd);
+ if (cmd->execute_task) {
+ error = cmd->execute_task(task);
if (error != 0) {
cmd->transport_error_status = error;
spin_lock_irqsave(&cmd->t_state_lock, flags);
task->task_flags &= ~TF_ACTIVE;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- atomic_set(&cmd->transport_sent, 0);
+ atomic_set(&cmd->t_transport_sent, 0);
transport_stop_tasks_for_cmd(cmd);
- transport_generic_request_failure(cmd, dev, 0, 1);
+ atomic_inc(&dev->depth_left);
+ transport_generic_request_failure(cmd, 0, 1);
goto check_depth;
}
/*
- * Handle the successful completion for transport_emulate_cdb()
+ * Handle the successful completion for execute_task()
* for synchronous operation, following SCF_EMULATE_CDB_ASYNC.
* Otherwise the caller is expected to complete the task with
* proper status.
spin_lock_irqsave(&cmd->t_state_lock, flags);
task->task_flags &= ~TF_ACTIVE;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- atomic_set(&cmd->transport_sent, 0);
+ atomic_set(&cmd->t_transport_sent, 0);
transport_stop_tasks_for_cmd(cmd);
- transport_generic_request_failure(cmd, dev, 0, 1);
+ atomic_inc(&dev->depth_left);
+ transport_generic_request_failure(cmd, 0, 1);
}
}
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_32(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
- passthrough = (dev->transport->transport_type ==
- TRANSPORT_PLUGIN_PHBA_PDEV);
- /*
- * Skip the remaining assignments for TCM/PSCSI passthrough
- */
- if (passthrough)
- break;
+
+ if (dev->transport->transport_type ==
+ TRANSPORT_PLUGIN_PHBA_PDEV)
+ goto out_unsupported_cdb;
/*
- * Setup BIDI XOR callback to be run during transport_generic_complete_ok()
+ * Setup BIDI XOR callback to be run after I/O completion.
*/
cmd->transport_complete_callback = &transport_xor_callback;
cmd->t_tasks_fua = (cdb[1] & 0x8);
cmd->t_task_lba = transport_lba_64_ext(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
- /*
- * Skip the remaining assignments for TCM/PSCSI passthrough
- */
if (passthrough)
- break;
-
+ goto out_unsupported_cdb;
/*
- * Setup BIDI XOR callback to be run during
- * transport_generic_complete_ok()
+ * Setup BIDI XOR callback to be run after I/O
+ * completion.
*/
cmd->transport_complete_callback = &transport_xor_callback;
cmd->t_tasks_fua = (cdb[10] & 0x8);
/*
* Check for emulated MI_REPORT_TARGET_PGS.
*/
- if (cdb[1] == MI_REPORT_TARGET_PGS) {
- cmd->transport_emulate_cdb =
- (su_dev->t10_alua.alua_type ==
- SPC3_ALUA_EMULATED) ?
- core_emulate_report_target_port_groups :
- NULL;
+ if (cdb[1] == MI_REPORT_TARGET_PGS &&
+ su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
+ cmd->execute_task =
+ target_emulate_report_target_port_groups;
}
size = (cdb[6] << 24) | (cdb[7] << 16) |
(cdb[8] << 8) | cdb[9];
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case PERSISTENT_RESERVE_IN:
+ if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
+ cmd->execute_task = target_scsi3_emulate_pr_in;
+ size = (cdb[7] << 8) + cdb[8];
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+ break;
case PERSISTENT_RESERVE_OUT:
- cmd->transport_emulate_cdb =
- (su_dev->t10_pr.res_type ==
- SPC3_PERSISTENT_RESERVATIONS) ?
- core_scsi3_emulate_pr : NULL;
+ if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
+ cmd->execute_task = target_scsi3_emulate_pr_out;
size = (cdb[7] << 8) + cdb[8];
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
*
* Check for emulated MO_SET_TARGET_PGS.
*/
- if (cdb[1] == MO_SET_TARGET_PGS) {
- cmd->transport_emulate_cdb =
- (su_dev->t10_alua.alua_type ==
- SPC3_ALUA_EMULATED) ?
- core_emulate_set_target_port_groups :
- NULL;
+ if (cdb[1] == MO_SET_TARGET_PGS &&
+ su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
+ cmd->execute_task =
+ target_emulate_set_target_port_groups;
}
size = (cdb[6] << 24) | (cdb[7] << 16) |
* is running in SPC_PASSTHROUGH, and wants reservations
* emulation disabled.
*/
- cmd->transport_emulate_cdb =
- (su_dev->t10_pr.res_type !=
- SPC_PASSTHROUGH) ?
- core_scsi2_emulate_crh : NULL;
+ if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
+ cmd->execute_task = target_scsi2_reservation_reserve;
cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
break;
case RELEASE:
else
size = cmd->data_length;
- cmd->transport_emulate_cdb =
- (su_dev->t10_pr.res_type !=
- SPC_PASSTHROUGH) ?
- core_scsi2_emulate_crh : NULL;
+ if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
+ cmd->execute_task = target_scsi2_reservation_release;
cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
break;
case SYNCHRONIZE_CACHE:
size = transport_get_size(sectors, cdb, cmd);
cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
- /*
- * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb()
- */
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
break;
/*
cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
break;
case REPORT_LUNS:
- cmd->transport_emulate_cdb =
- transport_core_report_lun_response;
+ cmd->execute_task = target_report_luns;
size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
/*
* Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
}
/*
- * Called from transport_generic_complete_ok() and
- * transport_generic_request_failure() to determine which dormant/delayed
+ * Called from I/O completion to determine which dormant/delayed
* and ordered cmds need to have their tasks added to the execution queue.
*/
static void transport_complete_task_attr(struct se_cmd *cmd)
{
int ret = 0;
- transport_stop_all_task_timers(cmd);
if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
transport_complete_task_attr(cmd);
schedule_work(&cmd->se_dev->qf_work_queue);
}
-static void transport_generic_complete_ok(struct se_cmd *cmd)
+static void target_complete_ok_work(struct work_struct *work)
{
+ struct se_cmd *cmd = container_of(work, struct se_cmd, work);
int reason = 0, ret;
+
/*
* Check if we need to move delayed/dormant tasks from cmds on the
* delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
if (cmd->scsi_status) {
ret = transport_send_check_condition_and_sense(
cmd, reason, 1);
- if (ret == -EAGAIN)
+ if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
transport_lun_remove_cmd(cmd);
spin_unlock(&cmd->se_lun->lun_sep_lock);
ret = cmd->se_tfo->queue_data_in(cmd);
- if (ret == -EAGAIN)
+ if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
break;
case DMA_TO_DEVICE:
}
spin_unlock(&cmd->se_lun->lun_sep_lock);
ret = cmd->se_tfo->queue_data_in(cmd);
- if (ret == -EAGAIN)
+ if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
break;
}
/* Fall through for DMA_TO_DEVICE */
case DMA_NONE:
ret = cmd->se_tfo->queue_status(cmd);
- if (ret == -EAGAIN)
+ if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
break;
default:
while (!list_empty(&dispose_list)) {
task = list_first_entry(&dispose_list, struct se_task, t_list);
- kfree(task->task_sg_bidi);
- kfree(task->task_sg);
+ if (task->task_sg != cmd->t_data_sg &&
+ task->task_sg != cmd->t_bidi_data_sg)
+ kfree(task->task_sg);
list_del(&task->t_list);
}
EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
-static int transport_new_cmd_obj(struct se_cmd *cmd)
-{
- struct se_device *dev = cmd->se_dev;
- int set_counts = 1, rc, task_cdbs;
-
- /*
- * Setup any BIDI READ tasks and memory from
- * cmd->t_mem_bidi_list so the READ struct se_tasks
- * are queued first for the non pSCSI passthrough case.
- */
- if (cmd->t_bidi_data_sg &&
- (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) {
- rc = transport_allocate_tasks(cmd,
- cmd->t_task_lba,
- DMA_FROM_DEVICE,
- cmd->t_bidi_data_sg,
- cmd->t_bidi_data_nents);
- if (rc <= 0) {
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
- }
- atomic_inc(&cmd->t_fe_count);
- atomic_inc(&cmd->t_se_count);
- set_counts = 0;
- }
- /*
- * Setup the tasks and memory from cmd->t_mem_list
- * Note for BIDI transfers this will contain the WRITE payload
- */
- task_cdbs = transport_allocate_tasks(cmd,
- cmd->t_task_lba,
- cmd->data_direction,
- cmd->t_data_sg,
- cmd->t_data_nents);
- if (task_cdbs <= 0) {
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
- }
-
- if (set_counts) {
- atomic_inc(&cmd->t_fe_count);
- atomic_inc(&cmd->t_se_count);
- }
-
- cmd->t_task_list_num = task_cdbs;
-
- atomic_set(&cmd->t_task_cdbs_left, task_cdbs);
- atomic_set(&cmd->t_task_cdbs_ex_left, task_cdbs);
- atomic_set(&cmd->t_task_cdbs_timeout_left, task_cdbs);
- return 0;
-}
-
void *transport_kmap_first_data_page(struct se_cmd *cmd)
{
struct scatterlist *sg = cmd->t_data_sg;
/*
* Break up cmd into chunks the transport can handle
*/
-static int transport_allocate_data_tasks(
- struct se_cmd *cmd,
- unsigned long long lba,
+static int
+transport_allocate_data_tasks(struct se_cmd *cmd,
enum dma_data_direction data_direction,
- struct scatterlist *sgl,
- unsigned int sgl_nents)
+ struct scatterlist *cmd_sg, unsigned int sgl_nents)
{
- struct se_task *task;
struct se_device *dev = cmd->se_dev;
- unsigned long flags;
int task_count, i;
- sector_t sectors, dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
- u32 sector_size = dev->se_sub_dev->se_dev_attrib.block_size;
- struct scatterlist *sg;
- struct scatterlist *cmd_sg;
+ unsigned long long lba;
+ sector_t sectors, dev_max_sectors;
+ u32 sector_size;
+
+ if (transport_cmd_get_valid_sectors(cmd) < 0)
+ return -EINVAL;
+
+ dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
+ sector_size = dev->se_sub_dev->se_dev_attrib.block_size;
WARN_ON(cmd->data_length % sector_size);
+
+ lba = cmd->t_task_lba;
sectors = DIV_ROUND_UP(cmd->data_length, sector_size);
task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors);
-
- cmd_sg = sgl;
+
+ /*
+ * If we need just a single task reuse the SG list in the command
+ * and avoid a lot of work.
+ */
+ if (task_count == 1) {
+ struct se_task *task;
+ unsigned long flags;
+
+ task = transport_generic_get_task(cmd, data_direction);
+ if (!task)
+ return -ENOMEM;
+
+ task->task_sg = cmd_sg;
+ task->task_sg_nents = sgl_nents;
+
+ task->task_lba = lba;
+ task->task_sectors = sectors;
+ task->task_size = task->task_sectors * sector_size;
+
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ list_add_tail(&task->t_list, &cmd->t_task_list);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+ return task_count;
+ }
+
for (i = 0; i < task_count; i++) {
+ struct se_task *task;
unsigned int task_size, task_sg_nents_padded;
+ struct scatterlist *sg;
+ unsigned long flags;
int count;
task = transport_generic_get_task(cmd, data_direction);
if (!task)
return -ENOMEM;
- task->task_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
- GFP_KERNEL);
- if (!task->task_sg) {
- cmd->se_dev->transport->free_task(task);
- return -ENOMEM;
- }
-
- memcpy(task->task_sg, cmd->t_data_sg,
- sizeof(struct scatterlist) * cmd->t_data_nents);
+ task->task_sg = cmd->t_data_sg;
task->task_size = cmd->data_length;
task->task_sg_nents = cmd->t_data_nents;
return 1;
}
-static u32 transport_allocate_tasks(
- struct se_cmd *cmd,
- unsigned long long lba,
- enum dma_data_direction data_direction,
- struct scatterlist *sgl,
- unsigned int sgl_nents)
-{
- if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
- if (transport_cmd_get_valid_sectors(cmd) < 0)
- return -EINVAL;
-
- return transport_allocate_data_tasks(cmd, lba, data_direction,
- sgl, sgl_nents);
- } else
- return transport_allocate_control_task(cmd);
-
-}
-
-
-/* transport_generic_new_cmd(): Called from transport_processing_thread()
- *
- * Allocate storage transport resources from a set of values predefined
- * by transport_generic_cmd_sequencer() from the iSCSI Target RX process.
- * Any non zero return here is treated as an "out of resource' op here.
+/*
+ * Allocate any required resources to execute the command, and place
+ * it on the execution queue if possible. For writes we might not have the
+ * payload yet, thus notify the fabric via a call to ->write_pending instead.
*/
- /*
- * Generate struct se_task(s) and/or their payloads for this CDB.
- */
int transport_generic_new_cmd(struct se_cmd *cmd)
{
+ struct se_device *dev = cmd->se_dev;
+ int task_cdbs, task_cdbs_bidi = 0;
+ int set_counts = 1;
int ret = 0;
/*
if (ret < 0)
return ret;
}
+
/*
- * Call transport_new_cmd_obj() to invoke transport_allocate_tasks() for
- * control or data CDB types, and perform the map to backend subsystem
- * code from SGL memory allocated here by transport_generic_get_mem(), or
- * via pre-existing SGL memory setup explictly by fabric module code with
- * transport_generic_map_mem_to_cmd().
+ * For BIDI command set up the read tasks first.
*/
- ret = transport_new_cmd_obj(cmd);
- if (ret < 0)
- return ret;
+ if (cmd->t_bidi_data_sg &&
+ dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
+ BUG_ON(!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB));
+
+ task_cdbs_bidi = transport_allocate_data_tasks(cmd,
+ DMA_FROM_DEVICE, cmd->t_bidi_data_sg,
+ cmd->t_bidi_data_nents);
+ if (task_cdbs_bidi <= 0)
+ goto out_fail;
+
+ atomic_inc(&cmd->t_fe_count);
+ atomic_inc(&cmd->t_se_count);
+ set_counts = 0;
+ }
+
+ if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
+ task_cdbs = transport_allocate_data_tasks(cmd,
+ cmd->data_direction, cmd->t_data_sg,
+ cmd->t_data_nents);
+ } else {
+ task_cdbs = transport_allocate_control_task(cmd);
+ }
+
+ if (task_cdbs <= 0)
+ goto out_fail;
+
+ if (set_counts) {
+ atomic_inc(&cmd->t_fe_count);
+ atomic_inc(&cmd->t_se_count);
+ }
+
+ cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi);
+ atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num);
+ atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num);
+
/*
* For WRITEs, let the fabric know its buffer is ready.
* This WRITE struct se_cmd (and all of its associated struct se_task's)
*/
transport_execute_tasks(cmd);
return 0;
+
+out_fail:
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
EXPORT_SYMBOL(transport_generic_new_cmd);
static void transport_write_pending_qf(struct se_cmd *cmd)
{
- if (cmd->se_tfo->write_pending(cmd) == -EAGAIN) {
+ int ret;
+
+ ret = cmd->se_tfo->write_pending(cmd);
+ if (ret == -EAGAIN || ret == -ENOMEM) {
pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n",
cmd);
transport_handle_queue_full(cmd, cmd->se_dev);
* frontend know that WRITE buffers are ready.
*/
ret = cmd->se_tfo->write_pending(cmd);
- if (ret == -EAGAIN)
+ if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
else if (ret < 0)
return ret;
pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
transport_handle_queue_full(cmd, cmd->se_dev);
- return ret;
+ return 0;
}
/**
core_tmr_release_req(cmd->se_tmr_req);
if (cmd->t_task_cdb != cmd->__t_task_cdb)
kfree(cmd->t_task_cdb);
+ /*
+ * Check if target_wait_for_sess_cmds() is expecting to
+	 * release se_cmd directly here.
+ */
+ if (cmd->check_release != 0 && cmd->se_tfo->check_release_cmd)
+ if (cmd->se_tfo->check_release_cmd(cmd) != 0)
+ return;
+
cmd->se_tfo->release_cmd(cmd);
}
EXPORT_SYMBOL(transport_release_cmd);
}
EXPORT_SYMBOL(transport_generic_free_cmd);
+/* target_get_sess_cmd - Add command to active ->sess_cmd_list
+ * @se_sess: session to reference
+ * @se_cmd: command descriptor to add
+ */
+void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+ list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
+ se_cmd->check_release = 1;
+ spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+}
+EXPORT_SYMBOL(target_get_sess_cmd);
+
+/* target_put_sess_cmd - Check for active I/O shutdown or list delete
+ * @se_sess: session to reference
+ * @se_cmd: command descriptor to drop
+ */
+int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+ if (list_empty(&se_cmd->se_cmd_list)) {
+ spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ WARN_ON(1);
+ return 0;
+ }
+
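+	/*
+	 * If the session is being torn down, this command has already been
+	 * spliced onto sess_wait_list; wake the waiter in
+	 * target_wait_for_sess_cmds() instead of deleting it here.
+	 */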
+ if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
+ spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ complete(&se_cmd->cmd_wait_comp);
+ return 1;
+ }
+ list_del(&se_cmd->se_cmd_list);
+ spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(target_put_sess_cmd);
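/*
 * A sketch of the expected fabric-side pairing, with hypothetical fabric_*
 * names: commands are registered at submission time, and the fabric's
 * ->check_release_cmd() (called from transport_release_cmd() above) drops
 * the reference again.
 */
static void fabric_submit_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
{
	/* Track the descriptor on sess_cmd_list for session shutdown. */
	target_get_sess_cmd(se_sess, se_cmd);
	/* ... hand se_cmd to the target core ... */
}

static int fabric_check_release_cmd(struct se_cmd *se_cmd)
{
	/*
	 * Returns 1 if a target_wait_for_sess_cmds() caller was signalled
	 * and now owns the final release of se_cmd.
	 */
	return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
}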
+
+/* target_splice_sess_cmd_list - Split active cmds into sess_wait_list
+ * @se_sess: session to split
+ */
+void target_splice_sess_cmd_list(struct se_session *se_sess)
+{
+ struct se_cmd *se_cmd;
+ unsigned long flags;
+
+ WARN_ON(!list_empty(&se_sess->sess_wait_list));
+ INIT_LIST_HEAD(&se_sess->sess_wait_list);
+
+ spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+ se_sess->sess_tearing_down = 1;
+
+ list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
+
+ list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
+ se_cmd->cmd_wait_set = 1;
+
+ spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+}
+EXPORT_SYMBOL(target_splice_sess_cmd_list);
+
+/* target_wait_for_sess_cmds - Wait for outstanding descriptors
+ * @se_sess: session to wait for active I/O
+ * @wait_for_tasks: Make extra transport_wait_for_tasks call
+ */
+void target_wait_for_sess_cmds(
+ struct se_session *se_sess,
+ int wait_for_tasks)
+{
+ struct se_cmd *se_cmd, *tmp_cmd;
+ bool rc = false;
+
+ list_for_each_entry_safe(se_cmd, tmp_cmd,
+ &se_sess->sess_wait_list, se_cmd_list) {
+ list_del(&se_cmd->se_cmd_list);
+
+ pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
+ " %d\n", se_cmd, se_cmd->t_state,
+ se_cmd->se_tfo->get_cmd_state(se_cmd));
+
+ if (wait_for_tasks) {
+ pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d,"
+ " fabric state: %d\n", se_cmd, se_cmd->t_state,
+ se_cmd->se_tfo->get_cmd_state(se_cmd));
+
+ rc = transport_wait_for_tasks(se_cmd);
+
+ pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d,"
+ " fabric state: %d\n", se_cmd, se_cmd->t_state,
+ se_cmd->se_tfo->get_cmd_state(se_cmd));
+ }
+
+ if (!rc) {
+ wait_for_completion(&se_cmd->cmd_wait_comp);
+ pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
+ " fabric state: %d\n", se_cmd, se_cmd->t_state,
+ se_cmd->se_tfo->get_cmd_state(se_cmd));
+ }
+
+ se_cmd->se_tfo->release_cmd(se_cmd);
+ }
+}
+EXPORT_SYMBOL(target_wait_for_sess_cmds);
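/*
 * Taken together, a fabric session shutdown is expected to look roughly
 * like this (hypothetical fabric_close_session(); whether to pass
 * wait_for_tasks is the fabric's choice):
 */
static void fabric_close_session(struct se_session *se_sess)
{
	/* Stop accepting new entries and snapshot the outstanding list. */
	target_splice_sess_cmd_list(se_sess);
	/* Block until every outstanding descriptor has been released. */
	target_wait_for_sess_cmds(se_sess, 0);
	transport_deregister_session(se_sess);
}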
+
/* transport_lun_wait_for_tasks():
*
* Called from ConfigFS context to stop the passed struct se_cmd to allow
* Called from frontend fabric context to wait for storage engine
* to pause and/or release frontend generated struct se_cmd.
*/
-void transport_wait_for_tasks(struct se_cmd *cmd)
+bool transport_wait_for_tasks(struct se_cmd *cmd)
{
unsigned long flags;
spin_lock_irqsave(&cmd->t_state_lock, flags);
if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return;
+ return false;
}
/*
* Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE
*/
if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && !cmd->se_tmr_req) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return;
+ return false;
}
/*
* If we are already stopped due to an external event (ie: LUN shutdown)
if (!atomic_read(&cmd->t_transport_active) ||
atomic_read(&cmd->t_transport_aborted)) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return;
+ return false;
}
atomic_set(&cmd->t_transport_stop, 1);
pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
- " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop"
- " = TRUE\n", cmd, cmd->se_tfo->get_task_tag(cmd),
- cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
- cmd->deferred_t_state);
+ " i_state: %d, t_state: %d, t_transport_stop = TRUE\n",
+ cmd, cmd->se_tfo->get_task_tag(cmd),
+ cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
cmd->se_tfo->get_task_tag(cmd));
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+ return true;
}
EXPORT_SYMBOL(transport_wait_for_tasks);
ret = cmd->se_tfo->new_cmd_map(cmd);
if (ret < 0) {
cmd->transport_error_status = ret;
- transport_generic_request_failure(cmd, NULL,
+ transport_generic_request_failure(cmd,
0, (cmd->data_direction !=
DMA_TO_DEVICE));
break;
}
ret = transport_generic_new_cmd(cmd);
- if (ret == -EAGAIN)
- break;
- else if (ret < 0) {
+ if (ret < 0) {
cmd->transport_error_status = ret;
- transport_generic_request_failure(cmd, NULL,
+ transport_generic_request_failure(cmd,
0, (cmd->data_direction !=
DMA_TO_DEVICE));
}
case TRANSPORT_PROCESS_WRITE:
transport_generic_process_write(cmd);
break;
- case TRANSPORT_COMPLETE_OK:
- transport_stop_all_task_timers(cmd);
- transport_generic_complete_ok(cmd);
- break;
- case TRANSPORT_REMOVE:
- transport_put_cmd(cmd);
- break;
- case TRANSPORT_FREE_CMD_INTR:
- transport_generic_free_cmd(cmd, 0);
- break;
case TRANSPORT_PROCESS_TMR:
transport_generic_do_tmr(cmd);
break;
- case TRANSPORT_COMPLETE_FAILURE:
- transport_generic_request_failure(cmd, NULL, 1, 1);
- break;
- case TRANSPORT_COMPLETE_TIMEOUT:
- transport_stop_all_task_timers(cmd);
- transport_generic_request_timeout(cmd);
- break;
case TRANSPORT_COMPLETE_QF_WP:
transport_write_pending_qf(cmd);
break;
transport_complete_qf(cmd);
break;
default:
- pr_err("Unknown t_state: %d deferred_t_state:"
- " %d for ITT: 0x%08x i_state: %d on SE LUN:"
- " %u\n", cmd->t_state, cmd->deferred_t_state,
+ pr_err("Unknown t_state: %d for ITT: 0x%08x "
+ "i_state: %d on SE LUN: %u\n",
+ cmd->t_state,
cmd->se_tfo->get_task_tag(cmd),
cmd->se_tfo->get_cmd_state(cmd),
cmd->se_lun->unpacked_lun);