/*******************************************************************************
 * Filename:  target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
static int sub_api_initialized;

static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;

static int transport_processing_thread(void *param);
static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev);
static int transport_generic_get_mem(struct se_cmd *cmd);
static void transport_put_cmd(struct se_cmd *cmd);
static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
static void target_complete_ok_work(struct work_struct *work);
int init_se_kmem_caches(void)
{
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session"
				" failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out_free_sess_cache;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration"
				" failed\n");
		goto out_free_ua_cache;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
			0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
				" failed\n");
		goto out_free_pr_reg_cache;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
				"cache failed\n");
		goto out_free_lu_gp_cache;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"cache failed\n");
		goto out_free_lu_gp_mem_cache;
	}
	t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
			"t10_alua_tg_pt_gp_mem_cache",
			sizeof(struct t10_alua_tg_pt_gp_member),
			__alignof__(struct t10_alua_tg_pt_gp_member),
			0, NULL);
	if (!t10_alua_tg_pt_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"mem_cache failed\n");
		goto out_free_tg_pt_gp_cache;
	}

	target_completion_wq = alloc_workqueue("target_completion",
					       WQ_MEM_RECLAIM, 0);
	if (!target_completion_wq)
		goto out_free_tg_pt_gp_mem_cache;

	return 0;

out_free_tg_pt_gp_mem_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
out_free_tg_pt_gp_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
out_free_lu_gp_cache:
	kmem_cache_destroy(t10_alua_lu_gp_cache);
out_free_pr_reg_cache:
	kmem_cache_destroy(t10_pr_reg_cache);
out_free_ua_cache:
	kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
	kmem_cache_destroy(se_sess_cache);
out:
	return -ENOMEM;
}
void release_se_kmem_caches(void)
{
	destroy_workqueue(target_completion_wq);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
}

/* This code ensures unique mib indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}

static void transport_init_queue_obj(struct se_queue_obj *qobj)
{
	atomic_set(&qobj->queue_cnt, 0);
	INIT_LIST_HEAD(&qobj->qobj_list);
	init_waitqueue_head(&qobj->thread_wq);
	spin_lock_init(&qobj->cmd_queue_lock);
}

void transport_subsystem_check_init(void)
{
	int ret;

	if (sub_api_initialized)
		return;

	ret = request_module("target_core_iblock");
	if (ret != 0)
		pr_err("Unable to load target_core_iblock\n");

	ret = request_module("target_core_file");
	if (ret != 0)
		pr_err("Unable to load target_core_file\n");

	ret = request_module("target_core_pscsi");
	if (ret != 0)
		pr_err("Unable to load target_core_pscsi\n");

	ret = request_module("target_core_stgt");
	if (ret != 0)
		pr_err("Unable to load target_core_stgt\n");

	sub_api_initialized = 1;
}
struct se_session *transport_init_session(void)
{
	struct se_session *se_sess;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!se_sess) {
		pr_err("Unable to allocate struct se_session from"
				" se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);
	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
	INIT_LIST_HEAD(&se_sess->sess_wait_list);
	spin_lock_init(&se_sess->sess_cmd_lock);
	kref_init(&se_sess->sess_kref);

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session);
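
/*
 * Illustrative sketch (not part of the original file): a fabric driver
 * typically pairs transport_init_session() with a registration call from
 * its login path. The tpg, acl and conn names below are hypothetical
 * fabric-side objects, assumed only for this example.
 *
 *	struct se_session *sess = transport_init_session();
 *	if (IS_ERR(sess))
 *		return PTR_ERR(sess);
 *	conn->sess = sess;
 *	transport_register_session(&tpg->se_tpg, acl, sess, conn);
 */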
/*
 * Called with struct se_portal_group->session_lock held.
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned char buf[PR_REG_ISID_LEN];

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active se_session
	 *
	 * Only set for struct se_session's that will actually be moving I/O.
	 * eg: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}
		kref_get(&se_nacl->acl_kref);

		spin_lock_irq(&se_nacl->nacl_sess_lock);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irq(&se_nacl->nacl_sess_lock);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);
void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned long flags;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
}
EXPORT_SYMBOL(transport_register_session);
static void target_release_session(struct kref *kref)
{
	struct se_session *se_sess = container_of(kref,
			struct se_session, sess_kref);
	struct se_portal_group *se_tpg = se_sess->se_tpg;

	se_tpg->se_tpg_tfo->close_session(se_sess);
}

void target_get_session(struct se_session *se_sess)
{
	kref_get(&se_sess->sess_kref);
}
EXPORT_SYMBOL(target_get_session);

void target_put_session(struct se_session *se_sess)
{
	struct se_portal_group *tpg = se_sess->se_tpg;

	if (tpg->se_tpg_tfo->put_session != NULL) {
		tpg->se_tpg_tfo->put_session(se_sess);
		return;
	}
	kref_put(&se_sess->sess_kref, target_release_session);
}
EXPORT_SYMBOL(target_put_session);
static void target_complete_nacl(struct kref *kref)
{
	struct se_node_acl *nacl = container_of(kref,
			struct se_node_acl, acl_kref);

	complete(&nacl->acl_free_comp);
}

void target_put_nacl(struct se_node_acl *nacl)
{
	kref_put(&nacl->acl_kref, target_complete_nacl);
}
void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;
	unsigned long flags;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		if (se_nacl->acl_stop == 0)
			list_del(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);
void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	struct target_core_fabric_ops *se_tfo;
	struct se_node_acl *se_nacl;
	unsigned long flags;
	bool comp_nacl = true;

	if (!se_tpg) {
		transport_free_session(se_sess);
		return;
	}
	se_tfo = se_tpg->se_tpg_tfo;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);

	/*
	 * Determine if we need to do extra work for this initiator node's
	 * struct se_node_acl if it had been previously dynamically generated.
	 */
	se_nacl = se_sess->se_node_acl;

	spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
	if (se_nacl && se_nacl->dynamic_node_acl) {
		if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
			list_del(&se_nacl->acl_list);
			se_tpg->num_node_acls--;
			spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
			core_tpg_wait_for_nacl_pr_ref(se_nacl);
			core_free_device_list_for_node(se_nacl, se_tpg);
			se_tfo->tpg_release_fabric_acl(se_tpg, se_nacl);

			comp_nacl = false;
			spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
		}
	}
	spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);

	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
		se_tpg->se_tpg_tfo->get_fabric_name());
	/*
	 * If last kref is dropping now for an explicit NodeACL, awake sleeping
	 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
	 * removal context.
	 */
	if (se_nacl && comp_nacl == true)
		target_put_nacl(se_nacl);

	transport_free_session(se_sess);
}
EXPORT_SYMBOL(transport_deregister_session);
/*
 * Called with cmd->t_state_lock held.
 */
static void target_remove_from_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	if (!dev)
		return;

	if (cmd->transport_state & CMD_T_BUSY)
		return;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (cmd->state_active) {
		list_del(&cmd->state_list);
		cmd->state_active = false;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	/*
	 * Determine if IOCTL context caller is requesting the stopping of this
	 * command for LUN shutdown purposes.
	 */
	if (cmd->transport_state & CMD_T_LUN_STOP) {
		pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
			__func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));

		cmd->transport_state &= ~CMD_T_ACTIVE;
		if (remove_from_lists)
			target_remove_from_state_list(cmd);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete(&cmd->transport_lun_stop_comp);
		return 1;
	}

	if (remove_from_lists) {
		target_remove_from_state_list(cmd);

		/*
		 * Clear struct se_cmd->se_lun before the handoff to FE.
		 */
		cmd->se_lun = NULL;
	}

	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
			__func__, __LINE__,
			cmd->se_tfo->get_task_tag(cmd));

		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete(&cmd->t_transport_stop_comp);
		return 1;
	}

	cmd->transport_state &= ~CMD_T_ACTIVE;
	if (remove_from_lists) {
		/*
		 * Some fabric modules like tcm_loop can release
		 * their internally allocated I/O reference now and
		 * struct se_cmd now.
		 *
		 * Fabric modules are expected to return '1' here if the
		 * se_cmd being passed is released at this point,
		 * or zero if not being released.
		 */
		if (cmd->se_tfo->check_stop_free != NULL) {
			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
			return cmd->se_tfo->check_stop_free(cmd);
		}
	}

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	return 0;
}

static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	return transport_cmd_check_stop(cmd, true);
}
static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
	struct se_lun *lun = cmd->se_lun;
	unsigned long flags;

	if (!lun)
		return;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
		target_remove_from_state_list(cmd);
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	spin_lock_irqsave(&lun->lun_cmd_lock, flags);
	if (!list_empty(&cmd->se_lun_node))
		list_del_init(&cmd->se_lun_node);
	spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
}

void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
	if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
		transport_lun_remove_cmd(cmd);

	if (transport_cmd_check_stop_to_fabric(cmd))
		return;
	if (remove) {
		transport_remove_cmd_from_queue(cmd);
		transport_put_cmd(cmd);
	}
}
static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
		bool at_head)
{
	struct se_device *dev = cmd->se_dev;
	struct se_queue_obj *qobj = &dev->dev_queue_obj;
	unsigned long flags;

	if (t_state) {
		spin_lock_irqsave(&cmd->t_state_lock, flags);
		cmd->t_state = t_state;
		cmd->transport_state |= CMD_T_ACTIVE;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	}

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);

	/* If the cmd is already on the list, remove it before we add it */
	if (!list_empty(&cmd->se_queue_node))
		list_del(&cmd->se_queue_node);
	else
		atomic_inc(&qobj->queue_cnt);

	if (at_head)
		list_add(&cmd->se_queue_node, &qobj->qobj_list);
	else
		list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
	cmd->transport_state |= CMD_T_QUEUED;
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	wake_up_interruptible(&qobj->thread_wq);
}
static struct se_cmd *
transport_get_cmd_from_queue(struct se_queue_obj *qobj)
{
	struct se_cmd *cmd;
	unsigned long flags;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (list_empty(&qobj->qobj_list)) {
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return NULL;
	}
	cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);

	cmd->transport_state &= ~CMD_T_QUEUED;
	list_del_init(&cmd->se_queue_node);
	atomic_dec(&qobj->queue_cnt);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	return cmd;
}

static void transport_remove_cmd_from_queue(struct se_cmd *cmd)
{
	struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj;
	unsigned long flags;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (!(cmd->transport_state & CMD_T_QUEUED)) {
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return;
	}
	cmd->transport_state &= ~CMD_T_QUEUED;
	atomic_dec(&qobj->queue_cnt);
	list_del_init(&cmd->se_queue_node);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
}
static void target_complete_failure_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	transport_generic_request_failure(cmd);
}
void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
{
	struct se_device *dev = cmd->se_dev;
	int success = scsi_status == GOOD;
	unsigned long flags;

	cmd->scsi_status = scsi_status;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->transport_state &= ~CMD_T_BUSY;

	if (dev && dev->transport->transport_complete) {
		if (dev->transport->transport_complete(cmd,
				cmd->t_data_sg) != 0) {
			cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
			success = 1;
		}
	}

	/*
	 * See if we are waiting to complete for an exception condition.
	 */
	if (cmd->transport_state & CMD_T_REQUEST_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&cmd->task_stop_comp);
		return;
	}

	if (!success)
		cmd->transport_state |= CMD_T_FAILED;

	/*
	 * Check for case where an explicit ABORT_TASK has been received
	 * and transport_wait_for_tasks() will be waiting for completion..
	 */
	if (cmd->transport_state & CMD_T_ABORTED &&
	    cmd->transport_state & CMD_T_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&cmd->t_transport_stop_comp);
		return;
	} else if (cmd->transport_state & CMD_T_FAILED) {
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		INIT_WORK(&cmd->work, target_complete_failure_work);
	} else {
		INIT_WORK(&cmd->work, target_complete_ok_work);
	}

	cmd->t_state = TRANSPORT_COMPLETE;
	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	queue_work(target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(target_complete_cmd);
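
/*
 * Illustrative sketch (not from the original file): a backend subsystem
 * plugin completes I/O by calling target_complete_cmd() from its own
 * completion context, e.g. a bio end_io handler. The my_backend_bio_done
 * name and err plumbing below are hypothetical backend-side assumptions.
 *
 *	static void my_backend_bio_done(struct bio *bio, int err)
 *	{
 *		struct se_cmd *cmd = bio->bi_private;
 *
 *		target_complete_cmd(cmd, err ? SAM_STAT_CHECK_CONDITION
 *					     : GOOD);
 *		bio_put(bio);
 *	}
 */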
static void target_add_to_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (!cmd->state_active) {
		list_add_tail(&cmd->state_list, &dev->state_list);
		cmd->state_active = true;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
/*
 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
 */
static void transport_write_pending_qf(struct se_cmd *cmd);
static void transport_complete_qf(struct se_cmd *cmd);

static void target_qf_do_work(struct work_struct *work)
{
	struct se_device *dev = container_of(work, struct se_device,
					qf_work_queue);
	LIST_HEAD(qf_cmd_list);
	struct se_cmd *cmd, *cmd_tmp;

	spin_lock_irq(&dev->qf_cmd_lock);
	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
	spin_unlock_irq(&dev->qf_cmd_lock);

	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
		list_del(&cmd->se_qf_node);
		atomic_dec(&dev->dev_qf_count);
		smp_mb__after_atomic_dec();

		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
			: "UNKNOWN");

		if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
			transport_write_pending_qf(cmd);
		else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK)
			transport_complete_qf(cmd);
	}
}
unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
	switch (cmd->data_direction) {
	case DMA_NONE:
		return "NONE";
	case DMA_FROM_DEVICE:
		return "READ";
	case DMA_TO_DEVICE:
		return "WRITE";
	case DMA_BIDIRECTIONAL:
		return "BIDI";
	default:
		break;
	}

	return "UNKNOWN";
}
void transport_dump_dev_state(
	struct se_device *dev,
	char *b,
	int *bl)
{
	*bl += sprintf(b + *bl, "Status: ");
	switch (dev->dev_status) {
	case TRANSPORT_DEVICE_ACTIVATED:
		*bl += sprintf(b + *bl, "ACTIVATED");
		break;
	case TRANSPORT_DEVICE_DEACTIVATED:
		*bl += sprintf(b + *bl, "DEACTIVATED");
		break;
	case TRANSPORT_DEVICE_SHUTDOWN:
		*bl += sprintf(b + *bl, "SHUTDOWN");
		break;
	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
		*bl += sprintf(b + *bl, "OFFLINE");
		break;
	default:
		*bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
		break;
	}

	*bl += sprintf(b + *bl, "  Max Queue Depth: %d", dev->queue_depth);
	*bl += sprintf(b + *bl, "  SectorSize: %u  HwMaxSectors: %u\n",
		dev->se_sub_dev->se_dev_attrib.block_size,
		dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
	*bl += sprintf(b + *bl, "        ");
}
void transport_dump_vpd_proto_id(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Protocol Identifier: ");

	switch (vpd->protocol_identifier) {
	case 0x00:
		sprintf(buf+len, "Fibre Channel\n");
		break;
	case 0x10:
		sprintf(buf+len, "Parallel SCSI\n");
		break;
	case 0x20:
		sprintf(buf+len, "SSA\n");
		break;
	case 0x30:
		sprintf(buf+len, "IEEE 1394\n");
		break;
	case 0x40:
		sprintf(buf+len, "SCSI Remote Direct Memory Access"
				" Protocol\n");
		break;
	case 0x50:
		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
		break;
	case 0x60:
		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
		break;
	case 0x70:
		sprintf(buf+len, "Automation/Drive Interface Transport"
				" Protocol\n");
		break;
	case 0x80:
		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n",
				vpd->protocol_identifier);
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);
}

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * Check if the Protocol Identifier Valid (PIV) bit is set..
	 *
	 * from spc3r23.pdf section 7.5.1
	 */
	if (page_83[1] & 0x80) {
		vpd->protocol_identifier = (page_83[0] & 0xf0);
		vpd->protocol_identifier_set = 1;
		transport_dump_vpd_proto_id(vpd, NULL, 0);
	}
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);
int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");

	switch (vpd->association) {
	case 0x00:
		sprintf(buf+len, "addressed logical unit\n");
		break;
	case 0x10:
		sprintf(buf+len, "target port\n");
		break;
	case 0x20:
		sprintf(buf+len, "SCSI target device\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identification association..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 297
	 */
	vpd->association = (page_83[1] & 0x30);
	return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);
int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");

	switch (vpd->device_identifier_type) {
	case 0x00:
		sprintf(buf+len, "Vendor specific\n");
		break;
	case 0x01:
		sprintf(buf+len, "T10 Vendor ID based\n");
		break;
	case 0x02:
		sprintf(buf+len, "EUI-64 based\n");
		break;
	case 0x03:
		sprintf(buf+len, "NAA\n");
		break;
	case 0x04:
		sprintf(buf+len, "Relative target port identifier\n");
		break;
	case 0x08:
		sprintf(buf+len, "SCSI name string\n");
		break;
	default:
		sprintf(buf+len, "Unsupported: 0x%02x\n",
				vpd->device_identifier_type);
		ret = -EINVAL;
		break;
	}

	if (p_buf) {
		if (p_buf_len < strlen(buf)+1)
			return -EINVAL;
		strncpy(p_buf, buf, p_buf_len);
	} else {
		pr_debug("%s", buf);
	}

	return ret;
}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identifier type..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 298
	 */
	vpd->device_identifier_type = (page_83[1] & 0x0f);
	return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);
int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x02: /* ASCII */
		sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x03: /* UTF-8 */
		sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
			" 0x%02x", vpd->device_identifier_code_set);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */

	/*
	 * The VPD Code Set (encoding)
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 296
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
				hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
				hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);
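
/*
 * Illustrative sketch (not part of the original file): a caller walking the
 * designation descriptors of an INQUIRY EVPD 0x83 response would typically
 * feed each descriptor to the transport_set_vpd_*() helpers above. The
 * page_83 pointer and vpd allocation below are assumptions for this example.
 *
 *	vpd->protocol_identifier_set = 0;
 *	transport_set_vpd_proto_id(vpd, page_83);
 *	transport_set_vpd_assoc(vpd, page_83);
 *	transport_set_vpd_ident_type(vpd, page_83);
 *	transport_set_vpd_ident(vpd, page_83);
 *	page_83 += (page_83[3] + 4);  (advance past this designator)
 */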
static void core_setup_task_attr_emulation(struct se_device *dev)
{
	/*
	 * If this device is from Target_Core_Mod/pSCSI, disable the
	 * SAM Task Attribute emulation.
	 *
	 * This is currently not available in upstream Linux/SCSI Target
	 * mode code, and is assumed to be disabled while using TCM/pSCSI.
	 */
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
		return;
	}

	dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
	pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
		" device\n", dev->transport->name,
		dev->transport->get_device_rev(dev));
}
static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
	pr_debug("                 ANSI SCSI revision: %02x\n",
		dev->transport->get_device_rev(dev));
}
struct se_device *transport_add_device_to_core_hba(
	struct se_hba *hba,
	struct se_subsystem_api *transport,
	struct se_subsystem_dev *se_dev,
	u32 device_flags,
	void *transport_dev,
	struct se_dev_limits *dev_limits,
	const char *inquiry_prod,
	const char *inquiry_rev)
{
	int force_pt;
	struct se_device *dev;

	dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
	if (!dev) {
		pr_err("Unable to allocate memory for se_dev_t\n");
		return NULL;
	}

	transport_init_queue_obj(&dev->dev_queue_obj);
	dev->dev_flags = device_flags;
	dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
	dev->dev_ptr = transport_dev;
	dev->se_hba = hba;
	dev->se_sub_dev = se_dev;
	dev->transport = transport;
	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->dev_status_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	atomic_set(&dev->dev_ordered_id, 0);

	se_dev_set_default_attribs(dev, dev_limits);

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();
	spin_lock_init(&dev->stats_lock);

	spin_lock(&hba->device_lock);
	list_add_tail(&dev->dev_list, &hba->hba_dev_list);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);
	/*
	 * Setup the SAM Task Attribute emulation for struct se_device
	 */
	core_setup_task_attr_emulation(dev);
	/*
	 * Force PR and ALUA passthrough emulation with internal object use.
	 */
	force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
	/*
	 * Setup the Reservations infrastructure for struct se_device
	 */
	core_setup_reservations(dev, force_pt);
	/*
	 * Setup the Asymmetric Logical Unit Assignment for struct se_device
	 */
	if (core_setup_alua(dev, force_pt) < 0)
		goto out;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->process_thread = kthread_run(transport_processing_thread, dev,
					  "LIO_%s", dev->transport->name);
	if (IS_ERR(dev->process_thread)) {
		pr_err("Unable to create kthread: LIO_%s\n",
			dev->transport->name);
		goto out;
	}
	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 * This is required so that transport_get_inquiry() copies these
	 * originals once back into DEV_T10_WWN(dev) for the virtual device
	 * setup.
	 */
	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (!inquiry_prod || !inquiry_rev) {
			pr_err("All non TCM/pSCSI plugins require"
				" INQUIRY consts\n");
			goto out;
		}

		strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
		strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
	}
	scsi_dump_inquiry(dev);

	return dev;
out:
	kthread_stop(dev->process_thread);

	spin_lock(&hba->device_lock);
	list_del(&dev->dev_list);
	hba->dev_count--;
	spin_unlock(&hba->device_lock);

	se_release_vpd_for_dev(dev);

	kfree(dev);

	return NULL;
}
EXPORT_SYMBOL(transport_add_device_to_core_hba);
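
/*
 * Illustrative sketch (not part of the original file): a backend subsystem
 * plugin would typically call transport_add_device_to_core_hba() at the end
 * of its device creation callback. The my_backend_template, ibd and
 * dev_limits names below are hypothetical backend-side assumptions.
 *
 *	dev = transport_add_device_to_core_hba(hba, &my_backend_template,
 *			se_dev, dev_flags, ibd, &dev_limits,
 *			"MY-PROD", "1.0");
 *	if (!dev)
 *		goto fail;
 */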
int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{
	struct se_device *dev = cmd->se_dev;

	if (cmd->unknown_data_length) {
		cmd->data_length = size;
	} else if (size != cmd->data_length) {
		pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
			" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
				cmd->data_length, size, cmd->t_task_cdb[0]);

		cmd->cmd_spdtl = size;

		if (cmd->data_direction == DMA_TO_DEVICE) {
			pr_err("Rejecting underflow/overflow"
					" WRITE data\n");
			goto out_invalid_cdb_field;
		}
		/*
		 * Reject READ_* or WRITE_* with overflow/underflow for
		 * type SCF_SCSI_DATA_CDB.
		 */
		if (dev->se_sub_dev->se_dev_attrib.block_size != 512) {
			pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
				" CDB on non 512-byte sector setup subsystem"
				" plugin: %s\n", dev->transport->name);
			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
			goto out_invalid_cdb_field;
		}

		if (size > cmd->data_length) {
			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
			cmd->residual_count = (size - cmd->data_length);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - size);
		}
		cmd->data_length = size;
	}

	return 0;

out_invalid_cdb_field:
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
	return -EINVAL;
}
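
/*
 * Worked example (illustrative, not from the original file): if a READ_10
 * CDB encodes 8 sectors (4096 bytes at 512-byte blocks) but the fabric
 * header only promised 2048 bytes, then size (4096) > cmd->data_length
 * (2048), so SCF_OVERFLOW_BIT is set with residual_count = 2048, and the
 * command proceeds using the CDB-derived length of 4096 bytes.
 */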
/*
 * Used by fabric modules containing a local struct se_cmd within their
 * fabric dependent per I/O descriptor.
 */
void transport_init_se_cmd(
	struct se_cmd *cmd,
	struct target_core_fabric_ops *tfo,
	struct se_session *se_sess,
	u32 data_length,
	int data_direction,
	int task_attr,
	unsigned char *sense_buffer)
{
	INIT_LIST_HEAD(&cmd->se_lun_node);
	INIT_LIST_HEAD(&cmd->se_delayed_node);
	INIT_LIST_HEAD(&cmd->se_qf_node);
	INIT_LIST_HEAD(&cmd->se_queue_node);
	INIT_LIST_HEAD(&cmd->se_cmd_list);
	INIT_LIST_HEAD(&cmd->state_list);
	init_completion(&cmd->transport_lun_fe_stop_comp);
	init_completion(&cmd->transport_lun_stop_comp);
	init_completion(&cmd->t_transport_stop_comp);
	init_completion(&cmd->cmd_wait_comp);
	init_completion(&cmd->task_stop_comp);
	spin_lock_init(&cmd->t_state_lock);
	cmd->transport_state = CMD_T_DEV_ACTIVE;

	cmd->se_tfo = tfo;
	cmd->se_sess = se_sess;
	cmd->data_length = data_length;
	cmd->data_direction = data_direction;
	cmd->sam_task_attr = task_attr;
	cmd->sense_buffer = sense_buffer;

	cmd->state_active = false;
}
EXPORT_SYMBOL(transport_init_se_cmd);
static int transport_check_alloc_task_attr(struct se_cmd *cmd)
{
	/*
	 * Check if SAM Task Attribute emulation is enabled for this
	 * struct se_device storage object
	 */
	if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
		return 0;

	if (cmd->sam_task_attr == MSG_ACA_TAG) {
		pr_debug("SAM Task Attribute ACA"
			" emulation is not supported\n");
		return -EINVAL;
	}
	/*
	 * Used to determine when ORDERED commands should go from
	 * Dormant to Active status.
	 */
	cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
	smp_mb__after_atomic_inc();
	pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
			cmd->se_ordered_id, cmd->sam_task_attr,
			cmd->se_dev->transport->name);
	return 0;
}
/* target_setup_cmd_from_cdb():
 *
 *	Called from fabric RX Thread.
 */
int target_setup_cmd_from_cdb(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
	u32 pr_reg_type = 0;
	u8 alua_ascq = 0;
	unsigned long flags;
	int ret;

	/*
	 * Ensure that the received CDB is less than the max (252 + 8) bytes
	 * for VARIABLE_LENGTH_CMD
	 */
	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
		pr_err("Received SCSI CDB with command_size: %d that"
			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}
	/*
	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
	 * allocate the additional extended CDB buffer now.. Otherwise
	 * setup the pointer from __t_task_cdb to t_task_cdb.
	 */
	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
						GFP_KERNEL);
		if (!cmd->t_task_cdb) {
			pr_err("Unable to allocate cmd->t_task_cdb"
				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
				scsi_command_size(cdb),
				(unsigned long)sizeof(cmd->__t_task_cdb));
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->scsi_sense_reason =
					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
			return -ENOMEM;
		}
	} else
		cmd->t_task_cdb = &cmd->__t_task_cdb[0];
	/*
	 * Copy the original CDB into cmd->
	 */
	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
	/*
	 * Check for an existing UNIT ATTENTION condition
	 */
	if (core_scsi3_ua_check(cmd, cdb) < 0) {
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
		return -EINVAL;
	}

	ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
	if (ret != 0) {
		/*
		 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
		 * The ALUA additional sense code qualifier (ASCQ) is determined
		 * by the ALUA primary or secondary access state..
		 */
		if (ret > 0) {
			pr_debug("[%s]: ALUA TG Port not available, "
				"SenseKey: NOT_READY, ASC/ASCQ: "
				"0x04/0x%02x\n",
				cmd->se_tfo->get_fabric_name(), alua_ascq);

			transport_set_sense_codes(cmd, 0x04, alua_ascq);
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
			return -EINVAL;
		}
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}

	/*
	 * Check status for SPC-3 Persistent Reservations
	 */
	if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type)) {
		if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
					cmd, cdb, pr_reg_type) != 0) {
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
			cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
			cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
			return -EBUSY;
		}
		/*
		 * This means the CDB is allowed for the SCSI Initiator port
		 * when said port is *NOT* holding the legacy SPC-2 or
		 * SPC-3 Persistent Reservation.
		 */
	}

	ret = cmd->se_dev->transport->parse_cdb(cmd);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * Check for SAM Task Attribute Emulation
	 */
	if (transport_check_alloc_task_attr(cmd) < 0) {
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}
	spin_lock(&cmd->se_lun->lun_sep_lock);
	if (cmd->se_lun->lun_sep)
		cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
	spin_unlock(&cmd->se_lun->lun_sep_lock);
	return 0;
}
EXPORT_SYMBOL(target_setup_cmd_from_cdb);
/*
 * Used by fabric module frontends to queue tasks directly.
 * May only be called from process context.
 */
int transport_handle_cdb_direct(
	struct se_cmd *cmd)
{
	int ret;

	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}
	if (in_interrupt()) {
		dump_stack();
		pr_err("transport_generic_handle_cdb cannot be called"
			" from interrupt context\n");
		return -EINVAL;
	}
	/*
	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE following
	 * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
	 * in existing usage to ensure that outstanding descriptors are handled
	 * correctly during shutdown via transport_wait_for_tasks()
	 *
	 * Also, we don't take cmd->t_state_lock here as we only expect
	 * this to be called for initial descriptor submission.
	 */
	cmd->t_state = TRANSPORT_NEW_CMD;
	cmd->transport_state |= CMD_T_ACTIVE;

	/*
	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
	 * so follow TRANSPORT_NEW_CMD processing thread context usage
	 * and call transport_generic_request_failure() if necessary..
	 */
	ret = transport_generic_new_cmd(cmd);
	if (ret < 0)
		transport_generic_request_failure(cmd);

	return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);
/**
 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 */
void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags)
{
	struct se_portal_group *se_tpg;
	int rc;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);
	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
	BUG_ON(in_interrupt());
	/*
	 * Initialize se_cmd for target operation. From this point
	 * exceptions are handled by sending exception status via
	 * target_core_fabric_ops->queue_status() callback
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
				data_length, data_dir, task_attr, sense);
	if (flags & TARGET_SCF_UNKNOWN_SIZE)
		se_cmd->unknown_data_length = 1;
	/*
	 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
	 * se_sess->sess_cmd_list. A second kref_get here is necessary
	 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
	 * kref_put() to happen during fabric packet acknowledgement.
	 */
	target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
	/*
	 * Signal bidirectional data payloads to target-core
	 */
	if (flags & TARGET_SCF_BIDI_OP)
		se_cmd->se_cmd_flags |= SCF_BIDI;
	/*
	 * Locate se_lun pointer and attach it to struct se_cmd
	 */
	if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) {
		transport_send_check_condition_and_sense(se_cmd,
				se_cmd->scsi_sense_reason, 0);
		target_put_sess_cmd(se_sess, se_cmd);
		return;
	}

	rc = target_setup_cmd_from_cdb(se_cmd, cdb);
	if (rc != 0) {
		transport_generic_request_failure(se_cmd);
		return;
	}

	/*
	 * Check if we need to delay processing because of ALUA
	 * Active/NonOptimized primary access state..
	 */
	core_alua_check_nonop_delay(se_cmd);

	transport_handle_cdb_direct(se_cmd);
	return;
}
EXPORT_SYMBOL(target_submit_cmd);
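
/*
 * Illustrative sketch (not from the original file): a fabric driver's RX
 * path hands a received SCSI command to the core in one call. The cmd and
 * sc names below are hypothetical fabric-side variables assumed only for
 * this example.
 *
 *	target_submit_cmd(&cmd->se_cmd, sess->se_sess, sc->cdb,
 *			  cmd->sense_buf, scsilun_to_int(&sc->lun),
 *			  be32_to_cpu(sc->data_len), MSG_SIMPLE_TAG,
 *			  DMA_FROM_DEVICE, TARGET_SCF_ACK_KREF);
 */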
static void target_complete_tmr_failure(struct work_struct *work)
{
	struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);

	se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
	se_cmd->se_tfo->queue_tm_rsp(se_cmd);
	transport_generic_free_cmd(se_cmd, 0);
}
/**
 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
 *                     for TMR CDBs
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @fabric_tmr_ptr: fabric context for TMR req
 * @tm_type: Type of TM request
 * @gfp: gfp type for caller
 * @tag: referenced task tag for TMR_ABORT_TASK
 * @flags: submit cmd flags
 *
 * Callable from all contexts.
 */
int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *sense, u32 unpacked_lun,
		void *fabric_tmr_ptr, unsigned char tm_type,
		gfp_t gfp, unsigned int tag, int flags)
{
	struct se_portal_group *se_tpg;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);

	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
			      0, DMA_NONE, MSG_SIMPLE_TAG, sense);
	/*
	 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
	 * allocation failure.
	 */
	ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
	if (ret < 0)
		return -ENOMEM;

	if (tm_type == TMR_ABORT_TASK)
		se_cmd->se_tmr_req->ref_task_tag = tag;

	/* See target_submit_cmd for commentary */
	target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));

	ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
	if (ret) {
		/*
		 * For callback during failure handling, push this work off
		 * to process context with TMR_LUN_DOES_NOT_EXIST status.
		 */
		INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
		schedule_work(&se_cmd->work);
		return 0;
	}
	transport_generic_handle_tmr(se_cmd);
	return 0;
}
EXPORT_SYMBOL(target_submit_tmr);
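
/*
 * Illustrative sketch (not from the original file): a fabric driver
 * submitting a LUN_RESET on behalf of an initiator. The tmr_cmd and sess
 * names below are hypothetical fabric-side variables.
 *
 *	ret = target_submit_tmr(&tmr_cmd->se_cmd, sess->se_sess, NULL,
 *				unpacked_lun, tmr_cmd, TMR_LUN_RESET,
 *				GFP_KERNEL, 0, 0);
 *	if (ret < 0)
 *		(fabric-specific cleanup of tmr_cmd)
 */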
/* transport_generic_handle_tmr():
 *
 *
 */
int transport_generic_handle_tmr(
	struct se_cmd *cmd)
{
	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_tmr);
/*
 * If the cmd is active, request it to be stopped and sleep until it
 * has completed.
 */
bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
{
	bool was_active = false;

	if (cmd->transport_state & CMD_T_BUSY) {
		cmd->transport_state |= CMD_T_REQUEST_STOP;
		spin_unlock_irqrestore(&cmd->t_state_lock, *flags);

		pr_debug("cmd %p waiting to complete\n", cmd);
		wait_for_completion(&cmd->task_stop_comp);
		pr_debug("cmd %p stopped successfully\n", cmd);

		spin_lock_irqsave(&cmd->t_state_lock, *flags);
		cmd->transport_state &= ~CMD_T_REQUEST_STOP;
		cmd->transport_state &= ~CMD_T_BUSY;
		was_active = true;
	}

	return was_active;
}
/*
 * Handle SAM-esque emulation for generic transport request failures.
 */
void transport_generic_request_failure(struct se_cmd *cmd)
{
	int ret = 0;

	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
		" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
		cmd->t_task_cdb[0]);
	pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
		cmd->se_tfo->get_cmd_state(cmd),
		cmd->t_state, cmd->scsi_sense_reason);
	pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
		(cmd->transport_state & CMD_T_ACTIVE) != 0,
		(cmd->transport_state & CMD_T_STOP) != 0,
		(cmd->transport_state & CMD_T_SENT) != 0);

	/*
	 * For SAM Task Attribute emulation for failed struct se_cmd
	 */
	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
		transport_complete_task_attr(cmd);

	switch (cmd->scsi_sense_reason) {
	case TCM_NON_EXISTENT_LUN:
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_INVALID_CDB_FIELD:
	case TCM_INVALID_PARAMETER_LIST:
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	case TCM_UNKNOWN_MODE_PAGE:
	case TCM_WRITE_PROTECTED:
	case TCM_CHECK_CONDITION_ABORT_CMD:
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
	case TCM_CHECK_CONDITION_NOT_READY:
		break;
	case TCM_RESERVATION_CONFLICT:
		/*
		 * No SENSE Data payload for this case, set SCSI Status
		 * and queue the response to $FABRIC_MOD.
		 *
		 * Uses linux/include/scsi/scsi.h SAM status codes defs
		 */
		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
		/*
		 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
		 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
		 * CONFLICT STATUS.
		 *
		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
		 */
		if (cmd->se_sess &&
		    cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
			core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
				cmd->orig_fe_lun, 0x2C,
				ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);

		ret = cmd->se_tfo->queue_status(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		goto check_stop;
	default:
		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
			cmd->t_task_cdb[0], cmd->scsi_sense_reason);
		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
		break;
	}

	ret = transport_send_check_condition_and_sense(cmd,
			cmd->scsi_sense_reason, 0);
	if (ret == -EAGAIN || ret == -ENOMEM)
		goto queue_full;

check_stop:
	transport_lun_remove_cmd(cmd);
	if (!transport_cmd_check_stop_to_fabric(cmd))
		;
	return;

queue_full:
	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
	transport_handle_queue_full(cmd, cmd->se_dev);
}
EXPORT_SYMBOL(transport_generic_request_failure);
static void __target_execute_cmd(struct se_cmd *cmd)
{
	int error = 0;

	spin_lock_irq(&cmd->t_state_lock);
	cmd->transport_state |= (CMD_T_BUSY|CMD_T_SENT);
	spin_unlock_irq(&cmd->t_state_lock);

	if (cmd->execute_cmd)
		error = cmd->execute_cmd(cmd);

	if (error) {
		spin_lock_irq(&cmd->t_state_lock);
		cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
		spin_unlock_irq(&cmd->t_state_lock);

		transport_generic_request_failure(cmd);
	}
}
void target_execute_cmd(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * If the received CDB has already been aborted stop processing it here.
	 */
	if (transport_check_aborted_status(cmd, 1))
		return;

	/*
	 * Determine if IOCTL context caller is requesting the stopping of this
	 * command for LUN shutdown purposes.
	 */
	spin_lock_irq(&cmd->t_state_lock);
	if (cmd->transport_state & CMD_T_LUN_STOP) {
		pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
			__func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));

		cmd->transport_state &= ~CMD_T_ACTIVE;
		spin_unlock_irq(&cmd->t_state_lock);
		complete(&cmd->transport_lun_stop_comp);
		return;
	}
	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
			__func__, __LINE__,
			cmd->se_tfo->get_task_tag(cmd));

		spin_unlock_irq(&cmd->t_state_lock);
		complete(&cmd->t_transport_stop_comp);
		return;
	}

	cmd->t_state = TRANSPORT_PROCESSING;
	spin_unlock_irq(&cmd->t_state_lock);

	if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
		goto execute;

	/*
	 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
	 * to allow the passed struct se_cmd list of tasks to the front of the list.
	 */
	switch (cmd->sam_task_attr) {
	case MSG_HEAD_TAG:
		pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, "
			 "se_ordered_id: %u\n",
			 cmd->t_task_cdb[0], cmd->se_ordered_id);
		goto execute;
	case MSG_ORDERED_TAG:
		atomic_inc(&dev->dev_ordered_sync);
		smp_mb__after_atomic_inc();

		pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, "
			 " se_ordered_id: %u\n",
			 cmd->t_task_cdb[0], cmd->se_ordered_id);

		/*
		 * Execute an ORDERED command if no other older commands
		 * exist that need to be completed first.
		 */
		if (!atomic_read(&dev->simple_cmds))
			goto execute;
		break;
	default:
		/*
		 * For SIMPLE and UNTAGGED Task Attribute commands
		 */
		atomic_inc(&dev->simple_cmds);
		smp_mb__after_atomic_inc();
		break;
	}

	if (atomic_read(&dev->dev_ordered_sync) != 0) {
		spin_lock(&dev->delayed_cmd_lock);
		list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
		spin_unlock(&dev->delayed_cmd_lock);

		pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
			" delayed CMD list, se_ordered_id: %u\n",
			cmd->t_task_cdb[0], cmd->sam_task_attr,
			cmd->se_ordered_id);
		return;
	}

execute:
	/*
	 * Otherwise, no ORDERED task attributes exist..
	 */
	__target_execute_cmd(cmd);
}
EXPORT_SYMBOL(target_execute_cmd);
/*
 * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
 */
static int transport_get_sense_data(struct se_cmd *cmd)
{
	unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;
	u32 offset = 0;

	WARN_ON(!cmd->se_lun);

	if (!dev)
		return 0;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}

	if (!(cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE))
		goto out;

	if (!dev->transport->get_sense_buffer) {
		pr_err("dev->transport->get_sense_buffer is NULL\n");
		goto out;
	}

	sense_buffer = dev->transport->get_sense_buffer(cmd);
	if (!sense_buffer) {
		pr_err("ITT 0x%08x cmd %p: Unable to locate"
			" sense buffer for task with sense\n",
			cmd->se_tfo->get_task_tag(cmd), cmd);
		goto out;
	}

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	offset = cmd->se_tfo->set_fabric_sense_len(cmd, TRANSPORT_SENSE_BUFFER);

	memcpy(&buffer[offset], sense_buffer, TRANSPORT_SENSE_BUFFER);

	/* Automatically padded */
	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;

	pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x and sense\n",
		dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
	return 0;

out:
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	return -1;
}
/*
 * Process all commands up to the last received ORDERED task attribute which
 * requires another blocking boundary
 */
static void target_restart_delayed_cmds(struct se_device *dev)
{
	for (;;) {
		struct se_cmd *cmd;

		spin_lock(&dev->delayed_cmd_lock);
		if (list_empty(&dev->delayed_cmd_list)) {
			spin_unlock(&dev->delayed_cmd_lock);
			break;
		}

		cmd = list_entry(dev->delayed_cmd_list.next,
				 struct se_cmd, se_delayed_node);
		list_del(&cmd->se_delayed_node);
		spin_unlock(&dev->delayed_cmd_lock);

		__target_execute_cmd(cmd);

		if (cmd->sam_task_attr == MSG_ORDERED_TAG)
			break;
	}
}
/*
 * Called from I/O completion to determine which dormant/delayed
 * and ordered cmds need to have their tasks added to the execution queue.
 */
static void transport_complete_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
		atomic_dec(&dev->simple_cmds);
		smp_mb__after_atomic_dec();
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
			" SIMPLE: %u\n", dev->dev_cur_ordered_id,
			cmd->se_ordered_id);
	} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for"
			" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
			cmd->se_ordered_id);
	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
		atomic_dec(&dev->dev_ordered_sync);
		smp_mb__after_atomic_dec();

		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
			" %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
	}

	target_restart_delayed_cmds(dev);
}
static void transport_complete_qf(struct se_cmd *cmd)
{
	int ret = 0;

	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
		transport_complete_task_attr(cmd);

	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret)
			goto out;
	}

	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		ret = cmd->se_tfo->queue_data_in(cmd);
		break;
	case DMA_TO_DEVICE:
		if (cmd->t_bidi_data_sg) {
			ret = cmd->se_tfo->queue_data_in(cmd);
			if (ret < 0)
				break;
		}
		/* Fall through for DMA_TO_DEVICE */
	case DMA_NONE:
		ret = cmd->se_tfo->queue_status(cmd);
		break;
	default:
		break;
	}

out:
	if (ret < 0) {
		transport_handle_queue_full(cmd, cmd->se_dev);
		return;
	}
	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
}
static void transport_handle_queue_full(
	struct se_cmd *cmd,
	struct se_device *dev)
{
	spin_lock_irq(&dev->qf_cmd_lock);
	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
	atomic_inc(&dev->dev_qf_count);
	smp_mb__after_atomic_inc();
	spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);

	schedule_work(&cmd->se_dev->qf_work_queue);
}
static void target_complete_ok_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
	int reason = 0, ret;

	/*
	 * Check if we need to move delayed/dormant tasks from cmds on the
	 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
	 * Attribute.
	 */
	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
		transport_complete_task_attr(cmd);
	/*
	 * Check to schedule QUEUE_FULL work, or execute an existing
	 * cmd->transport_qf_callback()
	 */
	if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
		schedule_work(&cmd->se_dev->qf_work_queue);

	/*
	 * Check if we need to retrieve a sense buffer from
	 * the struct se_cmd in question.
	 */
	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		if (transport_get_sense_data(cmd) < 0)
			reason = TCM_NON_EXISTENT_LUN;

		if (cmd->scsi_status) {
			ret = transport_send_check_condition_and_sense(
					cmd, reason, 1);
			if (ret == -EAGAIN || ret == -ENOMEM)
				goto queue_full;

			transport_lun_remove_cmd(cmd);
			transport_cmd_check_stop_to_fabric(cmd);
			return;
		}
	}
	/*
	 * Check for a callback, used by amongst other things
	 * XDWRITE_READ_10 emulation.
	 */
	if (cmd->transport_complete_callback)
		cmd->transport_complete_callback(cmd);

	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		spin_lock(&cmd->se_lun->lun_sep_lock);
		if (cmd->se_lun->lun_sep) {
			cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
					cmd->data_length;
		}
		spin_unlock(&cmd->se_lun->lun_sep_lock);

		ret = cmd->se_tfo->queue_data_in(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		break;
	case DMA_TO_DEVICE:
		spin_lock(&cmd->se_lun->lun_sep_lock);
		if (cmd->se_lun->lun_sep) {
			cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
				cmd->data_length;
		}
		spin_unlock(&cmd->se_lun->lun_sep_lock);
		/*
		 * Check if we need to send READ payload for BIDI-COMMAND
		 */
		if (cmd->t_bidi_data_sg) {
			spin_lock(&cmd->se_lun->lun_sep_lock);
			if (cmd->se_lun->lun_sep) {
				cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
					cmd->data_length;
			}
			spin_unlock(&cmd->se_lun->lun_sep_lock);
			ret = cmd->se_tfo->queue_data_in(cmd);
			if (ret == -EAGAIN || ret == -ENOMEM)
				goto queue_full;
			break;
		}
		/* Fall through for DMA_TO_DEVICE */
	case DMA_NONE:
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		break;
	default:
		break;
	}

	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;

queue_full:
	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
		" data_direction: %d\n", cmd, cmd->data_direction);
	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
	transport_handle_queue_full(cmd, cmd->se_dev);
}
static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int count;

	for_each_sg(sgl, sg, nents, count)
		__free_page(sg_page(sg));

	kfree(sgl);
}
static inline void transport_free_pages(struct se_cmd *cmd)
{
	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
		return;

	transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
	cmd->t_data_sg = NULL;
	cmd->t_data_nents = 0;

	transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
	cmd->t_bidi_data_sg = NULL;
	cmd->t_bidi_data_nents = 0;
}
/**
 * transport_release_cmd - free a command
 * @cmd:	command to free
 *
 * This routine unconditionally frees a command, and reference counting
 * or list removal must be done in the caller.
 */
static void transport_release_cmd(struct se_cmd *cmd)
{
	BUG_ON(!cmd->se_tfo);

	if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
		core_tmr_release_req(cmd->se_tmr_req);
	if (cmd->t_task_cdb != cmd->__t_task_cdb)
		kfree(cmd->t_task_cdb);
	/*
	 * If this cmd has been setup with target_get_sess_cmd(), drop
	 * the kref and call ->release_cmd() in kref callback.
	 */
	if (cmd->check_release != 0) {
		target_put_sess_cmd(cmd->se_sess, cmd);
		return;
	}
	cmd->se_tfo->release_cmd(cmd);
}
/**
 * transport_put_cmd - release a reference to a command
 * @cmd:	command to release
 *
 * This routine releases our reference to the command and frees it if possible.
 */
static void transport_put_cmd(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (atomic_read(&cmd->t_fe_count)) {
		if (!atomic_dec_and_test(&cmd->t_fe_count))
			goto out_busy;
	}

	if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
		target_remove_from_state_list(cmd);
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	transport_free_pages(cmd);
	transport_release_cmd(cmd);
	return;
out_busy:
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
/*
 * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of
 * allocating in the core.
 * @cmd: Associated se_cmd descriptor
 * @sgl: SGL style memory for TCM WRITE / READ
 * @sgl_count: Number of SGL elements
 * @sgl_bidi: SGL style memory for TCM BIDI READ
 * @sgl_bidi_count: Number of BIDI READ SGL elements
 *
 * Return: nonzero if the cmd was rejected for -ENOMEM or improper usage
 */
int transport_generic_map_mem_to_cmd(
	struct se_cmd *cmd,
	struct scatterlist *sgl,
	u32 sgl_count,
	struct scatterlist *sgl_bidi,
	u32 sgl_bidi_count)
{
	if (!sgl || !sgl_count)
		return 0;

	/*
	 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
	 * scatterlists already have been set to follow what the fabric
	 * passes for the original expected data transfer length.
	 */
	if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		pr_warn("Rejecting SCSI DATA overflow for fabric using"
			" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}

	cmd->t_data_sg = sgl;
	cmd->t_data_nents = sgl_count;

	if (sgl_bidi && sgl_bidi_count) {
		cmd->t_bidi_data_sg = sgl_bidi;
		cmd->t_bidi_data_nents = sgl_bidi_count;
	}
	cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	return 0;
}
EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
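
/*
 * Example (illustrative sketch, compiled out): a fabric module that has
 * already mapped its receive buffers into a scatterlist can hand them to
 * the core instead of letting transport_generic_new_cmd() allocate pages.
 * The fabric-side names below (my_fabric_cmd and its sgl/sgl_count fields)
 * are hypothetical and exist only for this example.
 */
#if 0
static int my_fabric_setup_data(struct my_fabric_cmd *fcmd)
{
	struct se_cmd *se_cmd = &fcmd->se_cmd;

	/*
	 * Pass fabric-owned SGLs for the data payload; no BIDI SGLs here.
	 * On success SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is set and
	 * transport_free_pages() will leave these pages alone.
	 */
	return transport_generic_map_mem_to_cmd(se_cmd, fcmd->sgl,
						fcmd->sgl_count, NULL, 0);
}
#endif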
void *transport_kmap_data_sg(struct se_cmd *cmd)
{
	struct scatterlist *sg = cmd->t_data_sg;
	struct page **pages;
	int i;

	/*
	 * We need to take into account a possible offset here for fabrics like
	 * tcm_loop who may be using a contig buffer from the SCSI midlayer for
	 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
	 */
	if (!cmd->t_data_nents)
		return NULL;
	else if (cmd->t_data_nents == 1)
		return kmap(sg_page(sg)) + sg->offset;

	/* >1 page. use vmap */
	pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
	if (!pages)
		return NULL;

	/* convert sg[] to pages[] */
	for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
		pages[i] = sg_page(sg);
	}

	cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
	kfree(pages);
	if (!cmd->t_data_vmap)
		return NULL;

	return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
}
EXPORT_SYMBOL(transport_kmap_data_sg);
void transport_kunmap_data_sg(struct se_cmd *cmd)
{
	if (!cmd->t_data_nents) {
		return;
	} else if (cmd->t_data_nents == 1) {
		kunmap(sg_page(cmd->t_data_sg));
		return;
	}

	vunmap(cmd->t_data_vmap);
	cmd->t_data_vmap = NULL;
}
EXPORT_SYMBOL(transport_kunmap_data_sg);
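
/*
 * Example (illustrative sketch, compiled out): callers that need a linear
 * view of the data buffer, e.g. to parse a parameter list, bracket their
 * access with the kmap/kunmap pair above.  A single SG entry is kmap()ed
 * directly; multiple entries go through vmap().  my_backend_parse_payload
 * is a hypothetical name for this example only.
 */
#if 0
static int my_backend_parse_payload(struct se_cmd *cmd)
{
	unsigned char *buf = transport_kmap_data_sg(cmd);

	if (!buf)
		return -ENOMEM;
	/* ... inspect buf[0 .. cmd->data_length - 1] here ... */
	transport_kunmap_data_sg(cmd);
	return 0;
}
#endif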
static int
transport_generic_get_mem(struct se_cmd *cmd)
{
	u32 length = cmd->data_length;
	unsigned int nents;
	struct page *page;
	gfp_t zero_flag;
	int i = 0;

	nents = DIV_ROUND_UP(length, PAGE_SIZE);
	cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
	if (!cmd->t_data_sg)
		return -ENOMEM;

	cmd->t_data_nents = nents;
	sg_init_table(cmd->t_data_sg, nents);

	zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_CDB ? 0 : __GFP_ZERO;

	while (length) {
		u32 page_len = min_t(u32, length, PAGE_SIZE);
		page = alloc_page(GFP_KERNEL | zero_flag);
		if (!page)
			goto out;

		sg_set_page(&cmd->t_data_sg[i], page, page_len, 0);
		length -= page_len;
		i++;
	}
	return 0;

out:
	while (i > 0) {
		i--;
		__free_page(sg_page(&cmd->t_data_sg[i]));
	}
	kfree(cmd->t_data_sg);
	cmd->t_data_sg = NULL;
	return -ENOMEM;
}
/*
 * Allocate any required resources to execute the command.  For writes we
 * might not have the payload yet, so notify the fabric via a call to
 * ->write_pending instead.  Otherwise place it on the execution queue.
 */
int transport_generic_new_cmd(struct se_cmd *cmd)
{
	int ret = 0;

	/*
	 * Determine if the TCM fabric module has already allocated physical
	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
	 * beforehand.
	 */
	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
	    cmd->data_length) {
		ret = transport_generic_get_mem(cmd);
		if (ret < 0)
			goto out_fail;
	}

	/* Workaround for handling zero-length control CDBs */
	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->data_length) {
		spin_lock_irq(&cmd->t_state_lock);
		cmd->t_state = TRANSPORT_COMPLETE;
		cmd->transport_state |= CMD_T_ACTIVE;
		spin_unlock_irq(&cmd->t_state_lock);

		if (cmd->t_task_cdb[0] == REQUEST_SENSE) {
			u8 ua_asc = 0, ua_ascq = 0;

			core_scsi3_ua_clear_for_request_sense(cmd,
					&ua_asc, &ua_ascq);
		}

		INIT_WORK(&cmd->work, target_complete_ok_work);
		queue_work(target_completion_wq, &cmd->work);
		return 0;
	}

	atomic_inc(&cmd->t_fe_count);

	/*
	 * If this command is not a write we can execute it right here,
	 * for write buffers we need to notify the fabric driver first
	 * and let it call back once the write buffers are ready.
	 */
	target_add_to_state_list(cmd);
	if (cmd->data_direction != DMA_TO_DEVICE) {
		target_execute_cmd(cmd);
		return 0;
	}

	spin_lock_irq(&cmd->t_state_lock);
	cmd->t_state = TRANSPORT_WRITE_PENDING;
	spin_unlock_irq(&cmd->t_state_lock);

	transport_cmd_check_stop(cmd, false);

	ret = cmd->se_tfo->write_pending(cmd);
	if (ret == -EAGAIN || ret == -ENOMEM)
		goto queue_full;

	if (ret < 0)
		return ret;
	return 1;

out_fail:
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	return -EINVAL;
queue_full:
	pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
	transport_handle_queue_full(cmd, cmd->se_dev);
	return 0;
}
EXPORT_SYMBOL(transport_generic_new_cmd);
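
/*
 * Example (illustrative sketch, compiled out): the ->write_pending() hook
 * a fabric supplies is only asked to fetch the WRITE payload; once the
 * data has arrived the fabric hands control back to the core with
 * target_execute_cmd().  The my_fabric_* names are hypothetical.
 */
#if 0
static int my_fabric_write_pending(struct se_cmd *se_cmd)
{
	/* Post buffers / solicit data from the initiator, then return 0. */
	return my_fabric_request_data(se_cmd);
}

static void my_fabric_data_arrived(struct se_cmd *se_cmd)
{
	target_execute_cmd(se_cmd);	/* payload ready, run the command */
}
#endif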
static void transport_write_pending_qf(struct se_cmd *cmd)
{
	int ret;

	ret = cmd->se_tfo->write_pending(cmd);
	if (ret == -EAGAIN || ret == -ENOMEM) {
		pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
			 cmd);
		transport_handle_queue_full(cmd, cmd->se_dev);
	}
}
void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
		if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
			transport_wait_for_tasks(cmd);

		transport_release_cmd(cmd);
	} else {
		if (wait_for_tasks)
			transport_wait_for_tasks(cmd);

		core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);

		if (cmd->se_lun)
			transport_lun_remove_cmd(cmd);

		transport_put_cmd(cmd);
	}
}
EXPORT_SYMBOL(transport_generic_free_cmd);
/* target_get_sess_cmd - Add command to active ->sess_cmd_list
 * @se_sess:	session to reference
 * @se_cmd:	command descriptor to add
 * @ack_kref:	Signal that fabric will perform an ack target_put_sess_cmd()
 */
void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
			bool ack_kref)
{
	unsigned long flags;

	kref_init(&se_cmd->cmd_kref);
	/*
	 * Add a second kref if the fabric caller is expecting to handle
	 * fabric acknowledgement that requires two target_put_sess_cmd()
	 * invocations before se_cmd descriptor release.
	 */
	if (ack_kref == true) {
		kref_get(&se_cmd->cmd_kref);
		se_cmd->se_cmd_flags |= SCF_ACK_KREF;
	}

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
	se_cmd->check_release = 1;
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_get_sess_cmd);
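
/*
 * Example (illustrative sketch, compiled out): with ack_kref == true the
 * descriptor holds two references, so a fabric that completes a command
 * and later receives a protocol-level acknowledgement drops one reference
 * at each step; ->release_cmd() runs only after the second put.  The
 * my_fabric_* names are hypothetical.
 */
#if 0
static void my_fabric_queue_cmd(struct se_session *sess, struct se_cmd *cmd)
{
	target_get_sess_cmd(sess, cmd, true);	/* kref == 2 */
}

static void my_fabric_response_sent(struct se_cmd *cmd)
{
	target_put_sess_cmd(cmd->se_sess, cmd);	/* kref 2 -> 1 */
}

static void my_fabric_ack_received(struct se_cmd *cmd)
{
	target_put_sess_cmd(cmd->se_sess, cmd);	/* kref 1 -> 0, released */
}
#endif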
static void target_release_cmd_kref(struct kref *kref)
{
	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
	struct se_session *se_sess = se_cmd->se_sess;
	unsigned long flags;

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	if (list_empty(&se_cmd->se_cmd_list)) {
		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
		se_cmd->se_tfo->release_cmd(se_cmd);
		return;
	}
	if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
		complete(&se_cmd->cmd_wait_comp);
		return;
	}
	list_del(&se_cmd->se_cmd_list);
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

	se_cmd->se_tfo->release_cmd(se_cmd);
}

/* target_put_sess_cmd - Check for active I/O shutdown via kref_put
 * @se_sess:	session to reference
 * @se_cmd:	command descriptor to drop
 */
int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
{
	return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
}
EXPORT_SYMBOL(target_put_sess_cmd);
/* target_splice_sess_cmd_list - Split active cmds into sess_wait_list
 * @se_sess:	session to split
 */
void target_splice_sess_cmd_list(struct se_session *se_sess)
{
	struct se_cmd *se_cmd;
	unsigned long flags;

	WARN_ON(!list_empty(&se_sess->sess_wait_list));
	INIT_LIST_HEAD(&se_sess->sess_wait_list);

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	se_sess->sess_tearing_down = 1;

	list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);

	list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
		se_cmd->cmd_wait_set = 1;

	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_splice_sess_cmd_list);
/* target_wait_for_sess_cmds - Wait for outstanding descriptors
 * @se_sess:	session to wait for active I/O
 * @wait_for_tasks:	Make extra transport_wait_for_tasks call
 */
void target_wait_for_sess_cmds(
	struct se_session *se_sess,
	int wait_for_tasks)
{
	struct se_cmd *se_cmd, *tmp_cmd;
	bool rc = false;

	list_for_each_entry_safe(se_cmd, tmp_cmd,
			&se_sess->sess_wait_list, se_cmd_list) {
		list_del(&se_cmd->se_cmd_list);

		pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
			" %d\n", se_cmd, se_cmd->t_state,
			se_cmd->se_tfo->get_cmd_state(se_cmd));

		if (wait_for_tasks) {
			pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d,"
				" fabric state: %d\n", se_cmd, se_cmd->t_state,
				se_cmd->se_tfo->get_cmd_state(se_cmd));

			rc = transport_wait_for_tasks(se_cmd);

			pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d,"
				" fabric state: %d\n", se_cmd, se_cmd->t_state,
				se_cmd->se_tfo->get_cmd_state(se_cmd));
		}

		if (!rc) {
			wait_for_completion(&se_cmd->cmd_wait_comp);
			pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
				" fabric state: %d\n", se_cmd, se_cmd->t_state,
				se_cmd->se_tfo->get_cmd_state(se_cmd));
		}

		se_cmd->se_tfo->release_cmd(se_cmd);
	}
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);
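
/*
 * Example (illustrative sketch, compiled out): the intended shutdown
 * sequence for a fabric tearing down a session is to splice the active
 * command list first, stop accepting new I/O, and then wait for the
 * spliced descriptors to drain before deregistering the session.
 * my_fabric_close_session is a hypothetical name for this example only.
 */
#if 0
static void my_fabric_close_session(struct se_session *se_sess)
{
	target_splice_sess_cmd_list(se_sess);
	/* ... quiesce the connection so no new commands arrive ... */
	target_wait_for_sess_cmds(se_sess, 0);
	transport_deregister_session(se_sess);
}
#endif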
/*	transport_lun_wait_for_tasks():
 *
 *	Called from ConfigFS context to stop the passed struct se_cmd to allow
 *	a struct se_lun to be successfully shutdown.
 */
static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
{
	unsigned long flags;
	int ret = 0;

	/*
	 * If the frontend has already requested this struct se_cmd to
	 * be stopped, we can safely ignore this struct se_cmd.
	 */
	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->transport_state & CMD_T_STOP) {
		cmd->transport_state &= ~CMD_T_LUN_STOP;

		pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n",
			 cmd->se_tfo->get_task_tag(cmd));
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		transport_cmd_check_stop(cmd, false);
		return -EPERM;
	}
	cmd->transport_state |= CMD_T_LUN_FE_STOP;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);

	// XXX: audit task_flags checks.
	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if ((cmd->transport_state & CMD_T_BUSY) &&
	    (cmd->transport_state & CMD_T_SENT)) {
		if (!target_stop_cmd(cmd, &flags))
			ret++;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	pr_debug("ConfigFS: cmd: %p stop tasks ret: %d\n", cmd, ret);
	if (!ret) {
		pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
			 cmd->se_tfo->get_task_tag(cmd));
		wait_for_completion(&cmd->transport_lun_stop_comp);
		pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
			 cmd->se_tfo->get_task_tag(cmd));
	}
	transport_remove_cmd_from_queue(cmd);

	return 0;
}
static void __transport_clear_lun_from_sessions(struct se_lun *lun)
{
	struct se_cmd *cmd = NULL;
	unsigned long lun_flags, cmd_flags;
	/*
	 * Do exception processing and return CHECK_CONDITION status to the
	 * Initiator Port.
	 */
	spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
	while (!list_empty(&lun->lun_cmd_list)) {
		cmd = list_first_entry(&lun->lun_cmd_list,
				struct se_cmd, se_lun_node);
		list_del_init(&cmd->se_lun_node);

		spin_lock(&cmd->t_state_lock);
		pr_debug("SE_LUN[%d] - Setting cmd->transport"
			"_lun_stop for ITT: 0x%08x\n",
			cmd->se_lun->unpacked_lun,
			cmd->se_tfo->get_task_tag(cmd));
		cmd->transport_state |= CMD_T_LUN_STOP;
		spin_unlock(&cmd->t_state_lock);

		spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);

		if (!cmd->se_lun) {
			pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
				cmd->se_tfo->get_task_tag(cmd),
				cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
			BUG();
		}
		/*
		 * If the Storage engine still owns the iscsi_cmd_t, determine
		 * and/or stop its context.
		 */
		pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
			"_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
			cmd->se_tfo->get_task_tag(cmd));

		if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
			continue;
		}

		pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
			"_wait_for_tasks(): SUCCESS\n",
			cmd->se_lun->unpacked_lun,
			cmd->se_tfo->get_task_tag(cmd));

		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
		if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) {
			spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
			goto check_cond;
		}
		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
		target_remove_from_state_list(cmd);
		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);

		/*
		 * The Storage engine stopped this struct se_cmd before it was
		 * sent to the fabric frontend for delivery back to the
		 * Initiator Node.  Return this SCSI CDB back with a
		 * CHECK_CONDITION status.
		 */
check_cond:
		transport_send_check_condition_and_sense(cmd,
				TCM_NON_EXISTENT_LUN, 0);
		/*
		 * If the fabric frontend is waiting for this iscsi_cmd_t to
		 * be released, notify the waiting thread now that LU has
		 * finished accessing it.
		 */
		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
		if (cmd->transport_state & CMD_T_LUN_FE_STOP) {
			pr_debug("SE_LUN[%d] - Detected FE stop for"
				" struct se_cmd: %p ITT: 0x%08x\n",
				lun->unpacked_lun,
				cmd, cmd->se_tfo->get_task_tag(cmd));

			spin_unlock_irqrestore(&cmd->t_state_lock,
					cmd_flags);
			transport_cmd_check_stop(cmd, false);
			complete(&cmd->transport_lun_fe_stop_comp);
			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
			continue;
		}
		pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
			lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));

		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
		spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
	}
	spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
}
static int transport_clear_lun_thread(void *p)
{
	struct se_lun *lun = p;

	__transport_clear_lun_from_sessions(lun);
	complete(&lun->lun_shutdown_comp);

	return 0;
}

int transport_clear_lun_from_sessions(struct se_lun *lun)
{
	struct task_struct *kt;

	kt = kthread_run(transport_clear_lun_thread, lun,
			"tcm_cl_%u", lun->unpacked_lun);
	if (IS_ERR(kt)) {
		pr_err("Unable to start clear_lun thread\n");
		return PTR_ERR(kt);
	}
	wait_for_completion(&lun->lun_shutdown_comp);

	return 0;
}
/**
 * transport_wait_for_tasks - wait for completion to occur
 * @cmd:	command to wait
 *
 * Called from frontend fabric context to wait for storage engine
 * to pause and/or release frontend generated struct se_cmd.
 */
bool transport_wait_for_tasks(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}

	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}
	/*
	 * If we are already stopped due to an external event (ie: LUN shutdown)
	 * sleep until the connection can have the passed struct se_cmd back.
	 * The cmd->transport_lun_stopped_sem will be upped by
	 * transport_clear_lun_from_sessions() once the ConfigFS context caller
	 * has completed its operation on the struct se_cmd.
	 */
	if (cmd->transport_state & CMD_T_LUN_STOP) {
		pr_debug("wait_for_tasks: Stopping"
			" wait_for_completion(&cmd->t_tasktransport_lun_fe"
			"_stop_comp); for ITT: 0x%08x\n",
			cmd->se_tfo->get_task_tag(cmd));
		/*
		 * There is a special case for WRITES where a FE exception +
		 * LUN shutdown means ConfigFS context is still sleeping on
		 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
		 * We go ahead and up transport_lun_stop_comp just to be sure
		 * here.
		 */
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&cmd->transport_lun_stop_comp);
		wait_for_completion(&cmd->transport_lun_fe_stop_comp);
		spin_lock_irqsave(&cmd->t_state_lock, flags);

		target_remove_from_state_list(cmd);
		/*
		 * At this point, the frontend who was the originator of this
		 * struct se_cmd, now owns the structure and can be released through
		 * normal means below.
		 */
		pr_debug("wait_for_tasks: Stopped"
			" wait_for_completion(&cmd->t_tasktransport_lun_fe_"
			"stop_comp); for ITT: 0x%08x\n",
			cmd->se_tfo->get_task_tag(cmd));

		cmd->transport_state &= ~CMD_T_LUN_STOP;
	}

	if (!(cmd->transport_state & CMD_T_ACTIVE)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}

	cmd->transport_state |= CMD_T_STOP;

	pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
		" i_state: %d, t_state: %d, CMD_T_STOP\n",
		cmd, cmd->se_tfo->get_task_tag(cmd),
		cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);

	wait_for_completion(&cmd->t_transport_stop_comp);

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);

	pr_debug("wait_for_tasks: Stopped wait_for_completion("
		"&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
		cmd->se_tfo->get_task_tag(cmd));

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return true;
}
EXPORT_SYMBOL(transport_wait_for_tasks);
static int transport_get_sense_codes(
	struct se_cmd *cmd,
	u8 *asc,
	u8 *ascq)
{
	*asc = cmd->scsi_asc;
	*ascq = cmd->scsi_ascq;

	return 0;
}

static int transport_set_sense_codes(
	struct se_cmd *cmd,
	u8 asc,
	u8 ascq)
{
	cmd->scsi_asc = asc;
	cmd->scsi_ascq = ascq;

	return 0;
}
int transport_send_check_condition_and_sense(
	struct se_cmd *cmd,
	u8 reason,
	int from_transport)
{
	unsigned char *buffer = cmd->sense_buffer;
	unsigned long flags;
	int offset;
	u8 asc = 0, ascq = 0;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}
	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (!reason && from_transport)
		goto after_reason;

	if (!from_transport)
		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
	/*
	 * Data Segment and SenseLength of the fabric response PDU.
	 *
	 * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
	 * from include/scsi/scsi_cmnd.h
	 */
	offset = cmd->se_tfo->set_fabric_sense_len(cmd,
				TRANSPORT_SENSE_BUFFER);
	/*
	 * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses
	 * SENSE KEY values from include/scsi/scsi.h
	 */
	switch (reason) {
	case TCM_NON_EXISTENT_LUN:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL UNIT NOT SUPPORTED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25;
		break;
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_SECTOR_COUNT_TOO_MANY:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID COMMAND OPERATION CODE */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
		break;
	case TCM_UNKNOWN_MODE_PAGE:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN CDB */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_CHECK_CONDITION_ABORT_CMD:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* BUS DEVICE RESET FUNCTION OCCURRED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
		break;
	case TCM_INCORRECT_AMOUNT_OF_DATA:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
		/* NOT ENOUGH UNSOLICITED DATA */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
		break;
	case TCM_INVALID_CDB_FIELD:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN CDB */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_INVALID_PARAMETER_LIST:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN PARAMETER LIST */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
		break;
	case TCM_UNEXPECTED_UNSOLICITED_DATA:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
		/* UNEXPECTED_UNSOLICITED_DATA */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
		break;
	case TCM_SERVICE_CRC_ERROR:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* PROTOCOL SERVICE CRC ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
		/* N/A */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
		break;
	case TCM_SNACK_REJECTED:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* READ ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
		/* FAILED RETRANSMISSION REQUEST */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
		break;
	case TCM_WRITE_PROTECTED:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* DATA PROTECT */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
		/* WRITE PROTECTED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
		break;
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* UNIT ATTENTION */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_CHECK_CONDITION_NOT_READY:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* NOT READY */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
		transport_get_sense_codes(cmd, &asc, &ascq);
		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	default:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL UNIT COMMUNICATION FAILURE */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
		break;
	}
	/*
	 * This code uses linux/include/scsi/scsi.h SAM status codes!
	 */
	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
	/*
	 * Automatically padded, this value is encoded in the fabric's
	 * data_length response PDU containing the SCSI defined sense data.
	 */
	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;

after_reason:
	return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);
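
/*
 * Example (illustrative sketch, compiled out): a caller that fails a
 * request signals the initiator by picking a TCM_* reason; the switch
 * above translates it into fixed-format sense data (response code 0x70,
 * then sense key / ASC / ASCQ) before queueing status to the fabric.
 */
#if 0
static void my_backend_fail_cmd(struct se_cmd *cmd)
{
	transport_send_check_condition_and_sense(cmd,
			TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
}
#endif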
int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
	int ret = 0;

	if (cmd->transport_state & CMD_T_ABORTED) {
		if (!send_status ||
		    (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
			return 1;

		pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
			" status for CDB: 0x%02x ITT: 0x%08x\n",
			cmd->t_task_cdb[0],
			cmd->se_tfo->get_task_tag(cmd));

		cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
		cmd->se_tfo->queue_status(cmd);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL(transport_check_aborted_status);
void transport_send_task_abort(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * If there are still expected incoming fabric WRITEs, we wait
	 * until they have completed before sending a TASK_ABORTED
	 * response.  This response with TASK_ABORTED status will be
	 * queued back to fabric module by transport_check_aborted_status().
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
			cmd->transport_state |= CMD_T_ABORTED;
			smp_mb__after_atomic_inc();
		}
	}
	cmd->scsi_status = SAM_STAT_TASK_ABORTED;

	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
		" ITT: 0x%08x\n", cmd->t_task_cdb[0],
		cmd->se_tfo->get_task_tag(cmd));

	cmd->se_tfo->queue_status(cmd);
}
static int transport_generic_do_tmr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_tmr_req *tmr = cmd->se_tmr_req;
	int ret;

	switch (tmr->function) {
	case TMR_ABORT_TASK:
		core_tmr_abort_task(dev, tmr, cmd->se_sess);
		break;
	case TMR_ABORT_TASK_SET:
	case TMR_CLEAR_ACA:
	case TMR_CLEAR_TASK_SET:
		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
		break;
	case TMR_LUN_RESET:
		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
					 TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_WARM_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_COLD_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	default:
		pr_err("Unknown TMR function: 0x%02x.\n",
				tmr->function);
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	}

	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
	cmd->se_tfo->queue_tm_rsp(cmd);

	transport_cmd_check_stop_to_fabric(cmd);
	return 0;
}
/*	transport_processing_thread():
 *
 *
 */
static int transport_processing_thread(void *param)
{
	int ret;
	struct se_cmd *cmd;
	struct se_device *dev = param;

	while (!kthread_should_stop()) {
		ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
				atomic_read(&dev->dev_queue_obj.queue_cnt) ||
				kthread_should_stop());
		if (ret < 0)
			goto out;

get_cmd:
		cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
		if (!cmd)
			continue;

		switch (cmd->t_state) {
		case TRANSPORT_NEW_CMD:
			BUG();
			break;
		case TRANSPORT_PROCESS_TMR:
			transport_generic_do_tmr(cmd);
			break;
		default:
			pr_err("Unknown t_state: %d for ITT: 0x%08x "
				"i_state: %d on SE LUN: %u\n",
				cmd->t_state,
				cmd->se_tfo->get_task_tag(cmd),
				cmd->se_tfo->get_cmd_state(cmd),
				cmd->se_lun->unpacked_lun);
			BUG();
		}

		goto get_cmd;
	}

out:
	WARN_ON(!list_empty(&dev->state_list));
	WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list));
	dev->process_thread = NULL;
	return 0;
}