Commit | Line | Data |
---|---|---|
c66ac9db NB |
1 | /******************************************************************************* |
2 | * Filename: target_core_transport.c | |
3 | * | |
4 | * This file contains the Generic Target Engine Core. | |
5 | * | |
6 | * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc. | |
7 | * Copyright (c) 2005, 2006, 2007 SBE, Inc. | |
8 | * Copyright (c) 2007-2010 Rising Tide Systems | |
9 | * Copyright (c) 2008-2010 Linux-iSCSI.org | |
10 | * | |
11 | * Nicholas A. Bellinger <nab@kernel.org> | |
12 | * | |
13 | * This program is free software; you can redistribute it and/or modify | |
14 | * it under the terms of the GNU General Public License as published by | |
15 | * the Free Software Foundation; either version 2 of the License, or | |
16 | * (at your option) any later version. | |
17 | * | |
18 | * This program is distributed in the hope that it will be useful, | |
19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
21 | * GNU General Public License for more details. | |
22 | * | |
23 | * You should have received a copy of the GNU General Public License | |
24 | * along with this program; if not, write to the Free Software | |
25 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
26 | * | |
27 | ******************************************************************************/ | |
28 | ||
c66ac9db NB |
29 | #include <linux/net.h> |
30 | #include <linux/delay.h> | |
31 | #include <linux/string.h> | |
32 | #include <linux/timer.h> | |
33 | #include <linux/slab.h> | |
34 | #include <linux/blkdev.h> | |
35 | #include <linux/spinlock.h> | |
c66ac9db NB |
36 | #include <linux/kthread.h> |
37 | #include <linux/in.h> | |
38 | #include <linux/cdrom.h> | |
39 | #include <asm/unaligned.h> | |
40 | #include <net/sock.h> | |
41 | #include <net/tcp.h> | |
42 | #include <scsi/scsi.h> | |
43 | #include <scsi/scsi_cmnd.h> | |
e66ecd50 | 44 | #include <scsi/scsi_tcq.h> |
c66ac9db NB |
45 | |
46 | #include <target/target_core_base.h> | |
47 | #include <target/target_core_device.h> | |
48 | #include <target/target_core_tmr.h> | |
49 | #include <target/target_core_tpg.h> | |
50 | #include <target/target_core_transport.h> | |
51 | #include <target/target_core_fabric_ops.h> | |
52 | #include <target/target_core_configfs.h> | |
53 | ||
54 | #include "target_core_alua.h" | |
55 | #include "target_core_hba.h" | |
56 | #include "target_core_pr.h" | |
57 | #include "target_core_scdb.h" | |
58 | #include "target_core_ua.h" | |
59 | ||
e3d6f909 | 60 | static int sub_api_initialized; |
c66ac9db NB |
61 | |
62 | static struct kmem_cache *se_cmd_cache; | |
63 | static struct kmem_cache *se_sess_cache; | |
64 | struct kmem_cache *se_tmr_req_cache; | |
65 | struct kmem_cache *se_ua_cache; | |
c66ac9db NB |
66 | struct kmem_cache *t10_pr_reg_cache; |
67 | struct kmem_cache *t10_alua_lu_gp_cache; | |
68 | struct kmem_cache *t10_alua_lu_gp_mem_cache; | |
69 | struct kmem_cache *t10_alua_tg_pt_gp_cache; | |
70 | struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; | |
71 | ||
72 | /* Used for transport_dev_get_map_*() */ | |
73 | typedef int (*map_func_t)(struct se_task *, u32); | |
74 | ||
75 | static int transport_generic_write_pending(struct se_cmd *); | |
5951146d | 76 | static int transport_processing_thread(void *param); |
c66ac9db NB |
77 | static int __transport_execute_tasks(struct se_device *dev); |
78 | static void transport_complete_task_attr(struct se_cmd *cmd); | |
07bde79a NB |
79 | static int transport_complete_qf(struct se_cmd *cmd); |
80 | static void transport_handle_queue_full(struct se_cmd *cmd, | |
81 | struct se_device *dev, int (*qf_callback)(struct se_cmd *)); | |
c66ac9db NB |
82 | static void transport_direct_request_timeout(struct se_cmd *cmd); |
83 | static void transport_free_dev_tasks(struct se_cmd *cmd); | |
a1d8b49a | 84 | static u32 transport_allocate_tasks(struct se_cmd *cmd, |
ec98f782 | 85 | unsigned long long starting_lba, |
c66ac9db | 86 | enum dma_data_direction data_direction, |
ec98f782 | 87 | struct scatterlist *sgl, unsigned int nents); |
05d1c7c0 | 88 | static int transport_generic_get_mem(struct se_cmd *cmd); |
39c05f32 | 89 | static void transport_put_cmd(struct se_cmd *cmd); |
c66ac9db NB |
90 | static void transport_remove_cmd_from_queue(struct se_cmd *cmd, |
91 | struct se_queue_obj *qobj); | |
92 | static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); | |
93 | static void transport_stop_all_task_timers(struct se_cmd *cmd); | |
94 | ||
e3d6f909 | 95 | int init_se_kmem_caches(void) |
c66ac9db | 96 | { |
c66ac9db NB |
97 | se_cmd_cache = kmem_cache_create("se_cmd_cache", |
98 | sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL); | |
6708bb27 AG |
99 | if (!se_cmd_cache) { |
100 | pr_err("kmem_cache_create for struct se_cmd failed\n"); | |
c66ac9db NB |
101 | goto out; |
102 | } | |
103 | se_tmr_req_cache = kmem_cache_create("se_tmr_cache", | |
104 | sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req), | |
105 | 0, NULL); | |
6708bb27 AG |
106 | if (!se_tmr_req_cache) { |
107 | pr_err("kmem_cache_create() for struct se_tmr_req" | |
c66ac9db NB |
108 | " failed\n"); |
109 | goto out; | |
110 | } | |
111 | se_sess_cache = kmem_cache_create("se_sess_cache", | |
112 | sizeof(struct se_session), __alignof__(struct se_session), | |
113 | 0, NULL); | |
6708bb27 AG |
114 | if (!se_sess_cache) { |
115 | pr_err("kmem_cache_create() for struct se_session" | |
c66ac9db NB |
116 | " failed\n"); |
117 | goto out; | |
118 | } | |
119 | se_ua_cache = kmem_cache_create("se_ua_cache", | |
120 | sizeof(struct se_ua), __alignof__(struct se_ua), | |
121 | 0, NULL); | |
6708bb27 AG |
122 | if (!se_ua_cache) { |
123 | pr_err("kmem_cache_create() for struct se_ua failed\n"); | |
c66ac9db NB |
124 | goto out; |
125 | } | |
c66ac9db NB |
126 | t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", |
127 | sizeof(struct t10_pr_registration), | |
128 | __alignof__(struct t10_pr_registration), 0, NULL); | |
6708bb27 AG |
129 | if (!t10_pr_reg_cache) { |
130 | pr_err("kmem_cache_create() for struct t10_pr_registration" | |
c66ac9db NB |
131 | " failed\n"); |
132 | goto out; | |
133 | } | |
134 | t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", | |
135 | sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp), | |
136 | 0, NULL); | |
6708bb27 AG |
137 | if (!t10_alua_lu_gp_cache) { |
138 | pr_err("kmem_cache_create() for t10_alua_lu_gp_cache" | |
c66ac9db NB |
139 | " failed\n"); |
140 | goto out; | |
141 | } | |
142 | t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", | |
143 | sizeof(struct t10_alua_lu_gp_member), | |
144 | __alignof__(struct t10_alua_lu_gp_member), 0, NULL); | |
6708bb27 AG |
145 | if (!t10_alua_lu_gp_mem_cache) { |
146 | pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_" | |
c66ac9db NB |
147 | "cache failed\n"); |
148 | goto out; | |
149 | } | |
150 | t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", | |
151 | sizeof(struct t10_alua_tg_pt_gp), | |
152 | __alignof__(struct t10_alua_tg_pt_gp), 0, NULL); | |
6708bb27 AG |
153 | if (!t10_alua_tg_pt_gp_cache) { |
154 | pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" | |
c66ac9db NB |
155 | "cache failed\n"); |
156 | goto out; | |
157 | } | |
158 | t10_alua_tg_pt_gp_mem_cache = kmem_cache_create( | |
159 | "t10_alua_tg_pt_gp_mem_cache", | |
160 | sizeof(struct t10_alua_tg_pt_gp_member), | |
161 | __alignof__(struct t10_alua_tg_pt_gp_member), | |
162 | 0, NULL); | |
6708bb27 AG |
163 | if (!t10_alua_tg_pt_gp_mem_cache) { |
164 | pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" | |
c66ac9db NB |
165 | "mem_t failed\n"); |
166 | goto out; | |
167 | } | |
168 | ||
c66ac9db NB |
169 | return 0; |
170 | out: | |
171 | if (se_cmd_cache) | |
172 | kmem_cache_destroy(se_cmd_cache); | |
173 | if (se_tmr_req_cache) | |
174 | kmem_cache_destroy(se_tmr_req_cache); | |
175 | if (se_sess_cache) | |
176 | kmem_cache_destroy(se_sess_cache); | |
177 | if (se_ua_cache) | |
178 | kmem_cache_destroy(se_ua_cache); | |
c66ac9db NB |
179 | if (t10_pr_reg_cache) |
180 | kmem_cache_destroy(t10_pr_reg_cache); | |
181 | if (t10_alua_lu_gp_cache) | |
182 | kmem_cache_destroy(t10_alua_lu_gp_cache); | |
183 | if (t10_alua_lu_gp_mem_cache) | |
184 | kmem_cache_destroy(t10_alua_lu_gp_mem_cache); | |
185 | if (t10_alua_tg_pt_gp_cache) | |
186 | kmem_cache_destroy(t10_alua_tg_pt_gp_cache); | |
187 | if (t10_alua_tg_pt_gp_mem_cache) | |
188 | kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); | |
e3d6f909 | 189 | return -ENOMEM; |
c66ac9db NB |
190 | } |
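A note on how these caches are consumed: objects are carved out of and returned to them with the standard slab helpers, exactly as transport_init_session() does below with se_sess_cache. A minimal, illustrative sketch (not part of this file) for se_cmd_cache:

	struct se_cmd *cmd;

	cmd = kmem_cache_zalloc(se_cmd_cache, GFP_KERNEL);	/* zeroed allocation */
	if (!cmd)
		return -ENOMEM;
	/* ... set up the descriptor and hand it to the core ... */
	kmem_cache_free(se_cmd_cache, cmd);			/* return it to the cache */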
191 | ||
e3d6f909 | 192 | void release_se_kmem_caches(void) |
c66ac9db | 193 | { |
c66ac9db NB |
194 | kmem_cache_destroy(se_cmd_cache); |
195 | kmem_cache_destroy(se_tmr_req_cache); | |
196 | kmem_cache_destroy(se_sess_cache); | |
197 | kmem_cache_destroy(se_ua_cache); | |
c66ac9db NB |
198 | kmem_cache_destroy(t10_pr_reg_cache); |
199 | kmem_cache_destroy(t10_alua_lu_gp_cache); | |
200 | kmem_cache_destroy(t10_alua_lu_gp_mem_cache); | |
201 | kmem_cache_destroy(t10_alua_tg_pt_gp_cache); | |
202 | kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); | |
c66ac9db NB |
203 | } |
204 | ||
e3d6f909 AG |
205 | /* This code ensures unique mib indexes are handed out. */ |
206 | static DEFINE_SPINLOCK(scsi_mib_index_lock); | |
207 | static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; | |
e89d15ee NB |
208 | |
209 | /* | |
210 | * Allocate a new row index for the entry type specified | |
211 | */ | |
212 | u32 scsi_get_new_index(scsi_index_t type) | |
213 | { | |
214 | u32 new_index; | |
215 | ||
e3d6f909 | 216 | BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)); |
e89d15ee | 217 | |
e3d6f909 AG |
218 | spin_lock(&scsi_mib_index_lock); |
219 | new_index = ++scsi_mib_index[type]; | |
220 | spin_unlock(&scsi_mib_index_lock); | |
e89d15ee NB |
221 | |
222 | return new_index; | |
223 | } | |
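Callers simply pass the index type they need and receive a monotonically increasing value; for example, device setup later in this file does:

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);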
224 | ||
c66ac9db NB |
225 | void transport_init_queue_obj(struct se_queue_obj *qobj) |
226 | { | |
227 | atomic_set(&qobj->queue_cnt, 0); | |
228 | INIT_LIST_HEAD(&qobj->qobj_list); | |
229 | init_waitqueue_head(&qobj->thread_wq); | |
230 | spin_lock_init(&qobj->cmd_queue_lock); | |
231 | } | |
232 | EXPORT_SYMBOL(transport_init_queue_obj); | |
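The se_queue_obj initialized here is a small producer/consumer queue: completions are pushed with transport_add_cmd_to_queue() and the per-device kthread pulls them back off with transport_get_cmd_from_queue(), both defined further down. A rough sketch of the consumer side (the real loop lives in transport_processing_thread(), which is declared above but defined outside this excerpt; error paths and stop handling omitted):

	while (!kthread_should_stop()) {
		wait_event_interruptible(qobj->thread_wq,
					 atomic_read(&qobj->queue_cnt));
		cmd = transport_get_cmd_from_queue(qobj);
		if (!cmd)
			continue;
		/* dispatch based on cmd->t_state ... */
	}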
233 | ||
234 | static int transport_subsystem_reqmods(void) | |
235 | { | |
236 | int ret; | |
237 | ||
238 | ret = request_module("target_core_iblock"); | |
239 | if (ret != 0) | |
6708bb27 | 240 | pr_err("Unable to load target_core_iblock\n"); |
c66ac9db NB |
241 | |
242 | ret = request_module("target_core_file"); | |
243 | if (ret != 0) | |
6708bb27 | 244 | pr_err("Unable to load target_core_file\n"); |
c66ac9db NB |
245 | |
246 | ret = request_module("target_core_pscsi"); | |
247 | if (ret != 0) | |
6708bb27 | 248 | pr_err("Unable to load target_core_pscsi\n"); |
c66ac9db NB |
249 | |
250 | ret = request_module("target_core_stgt"); | |
251 | if (ret != 0) | |
6708bb27 | 252 | pr_err("Unable to load target_core_stgt\n"); |
c66ac9db NB |
253 | |
254 | return 0; | |
255 | } | |
256 | ||
257 | int transport_subsystem_check_init(void) | |
258 | { | |
e3d6f909 AG |
259 | int ret; |
260 | ||
261 | if (sub_api_initialized) | |
c66ac9db NB |
262 | return 0; |
263 | /* | |
264 | * Request the loading of known TCM subsystem plugins.. | |
265 | */ | |
e3d6f909 AG |
266 | ret = transport_subsystem_reqmods(); |
267 | if (ret < 0) | |
268 | return ret; | |
c66ac9db | 269 | |
e3d6f909 | 270 | sub_api_initialized = 1; |
c66ac9db NB |
271 | return 0; |
272 | } | |
273 | ||
274 | struct se_session *transport_init_session(void) | |
275 | { | |
276 | struct se_session *se_sess; | |
277 | ||
278 | se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); | |
6708bb27 AG |
279 | if (!se_sess) { |
280 | pr_err("Unable to allocate struct se_session from" | |
c66ac9db NB |
281 | " se_sess_cache\n"); |
282 | return ERR_PTR(-ENOMEM); | |
283 | } | |
284 | INIT_LIST_HEAD(&se_sess->sess_list); | |
285 | INIT_LIST_HEAD(&se_sess->sess_acl_list); | |
c66ac9db NB |
286 | |
287 | return se_sess; | |
288 | } | |
289 | EXPORT_SYMBOL(transport_init_session); | |
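Taken together with transport_register_session() just below, a fabric module would typically bring up an I_T nexus along these lines (se_tpg, se_nacl and fabric_sess_ptr are placeholders for the fabric driver's own objects):

	struct se_session *se_sess;

	se_sess = transport_init_session();
	if (IS_ERR(se_sess))
		return PTR_ERR(se_sess);
	transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);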
290 | ||
291 | /* | |
292 | * Called with spin_lock_bh(&struct se_portal_group->session_lock) held. |
293 | */ | |
294 | void __transport_register_session( | |
295 | struct se_portal_group *se_tpg, | |
296 | struct se_node_acl *se_nacl, | |
297 | struct se_session *se_sess, | |
298 | void *fabric_sess_ptr) | |
299 | { | |
300 | unsigned char buf[PR_REG_ISID_LEN]; | |
301 | ||
302 | se_sess->se_tpg = se_tpg; | |
303 | se_sess->fabric_sess_ptr = fabric_sess_ptr; | |
304 | /* | |
305 | * Used by struct se_node_acl's under ConfigFS to locate the active struct se_session |
306 | * | |
307 | * Only set for struct se_session's that will actually be moving I/O. | |
308 | * eg: *NOT* discovery sessions. | |
309 | */ | |
310 | if (se_nacl) { | |
311 | /* | |
312 | * If the fabric module supports an ISID based TransportID, | |
313 | * save this value in binary from the fabric I_T Nexus now. | |
314 | */ | |
e3d6f909 | 315 | if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { |
c66ac9db | 316 | memset(&buf[0], 0, PR_REG_ISID_LEN); |
e3d6f909 | 317 | se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, |
c66ac9db NB |
318 | &buf[0], PR_REG_ISID_LEN); |
319 | se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); | |
320 | } | |
321 | spin_lock_irq(&se_nacl->nacl_sess_lock); | |
322 | /* | |
323 | * The se_nacl->nacl_sess pointer will be set to the | |
324 | * last active I_T Nexus for each struct se_node_acl. | |
325 | */ | |
326 | se_nacl->nacl_sess = se_sess; | |
327 | ||
328 | list_add_tail(&se_sess->sess_acl_list, | |
329 | &se_nacl->acl_sess_list); | |
330 | spin_unlock_irq(&se_nacl->nacl_sess_lock); | |
331 | } | |
332 | list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); | |
333 | ||
6708bb27 | 334 | pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", |
e3d6f909 | 335 | se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr); |
c66ac9db NB |
336 | } |
337 | EXPORT_SYMBOL(__transport_register_session); | |
338 | ||
339 | void transport_register_session( | |
340 | struct se_portal_group *se_tpg, | |
341 | struct se_node_acl *se_nacl, | |
342 | struct se_session *se_sess, | |
343 | void *fabric_sess_ptr) | |
344 | { | |
345 | spin_lock_bh(&se_tpg->session_lock); | |
346 | __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr); | |
347 | spin_unlock_bh(&se_tpg->session_lock); | |
348 | } | |
349 | EXPORT_SYMBOL(transport_register_session); | |
350 | ||
351 | void transport_deregister_session_configfs(struct se_session *se_sess) | |
352 | { | |
353 | struct se_node_acl *se_nacl; | |
23388864 | 354 | unsigned long flags; |
c66ac9db NB |
355 | /* |
356 | * Used by struct se_node_acl's under ConfigFS to locate active struct se_session | |
357 | */ | |
358 | se_nacl = se_sess->se_node_acl; | |
6708bb27 | 359 | if (se_nacl) { |
23388864 | 360 | spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); |
c66ac9db NB |
361 | list_del(&se_sess->sess_acl_list); |
362 | /* | |
363 | * If the session list is empty, then clear the pointer. | |
364 | * Otherwise, set the struct se_session pointer from the tail | |
365 | * element of the per struct se_node_acl active session list. | |
366 | */ | |
367 | if (list_empty(&se_nacl->acl_sess_list)) | |
368 | se_nacl->nacl_sess = NULL; | |
369 | else { | |
370 | se_nacl->nacl_sess = container_of( | |
371 | se_nacl->acl_sess_list.prev, | |
372 | struct se_session, sess_acl_list); | |
373 | } | |
23388864 | 374 | spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); |
c66ac9db NB |
375 | } |
376 | } | |
377 | EXPORT_SYMBOL(transport_deregister_session_configfs); | |
378 | ||
379 | void transport_free_session(struct se_session *se_sess) | |
380 | { | |
381 | kmem_cache_free(se_sess_cache, se_sess); | |
382 | } | |
383 | EXPORT_SYMBOL(transport_free_session); | |
384 | ||
385 | void transport_deregister_session(struct se_session *se_sess) | |
386 | { | |
387 | struct se_portal_group *se_tpg = se_sess->se_tpg; | |
388 | struct se_node_acl *se_nacl; | |
e63a8e19 | 389 | unsigned long flags; |
c66ac9db | 390 | |
6708bb27 | 391 | if (!se_tpg) { |
c66ac9db NB |
392 | transport_free_session(se_sess); |
393 | return; | |
394 | } | |
c66ac9db | 395 | |
e63a8e19 | 396 | spin_lock_irqsave(&se_tpg->session_lock, flags); |
c66ac9db NB |
397 | list_del(&se_sess->sess_list); |
398 | se_sess->se_tpg = NULL; | |
399 | se_sess->fabric_sess_ptr = NULL; | |
e63a8e19 | 400 | spin_unlock_irqrestore(&se_tpg->session_lock, flags); |
c66ac9db NB |
401 | |
402 | /* | |
403 | * Determine if we need to do extra work for this initiator node's | |
404 | * struct se_node_acl if it had been previously dynamically generated. | |
405 | */ | |
406 | se_nacl = se_sess->se_node_acl; | |
6708bb27 | 407 | if (se_nacl) { |
e63a8e19 | 408 | spin_lock_irqsave(&se_tpg->acl_node_lock, flags); |
c66ac9db | 409 | if (se_nacl->dynamic_node_acl) { |
6708bb27 AG |
410 | if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache( |
411 | se_tpg)) { | |
c66ac9db NB |
412 | list_del(&se_nacl->acl_list); |
413 | se_tpg->num_node_acls--; | |
e63a8e19 | 414 | spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); |
c66ac9db NB |
415 | |
416 | core_tpg_wait_for_nacl_pr_ref(se_nacl); | |
c66ac9db | 417 | core_free_device_list_for_node(se_nacl, se_tpg); |
e3d6f909 | 418 | se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, |
c66ac9db | 419 | se_nacl); |
e63a8e19 | 420 | spin_lock_irqsave(&se_tpg->acl_node_lock, flags); |
c66ac9db NB |
421 | } |
422 | } | |
e63a8e19 | 423 | spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); |
c66ac9db NB |
424 | } |
425 | ||
426 | transport_free_session(se_sess); | |
427 | ||
6708bb27 | 428 | pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", |
e3d6f909 | 429 | se_tpg->se_tpg_tfo->get_fabric_name()); |
c66ac9db NB |
430 | } |
431 | EXPORT_SYMBOL(transport_deregister_session); | |
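The matching teardown from a fabric module is the mirror image; a minimal sketch of one common ordering:

	transport_deregister_session_configfs(se_sess);	/* drop the se_node_acl linkage */
	transport_deregister_session(se_sess);		/* unhook from the TPG; frees the session */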
432 | ||
433 | /* | |
a1d8b49a | 434 | * Called with cmd->t_state_lock held. |
c66ac9db NB |
435 | */ |
436 | static void transport_all_task_dev_remove_state(struct se_cmd *cmd) | |
437 | { | |
438 | struct se_device *dev; | |
439 | struct se_task *task; | |
440 | unsigned long flags; | |
441 | ||
a1d8b49a | 442 | list_for_each_entry(task, &cmd->t_task_list, t_list) { |
c66ac9db | 443 | dev = task->se_dev; |
6708bb27 | 444 | if (!dev) |
c66ac9db NB |
445 | continue; |
446 | ||
447 | if (atomic_read(&task->task_active)) | |
448 | continue; | |
449 | ||
6708bb27 | 450 | if (!atomic_read(&task->task_state_active)) |
c66ac9db NB |
451 | continue; |
452 | ||
453 | spin_lock_irqsave(&dev->execute_task_lock, flags); | |
454 | list_del(&task->t_state_list); | |
6708bb27 AG |
455 | pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n", |
456 | cmd->se_tfo->get_task_tag(cmd), dev, task); | |
c66ac9db NB |
457 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); |
458 | ||
459 | atomic_set(&task->task_state_active, 0); | |
a1d8b49a | 460 | atomic_dec(&cmd->t_task_cdbs_ex_left); |
c66ac9db NB |
461 | } |
462 | } | |
463 | ||
464 | /* transport_cmd_check_stop(): | |
465 | * | |
466 | * 'transport_off = 1' determines if t_transport_active should be cleared. | |
467 | * 'transport_off = 2' determines if task_dev_state should be removed. | |
468 | * | |
469 | * A non-zero u8 t_state sets cmd->t_state. | |
470 | * Returns 1 when command is stopped, else 0. | |
471 | */ | |
472 | static int transport_cmd_check_stop( | |
473 | struct se_cmd *cmd, | |
474 | int transport_off, | |
475 | u8 t_state) | |
476 | { | |
477 | unsigned long flags; | |
478 | ||
a1d8b49a | 479 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db NB |
480 | /* |
481 | * Determine if the IOCTL context caller is requesting the stopping of this |
482 | * command for LUN shutdown purposes. | |
483 | */ | |
a1d8b49a | 484 | if (atomic_read(&cmd->transport_lun_stop)) { |
6708bb27 | 485 | pr_debug("%s:%d atomic_read(&cmd->transport_lun_stop)" |
c66ac9db | 486 | " == TRUE for ITT: 0x%08x\n", __func__, __LINE__, |
e3d6f909 | 487 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db NB |
488 | |
489 | cmd->deferred_t_state = cmd->t_state; | |
490 | cmd->t_state = TRANSPORT_DEFERRED_CMD; | |
a1d8b49a | 491 | atomic_set(&cmd->t_transport_active, 0); |
c66ac9db NB |
492 | if (transport_off == 2) |
493 | transport_all_task_dev_remove_state(cmd); | |
a1d8b49a | 494 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 495 | |
a1d8b49a | 496 | complete(&cmd->transport_lun_stop_comp); |
c66ac9db NB |
497 | return 1; |
498 | } | |
499 | /* | |
500 | * Determine if frontend context caller is requesting the stopping of | |
e3d6f909 | 501 | * this command for frontend exceptions. |
c66ac9db | 502 | */ |
a1d8b49a | 503 | if (atomic_read(&cmd->t_transport_stop)) { |
6708bb27 | 504 | pr_debug("%s:%d atomic_read(&cmd->t_transport_stop) ==" |
c66ac9db | 505 | " TRUE for ITT: 0x%08x\n", __func__, __LINE__, |
e3d6f909 | 506 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db NB |
507 | |
508 | cmd->deferred_t_state = cmd->t_state; | |
509 | cmd->t_state = TRANSPORT_DEFERRED_CMD; | |
510 | if (transport_off == 2) | |
511 | transport_all_task_dev_remove_state(cmd); | |
512 | ||
513 | /* | |
514 | * Clear struct se_cmd->se_lun before the transport_off == 2 handoff | |
515 | * to FE. | |
516 | */ | |
517 | if (transport_off == 2) | |
518 | cmd->se_lun = NULL; | |
a1d8b49a | 519 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 520 | |
a1d8b49a | 521 | complete(&cmd->t_transport_stop_comp); |
c66ac9db NB |
522 | return 1; |
523 | } | |
524 | if (transport_off) { | |
a1d8b49a | 525 | atomic_set(&cmd->t_transport_active, 0); |
c66ac9db NB |
526 | if (transport_off == 2) { |
527 | transport_all_task_dev_remove_state(cmd); | |
528 | /* | |
529 | * Clear struct se_cmd->se_lun before the transport_off == 2 | |
530 | * handoff to fabric module. | |
531 | */ | |
532 | cmd->se_lun = NULL; | |
533 | /* | |
534 | * Some fabric modules like tcm_loop can release | |
25985edc | 535 | * their internally allocated I/O reference and the |
c66ac9db NB |
536 | * struct se_cmd itself now. |
537 | */ | |
e3d6f909 | 538 | if (cmd->se_tfo->check_stop_free != NULL) { |
c66ac9db | 539 | spin_unlock_irqrestore( |
a1d8b49a | 540 | &cmd->t_state_lock, flags); |
c66ac9db | 541 | |
e3d6f909 | 542 | cmd->se_tfo->check_stop_free(cmd); |
c66ac9db NB |
543 | return 1; |
544 | } | |
545 | } | |
a1d8b49a | 546 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
547 | |
548 | return 0; | |
549 | } else if (t_state) | |
550 | cmd->t_state = t_state; | |
a1d8b49a | 551 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
552 | |
553 | return 0; | |
554 | } | |
555 | ||
556 | static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) | |
557 | { | |
558 | return transport_cmd_check_stop(cmd, 2, 0); | |
559 | } | |
560 | ||
561 | static void transport_lun_remove_cmd(struct se_cmd *cmd) | |
562 | { | |
e3d6f909 | 563 | struct se_lun *lun = cmd->se_lun; |
c66ac9db NB |
564 | unsigned long flags; |
565 | ||
566 | if (!lun) | |
567 | return; | |
568 | ||
a1d8b49a | 569 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
6708bb27 | 570 | if (!atomic_read(&cmd->transport_dev_active)) { |
a1d8b49a | 571 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
572 | goto check_lun; |
573 | } | |
a1d8b49a | 574 | atomic_set(&cmd->transport_dev_active, 0); |
c66ac9db | 575 | transport_all_task_dev_remove_state(cmd); |
a1d8b49a | 576 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 577 | |
c66ac9db NB |
578 | |
579 | check_lun: | |
580 | spin_lock_irqsave(&lun->lun_cmd_lock, flags); | |
a1d8b49a | 581 | if (atomic_read(&cmd->transport_lun_active)) { |
5951146d | 582 | list_del(&cmd->se_lun_node); |
a1d8b49a | 583 | atomic_set(&cmd->transport_lun_active, 0); |
c66ac9db | 584 | #if 0 |
6708bb27 | 585 | pr_debug("Removed ITT: 0x%08x from LUN LIST[%d]\n", |
e3d6f909 | 586 | cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun); |
c66ac9db NB |
587 | #endif |
588 | } | |
589 | spin_unlock_irqrestore(&lun->lun_cmd_lock, flags); | |
590 | } | |
591 | ||
592 | void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) | |
593 | { | |
8dc52b54 NB |
594 | if (!cmd->se_tmr_req) |
595 | transport_lun_remove_cmd(cmd); | |
c66ac9db NB |
596 | |
597 | if (transport_cmd_check_stop_to_fabric(cmd)) | |
598 | return; | |
77039d1e NB |
599 | if (remove) { |
600 | transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); | |
e6a2573f | 601 | transport_put_cmd(cmd); |
77039d1e | 602 | } |
c66ac9db NB |
603 | } |
604 | ||
5951146d | 605 | static void transport_add_cmd_to_queue( |
c66ac9db NB |
606 | struct se_cmd *cmd, |
607 | int t_state) | |
608 | { | |
609 | struct se_device *dev = cmd->se_dev; | |
e3d6f909 | 610 | struct se_queue_obj *qobj = &dev->dev_queue_obj; |
c66ac9db NB |
611 | unsigned long flags; |
612 | ||
c66ac9db | 613 | if (t_state) { |
a1d8b49a | 614 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 615 | cmd->t_state = t_state; |
a1d8b49a AG |
616 | atomic_set(&cmd->t_transport_active, 1); |
617 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
c66ac9db NB |
618 | } |
619 | ||
620 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | |
79a7fef2 RD |
621 | |
622 | /* If the cmd is already on the list, remove it before we add it */ | |
623 | if (!list_empty(&cmd->se_queue_node)) | |
624 | list_del(&cmd->se_queue_node); | |
625 | else | |
626 | atomic_inc(&qobj->queue_cnt); | |
627 | ||
07bde79a NB |
628 | if (cmd->se_cmd_flags & SCF_EMULATE_QUEUE_FULL) { |
629 | cmd->se_cmd_flags &= ~SCF_EMULATE_QUEUE_FULL; | |
630 | list_add(&cmd->se_queue_node, &qobj->qobj_list); | |
631 | } else | |
632 | list_add_tail(&cmd->se_queue_node, &qobj->qobj_list); | |
79a7fef2 | 633 | atomic_set(&cmd->t_transport_queue_active, 1); |
c66ac9db NB |
634 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); |
635 | ||
c66ac9db | 636 | wake_up_interruptible(&qobj->thread_wq); |
c66ac9db NB |
637 | } |
638 | ||
5951146d AG |
639 | static struct se_cmd * |
640 | transport_get_cmd_from_queue(struct se_queue_obj *qobj) | |
c66ac9db | 641 | { |
5951146d | 642 | struct se_cmd *cmd; |
c66ac9db NB |
643 | unsigned long flags; |
644 | ||
645 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | |
646 | if (list_empty(&qobj->qobj_list)) { | |
647 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | |
648 | return NULL; | |
649 | } | |
5951146d | 650 | cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node); |
c66ac9db | 651 | |
79a7fef2 | 652 | atomic_set(&cmd->t_transport_queue_active, 0); |
c66ac9db | 653 | |
79a7fef2 | 654 | list_del_init(&cmd->se_queue_node); |
c66ac9db NB |
655 | atomic_dec(&qobj->queue_cnt); |
656 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | |
657 | ||
5951146d | 658 | return cmd; |
c66ac9db NB |
659 | } |
660 | ||
661 | static void transport_remove_cmd_from_queue(struct se_cmd *cmd, | |
662 | struct se_queue_obj *qobj) | |
663 | { | |
c66ac9db NB |
664 | unsigned long flags; |
665 | ||
666 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | |
6708bb27 | 667 | if (!atomic_read(&cmd->t_transport_queue_active)) { |
c66ac9db NB |
668 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); |
669 | return; | |
670 | } | |
79a7fef2 RD |
671 | atomic_set(&cmd->t_transport_queue_active, 0); |
672 | atomic_dec(&qobj->queue_cnt); | |
673 | list_del_init(&cmd->se_queue_node); | |
c66ac9db NB |
674 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); |
675 | ||
a1d8b49a | 676 | if (atomic_read(&cmd->t_transport_queue_active)) { |
6708bb27 | 677 | pr_err("ITT: 0x%08x t_transport_queue_active: %d\n", |
e3d6f909 | 678 | cmd->se_tfo->get_task_tag(cmd), |
a1d8b49a | 679 | atomic_read(&cmd->t_transport_queue_active)); |
c66ac9db NB |
680 | } |
681 | } | |
682 | ||
683 | /* | |
684 | * Completion function used by TCM subsystem plugins (such as FILEIO) | |
685 | * for queueing up response from struct se_subsystem_api->do_task() | |
686 | */ | |
687 | void transport_complete_sync_cache(struct se_cmd *cmd, int good) | |
688 | { | |
a1d8b49a | 689 | struct se_task *task = list_entry(cmd->t_task_list.next, |
c66ac9db NB |
690 | struct se_task, t_list); |
691 | ||
692 | if (good) { | |
693 | cmd->scsi_status = SAM_STAT_GOOD; | |
694 | task->task_scsi_status = GOOD; | |
695 | } else { | |
696 | task->task_scsi_status = SAM_STAT_CHECK_CONDITION; | |
697 | task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST; | |
e3d6f909 | 698 | task->task_se_cmd->transport_error_status = |
c66ac9db NB |
699 | PYX_TRANSPORT_ILLEGAL_REQUEST; |
700 | } | |
701 | ||
702 | transport_complete_task(task, good); | |
703 | } | |
704 | EXPORT_SYMBOL(transport_complete_sync_cache); | |
705 | ||
706 | /* transport_complete_task(): | |
707 | * | |
708 | * Called from interrupt and non-interrupt context depending |
709 | * on the transport plugin. | |
710 | */ | |
711 | void transport_complete_task(struct se_task *task, int success) | |
712 | { | |
e3d6f909 | 713 | struct se_cmd *cmd = task->task_se_cmd; |
c66ac9db NB |
714 | struct se_device *dev = task->se_dev; |
715 | int t_state; | |
716 | unsigned long flags; | |
717 | #if 0 | |
6708bb27 | 718 | pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task, |
a1d8b49a | 719 | cmd->t_task_cdb[0], dev); |
c66ac9db | 720 | #endif |
e3d6f909 | 721 | if (dev) |
c66ac9db | 722 | atomic_inc(&dev->depth_left); |
c66ac9db | 723 | |
a1d8b49a | 724 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db NB |
725 | atomic_set(&task->task_active, 0); |
726 | ||
727 | /* | |
728 | * See if any sense data exists, if so set the TASK_SENSE flag. | |
729 | * Also check for any other post completion work that needs to be | |
730 | * done by the plugins. | |
731 | */ | |
732 | if (dev && dev->transport->transport_complete) { | |
733 | if (dev->transport->transport_complete(task) != 0) { | |
734 | cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE; | |
735 | task->task_sense = 1; | |
736 | success = 1; | |
737 | } | |
738 | } | |
739 | ||
740 | /* | |
741 | * See if we are waiting for outstanding struct se_task | |
742 | * to complete for an exception condition | |
743 | */ | |
744 | if (atomic_read(&task->task_stop)) { | |
745 | /* | |
a1d8b49a | 746 | * Decrement cmd->t_se_count if this task had |
c66ac9db NB |
747 | * previously thrown its timeout exception handler. |
748 | */ | |
749 | if (atomic_read(&task->task_timeout)) { | |
a1d8b49a | 750 | atomic_dec(&cmd->t_se_count); |
c66ac9db NB |
751 | atomic_set(&task->task_timeout, 0); |
752 | } | |
a1d8b49a | 753 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
754 | |
755 | complete(&task->task_stop_comp); | |
756 | return; | |
757 | } | |
758 | /* | |
759 | * If the task's timeout handler has fired, use the t_task_cdbs_timeout | |
760 | * left counter to determine when the struct se_cmd is ready to be queued to | |
761 | * the processing thread. | |
762 | */ | |
763 | if (atomic_read(&task->task_timeout)) { | |
6708bb27 AG |
764 | if (!atomic_dec_and_test( |
765 | &cmd->t_task_cdbs_timeout_left)) { | |
a1d8b49a | 766 | spin_unlock_irqrestore(&cmd->t_state_lock, |
c66ac9db NB |
767 | flags); |
768 | return; | |
769 | } | |
770 | t_state = TRANSPORT_COMPLETE_TIMEOUT; | |
a1d8b49a | 771 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
772 | |
773 | transport_add_cmd_to_queue(cmd, t_state); | |
774 | return; | |
775 | } | |
a1d8b49a | 776 | atomic_dec(&cmd->t_task_cdbs_timeout_left); |
c66ac9db NB |
777 | |
778 | /* | |
779 | * Decrement the outstanding t_task_cdbs_left count. The last | |
780 | * struct se_task from struct se_cmd will complete itself into the | |
781 | * device queue depending upon int success. | |
782 | */ | |
6708bb27 | 783 | if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) { |
c66ac9db | 784 | if (!success) |
a1d8b49a | 785 | cmd->t_tasks_failed = 1; |
c66ac9db | 786 | |
a1d8b49a | 787 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
788 | return; |
789 | } | |
790 | ||
a1d8b49a | 791 | if (!success || cmd->t_tasks_failed) { |
c66ac9db NB |
792 | t_state = TRANSPORT_COMPLETE_FAILURE; |
793 | if (!task->task_error_status) { | |
794 | task->task_error_status = | |
795 | PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | |
796 | cmd->transport_error_status = | |
797 | PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | |
798 | } | |
799 | } else { | |
a1d8b49a | 800 | atomic_set(&cmd->t_transport_complete, 1); |
c66ac9db NB |
801 | t_state = TRANSPORT_COMPLETE_OK; |
802 | } | |
a1d8b49a | 803 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
804 | |
805 | transport_add_cmd_to_queue(cmd, t_state); | |
806 | } | |
807 | EXPORT_SYMBOL(transport_complete_task); | |
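For a subsystem plugin, the completion contract is simply to fill in the task status and call this function from its I/O-done path; a minimal sketch mirroring what transport_complete_sync_cache() does above:

	task->task_scsi_status = GOOD;
	transport_complete_task(task, 1);	/* 1 == success */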
808 | ||
809 | /* | |
810 | * Called by transport_add_tasks_from_cmd() once a struct se_cmd's | |
811 | * struct se_task list is ready to be added to the active execution |
812 | * list of a struct se_device. |
813 | * |
814 | * Called with se_dev_t->execute_task_lock held. |
815 | */ | |
816 | static inline int transport_add_task_check_sam_attr( | |
817 | struct se_task *task, | |
818 | struct se_task *task_prev, | |
819 | struct se_device *dev) | |
820 | { | |
821 | /* | |
822 | * No SAM Task attribute emulation enabled, add to tail of | |
823 | * execution queue | |
824 | */ | |
825 | if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) { | |
826 | list_add_tail(&task->t_execute_list, &dev->execute_task_list); | |
827 | return 0; | |
828 | } | |
829 | /* | |
830 | * HEAD_OF_QUEUE attribute for received CDB, which means | |
831 | * the first task that is associated with a struct se_cmd goes to | |
832 | * head of the struct se_device->execute_task_list, and task_prev | |
833 | * after that for each subsequent task | |
834 | */ | |
e66ecd50 | 835 | if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) { |
c66ac9db NB |
836 | list_add(&task->t_execute_list, |
837 | (task_prev != NULL) ? | |
838 | &task_prev->t_execute_list : | |
839 | &dev->execute_task_list); | |
840 | ||
6708bb27 | 841 | pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x" |
c66ac9db | 842 | " in execution queue\n", |
6708bb27 | 843 | task->task_se_cmd->t_task_cdb[0]); |
c66ac9db NB |
844 | return 1; |
845 | } | |
846 | /* | |
847 | * For ORDERED, SIMPLE or UNTAGGED attribute tasks once they have been | |
848 | * transitioned from Dormant -> Active state, and are added to the end |
849 | * of the struct se_device->execute_task_list | |
850 | */ | |
851 | list_add_tail(&task->t_execute_list, &dev->execute_task_list); | |
852 | return 0; | |
853 | } | |
854 | ||
855 | /* __transport_add_task_to_execute_queue(): | |
856 | * | |
857 | * Called with se_dev_t->execute_task_lock held. |
858 | */ | |
859 | static void __transport_add_task_to_execute_queue( | |
860 | struct se_task *task, | |
861 | struct se_task *task_prev, | |
862 | struct se_device *dev) | |
863 | { | |
864 | int head_of_queue; | |
865 | ||
866 | head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev); | |
867 | atomic_inc(&dev->execute_tasks); | |
868 | ||
869 | if (atomic_read(&task->task_state_active)) | |
870 | return; | |
871 | /* | |
872 | * Determine if this task needs to go to HEAD_OF_QUEUE for the | |
873 | * state list as well. Running with SAM Task Attribute emulation | |
874 | * will always return head_of_queue == 0 here | |
875 | */ | |
876 | if (head_of_queue) | |
877 | list_add(&task->t_state_list, (task_prev) ? | |
878 | &task_prev->t_state_list : | |
879 | &dev->state_task_list); | |
880 | else | |
881 | list_add_tail(&task->t_state_list, &dev->state_task_list); | |
882 | ||
883 | atomic_set(&task->task_state_active, 1); | |
884 | ||
6708bb27 | 885 | pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n", |
e3d6f909 | 886 | task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd), |
c66ac9db NB |
887 | task, dev); |
888 | } | |
889 | ||
890 | static void transport_add_tasks_to_state_queue(struct se_cmd *cmd) | |
891 | { | |
892 | struct se_device *dev; | |
893 | struct se_task *task; | |
894 | unsigned long flags; | |
895 | ||
a1d8b49a AG |
896 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
897 | list_for_each_entry(task, &cmd->t_task_list, t_list) { | |
c66ac9db NB |
898 | dev = task->se_dev; |
899 | ||
900 | if (atomic_read(&task->task_state_active)) | |
901 | continue; | |
902 | ||
903 | spin_lock(&dev->execute_task_lock); | |
904 | list_add_tail(&task->t_state_list, &dev->state_task_list); | |
905 | atomic_set(&task->task_state_active, 1); | |
906 | ||
6708bb27 AG |
907 | pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n", |
908 | task->task_se_cmd->se_tfo->get_task_tag( | |
c66ac9db NB |
909 | task->task_se_cmd), task, dev); |
910 | ||
911 | spin_unlock(&dev->execute_task_lock); | |
912 | } | |
a1d8b49a | 913 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
914 | } |
915 | ||
916 | static void transport_add_tasks_from_cmd(struct se_cmd *cmd) | |
917 | { | |
5951146d | 918 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
919 | struct se_task *task, *task_prev = NULL; |
920 | unsigned long flags; | |
921 | ||
922 | spin_lock_irqsave(&dev->execute_task_lock, flags); | |
a1d8b49a | 923 | list_for_each_entry(task, &cmd->t_task_list, t_list) { |
c66ac9db NB |
924 | if (atomic_read(&task->task_execute_queue)) |
925 | continue; | |
926 | /* | |
927 | * __transport_add_task_to_execute_queue() handles the | |
928 | * SAM Task Attribute emulation if enabled | |
929 | */ | |
930 | __transport_add_task_to_execute_queue(task, task_prev, dev); | |
931 | atomic_set(&task->task_execute_queue, 1); | |
932 | task_prev = task; | |
933 | } | |
934 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | |
c66ac9db NB |
935 | } |
936 | ||
937 | /* transport_remove_task_from_execute_queue(): | |
938 | * | |
939 | * | |
940 | */ | |
52208ae3 | 941 | void transport_remove_task_from_execute_queue( |
c66ac9db NB |
942 | struct se_task *task, |
943 | struct se_device *dev) | |
944 | { | |
945 | unsigned long flags; | |
946 | ||
af57c3ac NB |
947 | if (atomic_read(&task->task_execute_queue) == 0) { |
948 | dump_stack(); | |
949 | return; | |
950 | } | |
951 | ||
c66ac9db NB |
952 | spin_lock_irqsave(&dev->execute_task_lock, flags); |
953 | list_del(&task->t_execute_list); | |
af57c3ac | 954 | atomic_set(&task->task_execute_queue, 0); |
c66ac9db NB |
955 | atomic_dec(&dev->execute_tasks); |
956 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | |
957 | } | |
958 | ||
07bde79a NB |
959 | /* |
960 | * Handle QUEUE_FULL / -EAGAIN status | |
961 | */ | |
962 | ||
963 | static void target_qf_do_work(struct work_struct *work) | |
964 | { | |
965 | struct se_device *dev = container_of(work, struct se_device, | |
966 | qf_work_queue); | |
bcac364a | 967 | LIST_HEAD(qf_cmd_list); |
07bde79a NB |
968 | struct se_cmd *cmd, *cmd_tmp; |
969 | ||
970 | spin_lock_irq(&dev->qf_cmd_lock); | |
bcac364a RD |
971 | list_splice_init(&dev->qf_cmd_list, &qf_cmd_list); |
972 | spin_unlock_irq(&dev->qf_cmd_lock); | |
07bde79a | 973 | |
bcac364a | 974 | list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { |
07bde79a NB |
975 | list_del(&cmd->se_qf_node); |
976 | atomic_dec(&dev->dev_qf_count); | |
977 | smp_mb__after_atomic_dec(); | |
07bde79a | 978 | |
6708bb27 | 979 | pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" |
07bde79a NB |
980 | " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, |
981 | (cmd->t_state == TRANSPORT_COMPLETE_OK) ? "COMPLETE_OK" : | |
982 | (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING" | |
983 | : "UNKNOWN"); | |
984 | /* | |
985 | * The SCF_EMULATE_QUEUE_FULL flag will be cleared once se_cmd | |
986 | * has been added to head of queue | |
987 | */ | |
988 | transport_add_cmd_to_queue(cmd, cmd->t_state); | |
07bde79a | 989 | } |
07bde79a NB |
990 | } |
991 | ||
c66ac9db NB |
992 | unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) |
993 | { | |
994 | switch (cmd->data_direction) { | |
995 | case DMA_NONE: | |
996 | return "NONE"; | |
997 | case DMA_FROM_DEVICE: | |
998 | return "READ"; | |
999 | case DMA_TO_DEVICE: | |
1000 | return "WRITE"; | |
1001 | case DMA_BIDIRECTIONAL: | |
1002 | return "BIDI"; | |
1003 | default: | |
1004 | break; | |
1005 | } | |
1006 | ||
1007 | return "UNKNOWN"; | |
1008 | } | |
1009 | ||
1010 | void transport_dump_dev_state( | |
1011 | struct se_device *dev, | |
1012 | char *b, | |
1013 | int *bl) | |
1014 | { | |
1015 | *bl += sprintf(b + *bl, "Status: "); | |
1016 | switch (dev->dev_status) { | |
1017 | case TRANSPORT_DEVICE_ACTIVATED: | |
1018 | *bl += sprintf(b + *bl, "ACTIVATED"); | |
1019 | break; | |
1020 | case TRANSPORT_DEVICE_DEACTIVATED: | |
1021 | *bl += sprintf(b + *bl, "DEACTIVATED"); | |
1022 | break; | |
1023 | case TRANSPORT_DEVICE_SHUTDOWN: | |
1024 | *bl += sprintf(b + *bl, "SHUTDOWN"); | |
1025 | break; | |
1026 | case TRANSPORT_DEVICE_OFFLINE_ACTIVATED: | |
1027 | case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED: | |
1028 | *bl += sprintf(b + *bl, "OFFLINE"); | |
1029 | break; | |
1030 | default: | |
1031 | *bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status); | |
1032 | break; | |
1033 | } | |
1034 | ||
1035 | *bl += sprintf(b + *bl, " Execute/Left/Max Queue Depth: %d/%d/%d", | |
1036 | atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left), | |
1037 | dev->queue_depth); | |
1038 | *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n", | |
e3d6f909 | 1039 | dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors); |
c66ac9db NB |
1040 | *bl += sprintf(b + *bl, " "); |
1041 | } | |
1042 | ||
1043 | /* transport_release_all_cmds(): | |
1044 | * | |
1045 | * | |
1046 | */ | |
1047 | static void transport_release_all_cmds(struct se_device *dev) | |
1048 | { | |
5951146d | 1049 | struct se_cmd *cmd, *tcmd; |
c66ac9db NB |
1050 | int bug_out = 0, t_state; |
1051 | unsigned long flags; | |
1052 | ||
e3d6f909 | 1053 | spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags); |
5951146d AG |
1054 | list_for_each_entry_safe(cmd, tcmd, &dev->dev_queue_obj.qobj_list, |
1055 | se_queue_node) { | |
1056 | t_state = cmd->t_state; | |
79a7fef2 | 1057 | list_del_init(&cmd->se_queue_node); |
e3d6f909 | 1058 | spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, |
c66ac9db NB |
1059 | flags); |
1060 | ||
6708bb27 | 1061 | pr_err("Releasing ITT: 0x%08x, i_state: %u," |
c66ac9db | 1062 | " t_state: %u directly\n", |
e3d6f909 AG |
1063 | cmd->se_tfo->get_task_tag(cmd), |
1064 | cmd->se_tfo->get_cmd_state(cmd), t_state); | |
c66ac9db | 1065 | |
d3df7825 | 1066 | transport_put_cmd(cmd); |
c66ac9db NB |
1067 | bug_out = 1; |
1068 | ||
e3d6f909 | 1069 | spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags); |
c66ac9db | 1070 | } |
e3d6f909 | 1071 | spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, flags); |
c66ac9db NB |
1072 | #if 0 |
1073 | if (bug_out) | |
1074 | BUG(); | |
1075 | #endif | |
1076 | } | |
1077 | ||
1078 | void transport_dump_vpd_proto_id( | |
1079 | struct t10_vpd *vpd, | |
1080 | unsigned char *p_buf, | |
1081 | int p_buf_len) | |
1082 | { | |
1083 | unsigned char buf[VPD_TMP_BUF_SIZE]; | |
1084 | int len; | |
1085 | ||
1086 | memset(buf, 0, VPD_TMP_BUF_SIZE); | |
1087 | len = sprintf(buf, "T10 VPD Protocol Identifier: "); | |
1088 | ||
1089 | switch (vpd->protocol_identifier) { | |
1090 | case 0x00: | |
1091 | sprintf(buf+len, "Fibre Channel\n"); | |
1092 | break; | |
1093 | case 0x10: | |
1094 | sprintf(buf+len, "Parallel SCSI\n"); | |
1095 | break; | |
1096 | case 0x20: | |
1097 | sprintf(buf+len, "SSA\n"); | |
1098 | break; | |
1099 | case 0x30: | |
1100 | sprintf(buf+len, "IEEE 1394\n"); | |
1101 | break; | |
1102 | case 0x40: | |
1103 | sprintf(buf+len, "SCSI Remote Direct Memory Access" | |
1104 | " Protocol\n"); | |
1105 | break; | |
1106 | case 0x50: | |
1107 | sprintf(buf+len, "Internet SCSI (iSCSI)\n"); | |
1108 | break; | |
1109 | case 0x60: | |
1110 | sprintf(buf+len, "SAS Serial SCSI Protocol\n"); | |
1111 | break; | |
1112 | case 0x70: | |
1113 | sprintf(buf+len, "Automation/Drive Interface Transport" | |
1114 | " Protocol\n"); | |
1115 | break; | |
1116 | case 0x80: | |
1117 | sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n"); | |
1118 | break; | |
1119 | default: | |
1120 | sprintf(buf+len, "Unknown 0x%02x\n", | |
1121 | vpd->protocol_identifier); | |
1122 | break; | |
1123 | } | |
1124 | ||
1125 | if (p_buf) | |
1126 | strncpy(p_buf, buf, p_buf_len); | |
1127 | else | |
6708bb27 | 1128 | pr_debug("%s", buf); |
c66ac9db NB |
1129 | } |
1130 | ||
1131 | void | |
1132 | transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83) | |
1133 | { | |
1134 | /* | |
1135 | * Check if the Protocol Identifier Valid (PIV) bit is set.. | |
1136 | * | |
1137 | * from spc3r23.pdf section 7.5.1 | |
1138 | */ | |
1139 | if (page_83[1] & 0x80) { | |
1140 | vpd->protocol_identifier = (page_83[0] & 0xf0); | |
1141 | vpd->protocol_identifier_set = 1; | |
1142 | transport_dump_vpd_proto_id(vpd, NULL, 0); | |
1143 | } | |
1144 | } | |
1145 | EXPORT_SYMBOL(transport_set_vpd_proto_id); | |
1146 | ||
1147 | int transport_dump_vpd_assoc( | |
1148 | struct t10_vpd *vpd, | |
1149 | unsigned char *p_buf, | |
1150 | int p_buf_len) | |
1151 | { | |
1152 | unsigned char buf[VPD_TMP_BUF_SIZE]; | |
e3d6f909 AG |
1153 | int ret = 0; |
1154 | int len; | |
c66ac9db NB |
1155 | |
1156 | memset(buf, 0, VPD_TMP_BUF_SIZE); | |
1157 | len = sprintf(buf, "T10 VPD Identifier Association: "); | |
1158 | ||
1159 | switch (vpd->association) { | |
1160 | case 0x00: | |
1161 | sprintf(buf+len, "addressed logical unit\n"); | |
1162 | break; | |
1163 | case 0x10: | |
1164 | sprintf(buf+len, "target port\n"); | |
1165 | break; | |
1166 | case 0x20: | |
1167 | sprintf(buf+len, "SCSI target device\n"); | |
1168 | break; | |
1169 | default: | |
1170 | sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); | |
e3d6f909 | 1171 | ret = -EINVAL; |
c66ac9db NB |
1172 | break; |
1173 | } | |
1174 | ||
1175 | if (p_buf) | |
1176 | strncpy(p_buf, buf, p_buf_len); | |
1177 | else | |
6708bb27 | 1178 | pr_debug("%s", buf); |
c66ac9db NB |
1179 | |
1180 | return ret; | |
1181 | } | |
1182 | ||
1183 | int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83) | |
1184 | { | |
1185 | /* | |
1186 | * The VPD identification association.. | |
1187 | * | |
1188 | * from spc3r23.pdf Section 7.6.3.1 Table 297 | |
1189 | */ | |
1190 | vpd->association = (page_83[1] & 0x30); | |
1191 | return transport_dump_vpd_assoc(vpd, NULL, 0); | |
1192 | } | |
1193 | EXPORT_SYMBOL(transport_set_vpd_assoc); | |
1194 | ||
1195 | int transport_dump_vpd_ident_type( | |
1196 | struct t10_vpd *vpd, | |
1197 | unsigned char *p_buf, | |
1198 | int p_buf_len) | |
1199 | { | |
1200 | unsigned char buf[VPD_TMP_BUF_SIZE]; | |
e3d6f909 AG |
1201 | int ret = 0; |
1202 | int len; | |
c66ac9db NB |
1203 | |
1204 | memset(buf, 0, VPD_TMP_BUF_SIZE); | |
1205 | len = sprintf(buf, "T10 VPD Identifier Type: "); | |
1206 | ||
1207 | switch (vpd->device_identifier_type) { | |
1208 | case 0x00: | |
1209 | sprintf(buf+len, "Vendor specific\n"); | |
1210 | break; | |
1211 | case 0x01: | |
1212 | sprintf(buf+len, "T10 Vendor ID based\n"); | |
1213 | break; | |
1214 | case 0x02: | |
1215 | sprintf(buf+len, "EUI-64 based\n"); | |
1216 | break; | |
1217 | case 0x03: | |
1218 | sprintf(buf+len, "NAA\n"); | |
1219 | break; | |
1220 | case 0x04: | |
1221 | sprintf(buf+len, "Relative target port identifier\n"); | |
1222 | break; | |
1223 | case 0x08: | |
1224 | sprintf(buf+len, "SCSI name string\n"); | |
1225 | break; | |
1226 | default: | |
1227 | sprintf(buf+len, "Unsupported: 0x%02x\n", | |
1228 | vpd->device_identifier_type); | |
e3d6f909 | 1229 | ret = -EINVAL; |
c66ac9db NB |
1230 | break; |
1231 | } | |
1232 | ||
e3d6f909 AG |
1233 | if (p_buf) { |
1234 | if (p_buf_len < strlen(buf)+1) | |
1235 | return -EINVAL; | |
c66ac9db | 1236 | strncpy(p_buf, buf, p_buf_len); |
e3d6f909 | 1237 | } else { |
6708bb27 | 1238 | pr_debug("%s", buf); |
e3d6f909 | 1239 | } |
c66ac9db NB |
1240 | |
1241 | return ret; | |
1242 | } | |
1243 | ||
1244 | int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83) | |
1245 | { | |
1246 | /* | |
1247 | * The VPD identifier type.. | |
1248 | * | |
1249 | * from spc3r23.pdf Section 7.6.3.1 Table 298 | |
1250 | */ | |
1251 | vpd->device_identifier_type = (page_83[1] & 0x0f); | |
1252 | return transport_dump_vpd_ident_type(vpd, NULL, 0); | |
1253 | } | |
1254 | EXPORT_SYMBOL(transport_set_vpd_ident_type); | |
1255 | ||
1256 | int transport_dump_vpd_ident( | |
1257 | struct t10_vpd *vpd, | |
1258 | unsigned char *p_buf, | |
1259 | int p_buf_len) | |
1260 | { | |
1261 | unsigned char buf[VPD_TMP_BUF_SIZE]; | |
1262 | int ret = 0; | |
1263 | ||
1264 | memset(buf, 0, VPD_TMP_BUF_SIZE); | |
1265 | ||
1266 | switch (vpd->device_identifier_code_set) { | |
1267 | case 0x01: /* Binary */ | |
1268 | sprintf(buf, "T10 VPD Binary Device Identifier: %s\n", | |
1269 | &vpd->device_identifier[0]); | |
1270 | break; | |
1271 | case 0x02: /* ASCII */ | |
1272 | sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n", | |
1273 | &vpd->device_identifier[0]); | |
1274 | break; | |
1275 | case 0x03: /* UTF-8 */ | |
1276 | sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n", | |
1277 | &vpd->device_identifier[0]); | |
1278 | break; | |
1279 | default: | |
1280 | sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" | |
1281 | " 0x%02x", vpd->device_identifier_code_set); | |
e3d6f909 | 1282 | ret = -EINVAL; |
c66ac9db NB |
1283 | break; |
1284 | } | |
1285 | ||
1286 | if (p_buf) | |
1287 | strncpy(p_buf, buf, p_buf_len); | |
1288 | else | |
6708bb27 | 1289 | pr_debug("%s", buf); |
c66ac9db NB |
1290 | |
1291 | return ret; | |
1292 | } | |
1293 | ||
1294 | int | |
1295 | transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83) | |
1296 | { | |
1297 | static const char hex_str[] = "0123456789abcdef"; | |
1298 | int j = 0, i = 4; /* offset to start of the identifier */ |
1299 | ||
1300 | /* | |
1301 | * The VPD Code Set (encoding) | |
1302 | * | |
1303 | * from spc3r23.pdf Section 7.6.3.1 Table 296 | |
1304 | */ | |
1305 | vpd->device_identifier_code_set = (page_83[0] & 0x0f); | |
1306 | switch (vpd->device_identifier_code_set) { | |
1307 | case 0x01: /* Binary */ | |
1308 | vpd->device_identifier[j++] = | |
1309 | hex_str[vpd->device_identifier_type]; | |
1310 | while (i < (4 + page_83[3])) { | |
1311 | vpd->device_identifier[j++] = | |
1312 | hex_str[(page_83[i] & 0xf0) >> 4]; | |
1313 | vpd->device_identifier[j++] = | |
1314 | hex_str[page_83[i] & 0x0f]; | |
1315 | i++; | |
1316 | } | |
1317 | break; | |
1318 | case 0x02: /* ASCII */ | |
1319 | case 0x03: /* UTF-8 */ | |
1320 | while (i < (4 + page_83[3])) | |
1321 | vpd->device_identifier[j++] = page_83[i++]; | |
1322 | break; | |
1323 | default: | |
1324 | break; | |
1325 | } | |
1326 | ||
1327 | return transport_dump_vpd_ident(vpd, NULL, 0); | |
1328 | } | |
1329 | EXPORT_SYMBOL(transport_set_vpd_ident); | |
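The transport_set_vpd_*() helpers above are intended to be applied in sequence to each designation descriptor of an INQUIRY VPD page 0x83 response; a hedged sketch, assuming page_83 points at one descriptor and vpd has already been allocated:

	transport_set_vpd_proto_id(vpd, page_83);
	transport_set_vpd_assoc(vpd, page_83);
	transport_set_vpd_ident_type(vpd, page_83);
	transport_set_vpd_ident(vpd, page_83);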
1330 | ||
1331 | static void core_setup_task_attr_emulation(struct se_device *dev) | |
1332 | { | |
1333 | /* | |
1334 | * If this device is from Target_Core_Mod/pSCSI, disable the | |
1335 | * SAM Task Attribute emulation. | |
1336 | * | |
1337 | * This is currently not available in upstream Linux/SCSI Target |
1338 | * mode code, and is assumed to be disabled while using TCM/pSCSI. | |
1339 | */ | |
e3d6f909 | 1340 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { |
c66ac9db NB |
1341 | dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH; |
1342 | return; | |
1343 | } | |
1344 | ||
1345 | dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED; | |
6708bb27 | 1346 | pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x" |
e3d6f909 AG |
1347 | " device\n", dev->transport->name, |
1348 | dev->transport->get_device_rev(dev)); | |
c66ac9db NB |
1349 | } |
1350 | ||
1351 | static void scsi_dump_inquiry(struct se_device *dev) | |
1352 | { | |
e3d6f909 | 1353 | struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn; |
c66ac9db NB |
1354 | int i, device_type; |
1355 | /* | |
1356 | * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer | |
1357 | */ | |
6708bb27 | 1358 | pr_debug(" Vendor: "); |
c66ac9db NB |
1359 | for (i = 0; i < 8; i++) |
1360 | if (wwn->vendor[i] >= 0x20) | |
6708bb27 | 1361 | pr_debug("%c", wwn->vendor[i]); |
c66ac9db | 1362 | else |
6708bb27 | 1363 | pr_debug(" "); |
c66ac9db | 1364 | |
6708bb27 | 1365 | pr_debug(" Model: "); |
c66ac9db NB |
1366 | for (i = 0; i < 16; i++) |
1367 | if (wwn->model[i] >= 0x20) | |
6708bb27 | 1368 | pr_debug("%c", wwn->model[i]); |
c66ac9db | 1369 | else |
6708bb27 | 1370 | pr_debug(" "); |
c66ac9db | 1371 | |
6708bb27 | 1372 | pr_debug(" Revision: "); |
c66ac9db NB |
1373 | for (i = 0; i < 4; i++) |
1374 | if (wwn->revision[i] >= 0x20) | |
6708bb27 | 1375 | pr_debug("%c", wwn->revision[i]); |
c66ac9db | 1376 | else |
6708bb27 | 1377 | pr_debug(" "); |
c66ac9db | 1378 | |
6708bb27 | 1379 | pr_debug("\n"); |
c66ac9db | 1380 | |
e3d6f909 | 1381 | device_type = dev->transport->get_device_type(dev); |
6708bb27 AG |
1382 | pr_debug(" Type: %s ", scsi_device_type(device_type)); |
1383 | pr_debug(" ANSI SCSI revision: %02x\n", | |
e3d6f909 | 1384 | dev->transport->get_device_rev(dev)); |
c66ac9db NB |
1385 | } |
1386 | ||
1387 | struct se_device *transport_add_device_to_core_hba( | |
1388 | struct se_hba *hba, | |
1389 | struct se_subsystem_api *transport, | |
1390 | struct se_subsystem_dev *se_dev, | |
1391 | u32 device_flags, | |
1392 | void *transport_dev, | |
1393 | struct se_dev_limits *dev_limits, | |
1394 | const char *inquiry_prod, | |
1395 | const char *inquiry_rev) | |
1396 | { | |
12a18bdc | 1397 | int force_pt; |
c66ac9db NB |
1398 | struct se_device *dev; |
1399 | ||
1400 | dev = kzalloc(sizeof(struct se_device), GFP_KERNEL); | |
6708bb27 AG |
1401 | if (!dev) { |
1402 | pr_err("Unable to allocate memory for se_dev_t\n"); | |
c66ac9db NB |
1403 | return NULL; |
1404 | } | |
c66ac9db | 1405 | |
e3d6f909 | 1406 | transport_init_queue_obj(&dev->dev_queue_obj); |
c66ac9db NB |
1407 | dev->dev_flags = device_flags; |
1408 | dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; | |
5951146d | 1409 | dev->dev_ptr = transport_dev; |
c66ac9db NB |
1410 | dev->se_hba = hba; |
1411 | dev->se_sub_dev = se_dev; | |
1412 | dev->transport = transport; | |
1413 | atomic_set(&dev->active_cmds, 0); | |
1414 | INIT_LIST_HEAD(&dev->dev_list); | |
1415 | INIT_LIST_HEAD(&dev->dev_sep_list); | |
1416 | INIT_LIST_HEAD(&dev->dev_tmr_list); | |
1417 | INIT_LIST_HEAD(&dev->execute_task_list); | |
1418 | INIT_LIST_HEAD(&dev->delayed_cmd_list); | |
1419 | INIT_LIST_HEAD(&dev->ordered_cmd_list); | |
1420 | INIT_LIST_HEAD(&dev->state_task_list); | |
07bde79a | 1421 | INIT_LIST_HEAD(&dev->qf_cmd_list); |
c66ac9db NB |
1422 | spin_lock_init(&dev->execute_task_lock); |
1423 | spin_lock_init(&dev->delayed_cmd_lock); | |
1424 | spin_lock_init(&dev->ordered_cmd_lock); | |
1425 | spin_lock_init(&dev->state_task_lock); | |
1426 | spin_lock_init(&dev->dev_alua_lock); | |
1427 | spin_lock_init(&dev->dev_reservation_lock); | |
1428 | spin_lock_init(&dev->dev_status_lock); | |
1429 | spin_lock_init(&dev->dev_status_thr_lock); | |
1430 | spin_lock_init(&dev->se_port_lock); | |
1431 | spin_lock_init(&dev->se_tmr_lock); | |
07bde79a | 1432 | spin_lock_init(&dev->qf_cmd_lock); |
c66ac9db NB |
1433 | |
1434 | dev->queue_depth = dev_limits->queue_depth; | |
1435 | atomic_set(&dev->depth_left, dev->queue_depth); | |
1436 | atomic_set(&dev->dev_ordered_id, 0); | |
1437 | ||
1438 | se_dev_set_default_attribs(dev, dev_limits); | |
1439 | ||
1440 | dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX); | |
1441 | dev->creation_time = get_jiffies_64(); | |
1442 | spin_lock_init(&dev->stats_lock); | |
1443 | ||
1444 | spin_lock(&hba->device_lock); | |
1445 | list_add_tail(&dev->dev_list, &hba->hba_dev_list); | |
1446 | hba->dev_count++; | |
1447 | spin_unlock(&hba->device_lock); | |
1448 | /* | |
1449 | * Setup the SAM Task Attribute emulation for struct se_device | |
1450 | */ | |
1451 | core_setup_task_attr_emulation(dev); | |
1452 | /* | |
1453 | * Force PR and ALUA passthrough emulation with internal object use. | |
1454 | */ | |
1455 | force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE); | |
1456 | /* | |
1457 | * Setup the Reservations infrastructure for struct se_device | |
1458 | */ | |
1459 | core_setup_reservations(dev, force_pt); | |
1460 | /* | |
1461 | * Setup the Asymmetric Logical Unit Access (ALUA) for struct se_device |
1462 | */ | |
1463 | if (core_setup_alua(dev, force_pt) < 0) | |
1464 | goto out; | |
1465 | ||
1466 | /* | |
1467 | * Startup the struct se_device processing thread | |
1468 | */ | |
1469 | dev->process_thread = kthread_run(transport_processing_thread, dev, | |
e3d6f909 | 1470 | "LIO_%s", dev->transport->name); |
c66ac9db | 1471 | if (IS_ERR(dev->process_thread)) { |
6708bb27 | 1472 | pr_err("Unable to create kthread: LIO_%s\n", |
e3d6f909 | 1473 | dev->transport->name); |
c66ac9db NB |
1474 | goto out; |
1475 | } | |
07bde79a NB |
1476 | /* |
1477 | * Setup work_queue for QUEUE_FULL | |
1478 | */ | |
1479 | INIT_WORK(&dev->qf_work_queue, target_qf_do_work); | |
c66ac9db NB |
1480 | /* |
1481 | * Preload the initial INQUIRY const values if we are doing | |
1482 | * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI | |
1483 | * passthrough because this is being provided by the backend LLD. | |
1484 | * This is required so that transport_get_inquiry() copies these | |
1485 | * originals once back into DEV_T10_WWN(dev) for the virtual device | |
1486 | * setup. | |
1487 | */ | |
e3d6f909 | 1488 | if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { |
f22c1196 | 1489 | if (!inquiry_prod || !inquiry_rev) { |
6708bb27 | 1490 | pr_err("All non TCM/pSCSI plugins require" |
c66ac9db NB |
1491 | " INQUIRY consts\n"); |
1492 | goto out; | |
1493 | } | |
1494 | ||
e3d6f909 AG |
1495 | strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8); |
1496 | strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16); | |
1497 | strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4); | |
c66ac9db NB |
1498 | } |
1499 | scsi_dump_inquiry(dev); | |
1500 | ||
12a18bdc | 1501 | return dev; |
c66ac9db | 1502 | out: |
c66ac9db NB |
1503 | kthread_stop(dev->process_thread); |
1504 | ||
1505 | spin_lock(&hba->device_lock); | |
1506 | list_del(&dev->dev_list); | |
1507 | hba->dev_count--; | |
1508 | spin_unlock(&hba->device_lock); | |
1509 | ||
1510 | se_release_vpd_for_dev(dev); | |
1511 | ||
c66ac9db NB |
1512 | kfree(dev); |
1513 | ||
1514 | return NULL; | |
1515 | } | |
1516 | EXPORT_SYMBOL(transport_add_device_to_core_hba); | |
1517 | ||
1518 | /* transport_generic_prepare_cdb(): | |
1519 | * | |
1520 | * Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will | |
1521 | * contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2. | |
1522 | * Because we map iSCSI LUNs to SCSI Target IDs, a non-zero LUN |
1523 | * value left in the CDB would confuse the backend devices and |
1524 | * HBAs, so it is cleared here. |
1525 | */ | |
1526 | static inline void transport_generic_prepare_cdb( | |
1527 | unsigned char *cdb) | |
1528 | { | |
1529 | switch (cdb[0]) { | |
1530 | case READ_10: /* SBC - RDProtect */ | |
1531 | case READ_12: /* SBC - RDProtect */ | |
1532 | case READ_16: /* SBC - RDProtect */ | |
1533 | case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */ | |
1534 | case VERIFY: /* SBC - VRProtect */ | |
1535 | case VERIFY_16: /* SBC - VRProtect */ | |
1536 | case WRITE_VERIFY: /* SBC - VRProtect */ | |
1537 | case WRITE_VERIFY_12: /* SBC - VRProtect */ | |
1538 | break; | |
1539 | default: | |
1540 | cdb[1] &= 0x1f; /* clear logical unit number */ | |
1541 | break; | |
1542 | } | |
1543 | } | |
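/*
 * Illustrative sketch (not part of the driver): the default case above
 * clears the SAM-2 LUN field, which occupies bits 7-5 of CDB byte 1,
 * while preserving the low five bits of that byte.
 */
static inline unsigned char example_clear_cdb_lun(unsigned char cdb_byte1)
{
	/* e.g. 0x40 (LUN 2 in bits 7-5) -> 0x00, 0x5a -> 0x1a */
	return cdb_byte1 & 0x1f;
}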
1544 | ||
1545 | static struct se_task * | |
1546 | transport_generic_get_task(struct se_cmd *cmd, | |
1547 | enum dma_data_direction data_direction) | |
1548 | { | |
1549 | struct se_task *task; | |
5951146d | 1550 | struct se_device *dev = cmd->se_dev; |
c66ac9db | 1551 | |
6708bb27 | 1552 | task = dev->transport->alloc_task(cmd->t_task_cdb); |
c66ac9db | 1553 | if (!task) { |
6708bb27 | 1554 | pr_err("Unable to allocate struct se_task\n"); |
c66ac9db NB |
1555 | return NULL; |
1556 | } | |
1557 | ||
1558 | INIT_LIST_HEAD(&task->t_list); | |
1559 | INIT_LIST_HEAD(&task->t_execute_list); | |
1560 | INIT_LIST_HEAD(&task->t_state_list); | |
1561 | init_completion(&task->task_stop_comp); | |
c66ac9db NB |
1562 | task->task_se_cmd = cmd; |
1563 | task->se_dev = dev; | |
1564 | task->task_data_direction = data_direction; | |
1565 | ||
c66ac9db NB |
1566 | return task; |
1567 | } | |
1568 | ||
1569 | static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *); | |
1570 | ||
c66ac9db NB |
1571 | /* |
1572 | * Used by fabric modules containing a local struct se_cmd within their | |
1573 | * fabric dependent per I/O descriptor. | |
1574 | */ | |
1575 | void transport_init_se_cmd( | |
1576 | struct se_cmd *cmd, | |
1577 | struct target_core_fabric_ops *tfo, | |
1578 | struct se_session *se_sess, | |
1579 | u32 data_length, | |
1580 | int data_direction, | |
1581 | int task_attr, | |
1582 | unsigned char *sense_buffer) | |
1583 | { | |
5951146d AG |
1584 | INIT_LIST_HEAD(&cmd->se_lun_node); |
1585 | INIT_LIST_HEAD(&cmd->se_delayed_node); | |
1586 | INIT_LIST_HEAD(&cmd->se_ordered_node); | |
07bde79a | 1587 | INIT_LIST_HEAD(&cmd->se_qf_node); |
79a7fef2 | 1588 | INIT_LIST_HEAD(&cmd->se_queue_node); |
c66ac9db | 1589 | |
a1d8b49a AG |
1590 | INIT_LIST_HEAD(&cmd->t_task_list); |
1591 | init_completion(&cmd->transport_lun_fe_stop_comp); | |
1592 | init_completion(&cmd->transport_lun_stop_comp); | |
1593 | init_completion(&cmd->t_transport_stop_comp); | |
1594 | spin_lock_init(&cmd->t_state_lock); | |
1595 | atomic_set(&cmd->transport_dev_active, 1); | |
c66ac9db NB |
1596 | |
1597 | cmd->se_tfo = tfo; | |
1598 | cmd->se_sess = se_sess; | |
1599 | cmd->data_length = data_length; | |
1600 | cmd->data_direction = data_direction; | |
1601 | cmd->sam_task_attr = task_attr; | |
1602 | cmd->sense_buffer = sense_buffer; | |
1603 | } | |
1604 | EXPORT_SYMBOL(transport_init_se_cmd); | |
1605 | ||
1606 | static int transport_check_alloc_task_attr(struct se_cmd *cmd) | |
1607 | { | |
1608 | /* | |
1609 | * Check if SAM Task Attribute emulation is enabled for this | |
1610 | * struct se_device storage object | |
1611 | */ | |
5951146d | 1612 | if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) |
c66ac9db NB |
1613 | return 0; |
1614 | ||
e66ecd50 | 1615 | if (cmd->sam_task_attr == MSG_ACA_TAG) { |
6708bb27 | 1616 | pr_debug("SAM Task Attribute ACA" |
c66ac9db | 1617 | " emulation is not supported\n"); |
e3d6f909 | 1618 | return -EINVAL; |
c66ac9db NB |
1619 | } |
1620 | /* | |
1621 | * Used to determine when ORDERED commands should go from | |
1622 | * Dormant to Active status. | |
1623 | */ | |
5951146d | 1624 | cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id); |
c66ac9db | 1625 | smp_mb__after_atomic_inc(); |
6708bb27 | 1626 | pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", |
c66ac9db | 1627 | cmd->se_ordered_id, cmd->sam_task_attr, |
6708bb27 | 1628 | cmd->se_dev->transport->name); |
c66ac9db NB |
1629 | return 0; |
1630 | } | |
1631 | ||
c66ac9db NB |
1632 | /* transport_generic_allocate_tasks(): |
1633 | * | |
1634 | * Called from fabric RX Thread. | |
1635 | */ | |
1636 | int transport_generic_allocate_tasks( | |
1637 | struct se_cmd *cmd, | |
1638 | unsigned char *cdb) | |
1639 | { | |
1640 | int ret; | |
1641 | ||
1642 | transport_generic_prepare_cdb(cdb); | |
c66ac9db NB |
1643 | /* |
1644 | * Ensure that the received CDB is less than the max (252 + 8) bytes | |
1645 | * for VARIABLE_LENGTH_CMD | |
1646 | */ | |
1647 | if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { | |
6708bb27 | 1648 | pr_err("Received SCSI CDB with command_size: %d that" |
c66ac9db NB |
1649 | " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", |
1650 | scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); | |
e3d6f909 | 1651 | return -EINVAL; |
c66ac9db NB |
1652 | } |
1653 | /* | |
1654 | * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, | |
1655 | * allocate the additional extended CDB buffer now.. Otherwise | |
1656 | * setup the pointer from __t_task_cdb to t_task_cdb. | |
1657 | */ | |
a1d8b49a AG |
1658 | if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { |
1659 | cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), | |
c66ac9db | 1660 | GFP_KERNEL); |
6708bb27 AG |
1661 | if (!cmd->t_task_cdb) { |
1662 | pr_err("Unable to allocate cmd->t_task_cdb" | |
a1d8b49a | 1663 | " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", |
c66ac9db | 1664 | scsi_command_size(cdb), |
a1d8b49a | 1665 | (unsigned long)sizeof(cmd->__t_task_cdb)); |
e3d6f909 | 1666 | return -ENOMEM; |
c66ac9db NB |
1667 | } |
1668 | } else | |
a1d8b49a | 1669 | cmd->t_task_cdb = &cmd->__t_task_cdb[0]; |
c66ac9db | 1670 | /* |
a1d8b49a | 1671 | * Copy the original CDB into cmd->t_task_cdb. |
c66ac9db | 1672 | */ |
a1d8b49a | 1673 | memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb)); |
c66ac9db NB |
1674 | /* |
1675 | * Setup the received CDB based on SCSI defined opcodes and | |
1676 | * perform unit attention, persistent reservations and ALUA | |
a1d8b49a | 1677 | * checks for virtual device backends. The cmd->t_task_cdb |
c66ac9db NB |
1678 | * pointer is expected to be setup before we reach this point. |
1679 | */ | |
1680 | ret = transport_generic_cmd_sequencer(cmd, cdb); | |
1681 | if (ret < 0) | |
1682 | return ret; | |
1683 | /* | |
1684 | * Check for SAM Task Attribute Emulation | |
1685 | */ | |
1686 | if (transport_check_alloc_task_attr(cmd) < 0) { | |
1687 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
1688 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | |
5951146d | 1689 | return -EINVAL; |
c66ac9db NB |
1690 | } |
1691 | spin_lock(&cmd->se_lun->lun_sep_lock); | |
1692 | if (cmd->se_lun->lun_sep) | |
1693 | cmd->se_lun->lun_sep->sep_stats.cmd_pdus++; | |
1694 | spin_unlock(&cmd->se_lun->lun_sep_lock); | |
1695 | return 0; | |
1696 | } | |
1697 | EXPORT_SYMBOL(transport_generic_allocate_tasks); | |
1698 | ||
dd8ae59d NB |
1699 | static void transport_generic_request_failure(struct se_cmd *, |
1700 | struct se_device *, int, int); | |
695434e1 NB |
1701 | /* |
1702 | * Used by fabric module frontends to queue tasks directly. | |
1703 | * May only be used from process context. |
1704 | */ | |
1705 | int transport_handle_cdb_direct( | |
1706 | struct se_cmd *cmd) | |
1707 | { | |
dd8ae59d NB |
1708 | int ret; |
1709 | ||
695434e1 NB |
1710 | if (!cmd->se_lun) { |
1711 | dump_stack(); | |
6708bb27 | 1712 | pr_err("cmd->se_lun is NULL\n"); |
695434e1 NB |
1713 | return -EINVAL; |
1714 | } | |
1715 | if (in_interrupt()) { | |
1716 | dump_stack(); | |
6708bb27 | 1717 | pr_err("transport_generic_handle_cdb cannot be called" |
695434e1 NB |
1718 | " from interrupt context\n"); |
1719 | return -EINVAL; | |
1720 | } | |
dd8ae59d NB |
1721 | /* |
1722 | * Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following | |
1723 | * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue() | |
1724 | * in existing usage to ensure that outstanding descriptors are handled | |
d14921d6 | 1725 | * correctly during shutdown via transport_wait_for_tasks() |
dd8ae59d NB |
1726 | * |
1727 | * Also, we don't take cmd->t_state_lock here as we only expect | |
1728 | * this to be called for initial descriptor submission. | |
1729 | */ | |
1730 | cmd->t_state = TRANSPORT_NEW_CMD; | |
1731 | atomic_set(&cmd->t_transport_active, 1); | |
1732 | /* | |
1733 | * transport_generic_new_cmd() is already handling QUEUE_FULL, | |
1734 | * so follow TRANSPORT_NEW_CMD processing thread context usage | |
1735 | * and call transport_generic_request_failure() if necessary.. | |
1736 | */ | |
1737 | ret = transport_generic_new_cmd(cmd); | |
1738 | if (ret == -EAGAIN) | |
1739 | return 0; | |
1740 | else if (ret < 0) { | |
1741 | cmd->transport_error_status = ret; | |
1742 | transport_generic_request_failure(cmd, NULL, 0, | |
1743 | (cmd->data_direction != DMA_TO_DEVICE)); | |
1744 | } | |
1745 | return 0; | |
695434e1 NB |
1746 | } |
1747 | EXPORT_SYMBOL(transport_handle_cdb_direct); | |
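/*
 * Hedged usage sketch (not part of the driver): how a hypothetical fabric
 * frontend might drive the exported entry points above from its RX path.
 * struct example_fabric_cmd and example_fabric_ops are made-up names used
 * only for illustration; a real caller would also resolve the unpacked LUN
 * (e.g. via transport_lookup_cmd_lun()) before sequencing the CDB.
 */
static struct target_core_fabric_ops example_fabric_ops;	/* normally filled with fabric callbacks */

struct example_fabric_cmd {					/* hypothetical per-I/O descriptor */
	struct se_cmd		se_cmd;
	struct se_session	*se_sess;
	unsigned char		*cdb;
	unsigned char		sense_buffer[TRANSPORT_SENSE_BUFFER];
	u32			data_length;
	int			data_direction;
};

static int example_fabric_submit_cmd(struct example_fabric_cmd *fcmd)
{
	struct se_cmd *se_cmd = &fcmd->se_cmd;
	int ret;

	/* 1) Initialize the generic command descriptor embedded in the fabric one */
	transport_init_se_cmd(se_cmd, &example_fabric_ops, fcmd->se_sess,
			fcmd->data_length, fcmd->data_direction,
			MSG_SIMPLE_TAG, &fcmd->sense_buffer[0]);
	/* 2) Sequence the received CDB and allocate tasks for it */
	ret = transport_generic_allocate_tasks(se_cmd, fcmd->cdb);
	if (ret < 0)
		return ret;
	/* 3) Hand off for execution; must be called from process context */
	return transport_handle_cdb_direct(se_cmd);
}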
1748 | ||
c66ac9db NB |
1749 | /* |
1750 | * Used by fabric module frontends defining a TFO->new_cmd_map() caller | |
1751 | * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to | |
1752 | * complete setup in TCM process context w/ TFO->new_cmd_map(). | |
1753 | */ | |
1754 | int transport_generic_handle_cdb_map( | |
1755 | struct se_cmd *cmd) | |
1756 | { | |
e3d6f909 | 1757 | if (!cmd->se_lun) { |
c66ac9db | 1758 | dump_stack(); |
6708bb27 | 1759 | pr_err("cmd->se_lun is NULL\n"); |
e3d6f909 | 1760 | return -EINVAL; |
c66ac9db NB |
1761 | } |
1762 | ||
1763 | transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP); | |
1764 | return 0; | |
1765 | } | |
1766 | EXPORT_SYMBOL(transport_generic_handle_cdb_map); | |
1767 | ||
1768 | /* transport_generic_handle_data(): | |
1769 | * | |
1770 | * | |
1771 | */ | |
1772 | int transport_generic_handle_data( | |
1773 | struct se_cmd *cmd) | |
1774 | { | |
1775 | /* | |
1776 | * For the software fabric case, we assume the nexus is being |
1777 | * failed/shutdown when signals are pending from the kthread context | |
1778 | * caller, so we return a failure. For the HW target mode case running | |
1779 | * in interrupt code, the signal_pending() check is skipped. | |
1780 | */ | |
1781 | if (!in_interrupt() && signal_pending(current)) | |
e3d6f909 | 1782 | return -EPERM; |
c66ac9db NB |
1783 | /* |
1784 | * If the received CDB has already been ABORTED by the generic |
1785 | * target engine, we now call transport_check_aborted_status() |
1786 | * to queue any delayed TASK_ABORTED status for the received CDB to the |
25985edc | 1787 | * fabric module as we are expecting no further incoming DATA OUT |
c66ac9db NB |
1788 | * sequences at this point. |
1789 | */ | |
1790 | if (transport_check_aborted_status(cmd, 1) != 0) | |
1791 | return 0; | |
1792 | ||
1793 | transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE); | |
1794 | return 0; | |
1795 | } | |
1796 | EXPORT_SYMBOL(transport_generic_handle_data); | |
1797 | ||
1798 | /* transport_generic_handle_tmr(): | |
1799 | * | |
1800 | * | |
1801 | */ | |
1802 | int transport_generic_handle_tmr( | |
1803 | struct se_cmd *cmd) | |
1804 | { | |
c66ac9db NB |
1805 | transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR); |
1806 | return 0; | |
1807 | } | |
1808 | EXPORT_SYMBOL(transport_generic_handle_tmr); | |
1809 | ||
f4366772 NB |
1810 | void transport_generic_free_cmd_intr( |
1811 | struct se_cmd *cmd) | |
1812 | { | |
1813 | transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR); | |
1814 | } | |
1815 | EXPORT_SYMBOL(transport_generic_free_cmd_intr); | |
1816 | ||
c66ac9db NB |
1817 | static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) |
1818 | { | |
1819 | struct se_task *task, *task_tmp; | |
1820 | unsigned long flags; | |
1821 | int ret = 0; | |
1822 | ||
6708bb27 | 1823 | pr_debug("ITT[0x%08x] - Stopping tasks\n", |
e3d6f909 | 1824 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db NB |
1825 | |
1826 | /* | |
1827 | * No tasks remain in the execution queue | |
1828 | */ | |
a1d8b49a | 1829 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 1830 | list_for_each_entry_safe(task, task_tmp, |
a1d8b49a | 1831 | &cmd->t_task_list, t_list) { |
6708bb27 | 1832 | pr_debug("task_no[%d] - Processing task %p\n", |
c66ac9db NB |
1833 | task->task_no, task); |
1834 | /* | |
1835 | * If the struct se_task has not been sent and is not active, | |
1836 | * remove the struct se_task from the execution queue. | |
1837 | */ | |
1838 | if (!atomic_read(&task->task_sent) && | |
1839 | !atomic_read(&task->task_active)) { | |
a1d8b49a | 1840 | spin_unlock_irqrestore(&cmd->t_state_lock, |
c66ac9db NB |
1841 | flags); |
1842 | transport_remove_task_from_execute_queue(task, | |
1843 | task->se_dev); | |
1844 | ||
6708bb27 | 1845 | pr_debug("task_no[%d] - Removed from execute queue\n", |
c66ac9db | 1846 | task->task_no); |
a1d8b49a | 1847 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db NB |
1848 | continue; |
1849 | } | |
1850 | ||
1851 | /* | |
1852 | * If the struct se_task is active, sleep until it is returned | |
1853 | * from the plugin. | |
1854 | */ | |
1855 | if (atomic_read(&task->task_active)) { | |
1856 | atomic_set(&task->task_stop, 1); | |
a1d8b49a | 1857 | spin_unlock_irqrestore(&cmd->t_state_lock, |
c66ac9db NB |
1858 | flags); |
1859 | ||
6708bb27 | 1860 | pr_debug("task_no[%d] - Waiting to complete\n", |
c66ac9db NB |
1861 | task->task_no); |
1862 | wait_for_completion(&task->task_stop_comp); | |
6708bb27 | 1863 | pr_debug("task_no[%d] - Stopped successfully\n", |
c66ac9db NB |
1864 | task->task_no); |
1865 | ||
a1d8b49a AG |
1866 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
1867 | atomic_dec(&cmd->t_task_cdbs_left); | |
c66ac9db NB |
1868 | |
1869 | atomic_set(&task->task_active, 0); | |
1870 | atomic_set(&task->task_stop, 0); | |
1871 | } else { | |
6708bb27 | 1872 | pr_debug("task_no[%d] - Did nothing\n", task->task_no); |
c66ac9db NB |
1873 | ret++; |
1874 | } | |
1875 | ||
1876 | __transport_stop_task_timer(task, &flags); | |
1877 | } | |
a1d8b49a | 1878 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
1879 | |
1880 | return ret; | |
1881 | } | |
1882 | ||
c66ac9db NB |
1883 | /* |
1884 | * Handle SAM-esque emulation for generic transport request failures. | |
1885 | */ | |
1886 | static void transport_generic_request_failure( | |
1887 | struct se_cmd *cmd, | |
1888 | struct se_device *dev, | |
1889 | int complete, | |
1890 | int sc) | |
1891 | { | |
07bde79a NB |
1892 | int ret = 0; |
1893 | ||
6708bb27 | 1894 | pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" |
e3d6f909 | 1895 | " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), |
a1d8b49a | 1896 | cmd->t_task_cdb[0]); |
6708bb27 | 1897 | pr_debug("-----[ i_state: %d t_state/def_t_state:" |
c66ac9db | 1898 | " %d/%d transport_error_status: %d\n", |
e3d6f909 | 1899 | cmd->se_tfo->get_cmd_state(cmd), |
c66ac9db NB |
1900 | cmd->t_state, cmd->deferred_t_state, |
1901 | cmd->transport_error_status); | |
6708bb27 | 1902 | pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d" |
c66ac9db NB |
1903 | " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" |
1904 | " t_transport_active: %d t_transport_stop: %d" | |
6708bb27 | 1905 | " t_transport_sent: %d\n", cmd->t_task_list_num, |
a1d8b49a AG |
1906 | atomic_read(&cmd->t_task_cdbs_left), |
1907 | atomic_read(&cmd->t_task_cdbs_sent), | |
1908 | atomic_read(&cmd->t_task_cdbs_ex_left), | |
1909 | atomic_read(&cmd->t_transport_active), | |
1910 | atomic_read(&cmd->t_transport_stop), | |
1911 | atomic_read(&cmd->t_transport_sent)); | |
c66ac9db NB |
1912 | |
1913 | transport_stop_all_task_timers(cmd); | |
1914 | ||
1915 | if (dev) | |
e3d6f909 | 1916 | atomic_inc(&dev->depth_left); |
c66ac9db NB |
1917 | /* |
1918 | * For SAM Task Attribute emulation for failed struct se_cmd | |
1919 | */ | |
1920 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) | |
1921 | transport_complete_task_attr(cmd); | |
1922 | ||
1923 | if (complete) { | |
1924 | transport_direct_request_timeout(cmd); | |
1925 | cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE; | |
1926 | } | |
1927 | ||
1928 | switch (cmd->transport_error_status) { | |
1929 | case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE: | |
1930 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | |
1931 | break; | |
1932 | case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS: | |
1933 | cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY; | |
1934 | break; | |
1935 | case PYX_TRANSPORT_INVALID_CDB_FIELD: | |
1936 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | |
1937 | break; | |
1938 | case PYX_TRANSPORT_INVALID_PARAMETER_LIST: | |
1939 | cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; | |
1940 | break; | |
1941 | case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES: | |
1942 | if (!sc) | |
1943 | transport_new_cmd_failure(cmd); | |
1944 | /* | |
1945 | * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES, | |
1946 | * we force this session to fall back to session | |
1947 | * recovery. | |
1948 | */ | |
e3d6f909 AG |
1949 | cmd->se_tfo->fall_back_to_erl0(cmd->se_sess); |
1950 | cmd->se_tfo->stop_session(cmd->se_sess, 0, 0); | |
c66ac9db NB |
1951 | |
1952 | goto check_stop; | |
1953 | case PYX_TRANSPORT_LU_COMM_FAILURE: | |
1954 | case PYX_TRANSPORT_ILLEGAL_REQUEST: | |
1955 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | |
1956 | break; | |
1957 | case PYX_TRANSPORT_UNKNOWN_MODE_PAGE: | |
1958 | cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE; | |
1959 | break; | |
1960 | case PYX_TRANSPORT_WRITE_PROTECTED: | |
1961 | cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; | |
1962 | break; | |
1963 | case PYX_TRANSPORT_RESERVATION_CONFLICT: | |
1964 | /* | |
1965 | * No SENSE Data payload for this case, set SCSI Status | |
1966 | * and queue the response to $FABRIC_MOD. | |
1967 | * | |
1968 | * Uses linux/include/scsi/scsi.h SAM status codes defs | |
1969 | */ | |
1970 | cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; | |
1971 | /* | |
1972 | * For UA Interlock Code 11b, a RESERVATION CONFLICT will | |
1973 | * establish a UNIT ATTENTION with PREVIOUS RESERVATION | |
1974 | * CONFLICT STATUS. | |
1975 | * | |
1976 | * See spc4r17, section 7.4.6 Control Mode Page, Table 349 | |
1977 | */ | |
e3d6f909 AG |
1978 | if (cmd->se_sess && |
1979 | cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) | |
1980 | core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, | |
c66ac9db NB |
1981 | cmd->orig_fe_lun, 0x2C, |
1982 | ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); | |
1983 | ||
07bde79a NB |
1984 | ret = cmd->se_tfo->queue_status(cmd); |
1985 | if (ret == -EAGAIN) | |
1986 | goto queue_full; | |
c66ac9db NB |
1987 | goto check_stop; |
1988 | case PYX_TRANSPORT_USE_SENSE_REASON: | |
1989 | /* | |
1990 | * struct se_cmd->scsi_sense_reason already set | |
1991 | */ | |
1992 | break; | |
1993 | default: | |
6708bb27 | 1994 | pr_err("Unknown transport error for CDB 0x%02x: %d\n", |
a1d8b49a | 1995 | cmd->t_task_cdb[0], |
c66ac9db NB |
1996 | cmd->transport_error_status); |
1997 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | |
1998 | break; | |
1999 | } | |
16ab8e60 NB |
2000 | /* |
2001 | * If a fabric does not define a cmd->se_tfo->new_cmd_map caller, | |
2002 | * make the call to transport_send_check_condition_and_sense() | |
2003 | * directly. Otherwise expect the fabric to make the call to | |
2004 | * transport_send_check_condition_and_sense() after handling | |
2005 | * possible unsolicited write data payloads. |
2006 | */ | |
2007 | if (!sc && !cmd->se_tfo->new_cmd_map) | |
c66ac9db | 2008 | transport_new_cmd_failure(cmd); |
07bde79a NB |
2009 | else { |
2010 | ret = transport_send_check_condition_and_sense(cmd, | |
2011 | cmd->scsi_sense_reason, 0); | |
2012 | if (ret == -EAGAIN) | |
2013 | goto queue_full; | |
2014 | } | |
2015 | ||
c66ac9db NB |
2016 | check_stop: |
2017 | transport_lun_remove_cmd(cmd); | |
6708bb27 | 2018 | transport_cmd_check_stop_to_fabric(cmd); |
07bde79a NB |
2020 | return; |
2021 | ||
2022 | queue_full: | |
2023 | cmd->t_state = TRANSPORT_COMPLETE_OK; | |
2024 | transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf); | |
c66ac9db NB |
2025 | } |
2026 | ||
2027 | static void transport_direct_request_timeout(struct se_cmd *cmd) | |
2028 | { | |
2029 | unsigned long flags; | |
2030 | ||
a1d8b49a | 2031 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
6708bb27 | 2032 | if (!atomic_read(&cmd->t_transport_timeout)) { |
a1d8b49a | 2033 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2034 | return; |
2035 | } | |
a1d8b49a AG |
2036 | if (atomic_read(&cmd->t_task_cdbs_timeout_left)) { |
2037 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
c66ac9db NB |
2038 | return; |
2039 | } | |
2040 | ||
a1d8b49a AG |
2041 | atomic_sub(atomic_read(&cmd->t_transport_timeout), |
2042 | &cmd->t_se_count); | |
2043 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
c66ac9db NB |
2044 | } |
2045 | ||
2046 | static void transport_generic_request_timeout(struct se_cmd *cmd) | |
2047 | { | |
2048 | unsigned long flags; | |
2049 | ||
2050 | /* | |
e6a2573f | 2051 | * Reset cmd->t_se_count to allow transport_put_cmd() |
c66ac9db NB |
2052 | * to allow last call to free memory resources. |
2053 | */ | |
a1d8b49a AG |
2054 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
2055 | if (atomic_read(&cmd->t_transport_timeout) > 1) { | |
2056 | int tmp = (atomic_read(&cmd->t_transport_timeout) - 1); | |
c66ac9db | 2057 | |
a1d8b49a | 2058 | atomic_sub(tmp, &cmd->t_se_count); |
c66ac9db | 2059 | } |
a1d8b49a | 2060 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 2061 | |
e6a2573f | 2062 | transport_put_cmd(cmd); |
c66ac9db NB |
2063 | } |
2064 | ||
c66ac9db NB |
2065 | static inline u32 transport_lba_21(unsigned char *cdb) |
2066 | { | |
2067 | return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3]; | |
2068 | } | |
2069 | ||
2070 | static inline u32 transport_lba_32(unsigned char *cdb) | |
2071 | { | |
2072 | return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; | |
2073 | } | |
2074 | ||
2075 | static inline unsigned long long transport_lba_64(unsigned char *cdb) | |
2076 | { | |
2077 | unsigned int __v1, __v2; | |
2078 | ||
2079 | __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; | |
2080 | __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; | |
2081 | ||
2082 | return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; | |
2083 | } | |
2084 | ||
2085 | /* | |
2086 | * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs | |
2087 | */ | |
2088 | static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) | |
2089 | { | |
2090 | unsigned int __v1, __v2; | |
2091 | ||
2092 | __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15]; | |
2093 | __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19]; | |
2094 | ||
2095 | return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; | |
2096 | } | |
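/*
 * Illustrative sketch (not part of the driver): the helpers above just
 * reassemble big-endian LBA fields from the CDB, most significant byte
 * first.  For READ_16/WRITE_16 the LBA lives in bytes 2..9, so the loop
 * below produces the same value as transport_lba_64().
 */
static inline unsigned long long example_lba_64(const unsigned char *cdb)
{
	unsigned long long lba = 0;
	int i;

	for (i = 2; i <= 9; i++)	/* bytes 2..9, big-endian */
		lba = (lba << 8) | cdb[i];
	return lba;
}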
2097 | ||
2098 | static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd) | |
2099 | { | |
2100 | unsigned long flags; | |
2101 | ||
a1d8b49a | 2102 | spin_lock_irqsave(&se_cmd->t_state_lock, flags); |
c66ac9db | 2103 | se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; |
a1d8b49a | 2104 | spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); |
c66ac9db NB |
2105 | } |
2106 | ||
2107 | /* | |
2108 | * Called from interrupt context. | |
2109 | */ | |
2110 | static void transport_task_timeout_handler(unsigned long data) | |
2111 | { | |
2112 | struct se_task *task = (struct se_task *)data; | |
e3d6f909 | 2113 | struct se_cmd *cmd = task->task_se_cmd; |
c66ac9db NB |
2114 | unsigned long flags; |
2115 | ||
6708bb27 | 2116 | pr_debug("transport task timeout fired! task: %p cmd: %p\n", task, cmd); |
c66ac9db | 2117 | |
a1d8b49a | 2118 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 2119 | if (task->task_flags & TF_STOP) { |
a1d8b49a | 2120 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2121 | return; |
2122 | } | |
2123 | task->task_flags &= ~TF_RUNNING; | |
2124 | ||
2125 | /* | |
2126 | * Determine if transport_complete_task() has already been called. | |
2127 | */ | |
6708bb27 AG |
2128 | if (!atomic_read(&task->task_active)) { |
2129 | pr_debug("transport task: %p cmd: %p timeout task_active" | |
c66ac9db | 2130 | " == 0\n", task, cmd); |
a1d8b49a | 2131 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2132 | return; |
2133 | } | |
2134 | ||
a1d8b49a AG |
2135 | atomic_inc(&cmd->t_se_count); |
2136 | atomic_inc(&cmd->t_transport_timeout); | |
2137 | cmd->t_tasks_failed = 1; | |
c66ac9db NB |
2138 | |
2139 | atomic_set(&task->task_timeout, 1); | |
2140 | task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT; | |
2141 | task->task_scsi_status = 1; | |
2142 | ||
2143 | if (atomic_read(&task->task_stop)) { | |
6708bb27 | 2144 | pr_debug("transport task: %p cmd: %p timeout task_stop" |
c66ac9db | 2145 | " == 1\n", task, cmd); |
a1d8b49a | 2146 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2147 | complete(&task->task_stop_comp); |
2148 | return; | |
2149 | } | |
2150 | ||
6708bb27 AG |
2151 | if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) { |
2152 | pr_debug("transport task: %p cmd: %p timeout non zero" | |
c66ac9db | 2153 | " t_task_cdbs_left\n", task, cmd); |
a1d8b49a | 2154 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2155 | return; |
2156 | } | |
6708bb27 | 2157 | pr_debug("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n", |
c66ac9db NB |
2158 | task, cmd); |
2159 | ||
2160 | cmd->t_state = TRANSPORT_COMPLETE_FAILURE; | |
a1d8b49a | 2161 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2162 | |
2163 | transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE); | |
2164 | } | |
2165 | ||
2166 | /* | |
a1d8b49a | 2167 | * Called with cmd->t_state_lock held. |
c66ac9db NB |
2168 | */ |
2169 | static void transport_start_task_timer(struct se_task *task) | |
2170 | { | |
2171 | struct se_device *dev = task->se_dev; | |
2172 | int timeout; | |
2173 | ||
2174 | if (task->task_flags & TF_RUNNING) | |
2175 | return; | |
2176 | /* | |
2177 | * If the task_timeout is disabled, exit now. | |
2178 | */ | |
e3d6f909 | 2179 | timeout = dev->se_sub_dev->se_dev_attrib.task_timeout; |
6708bb27 | 2180 | if (!timeout) |
c66ac9db NB |
2181 | return; |
2182 | ||
2183 | init_timer(&task->task_timer); | |
2184 | task->task_timer.expires = (get_jiffies_64() + timeout * HZ); | |
2185 | task->task_timer.data = (unsigned long) task; | |
2186 | task->task_timer.function = transport_task_timeout_handler; | |
2187 | ||
2188 | task->task_flags |= TF_RUNNING; | |
2189 | add_timer(&task->task_timer); | |
2190 | #if 0 | |
6708bb27 | 2191 | pr_debug("Starting task timer for cmd: %p task: %p seconds:" |
c66ac9db NB |
2192 | " %d\n", task->task_se_cmd, task, timeout); |
2193 | #endif | |
2194 | } | |
2195 | ||
2196 | /* | |
a1d8b49a | 2197 | * Called with spin_lock_irq(&cmd->t_state_lock) held. |
c66ac9db NB |
2198 | */ |
2199 | void __transport_stop_task_timer(struct se_task *task, unsigned long *flags) | |
2200 | { | |
e3d6f909 | 2201 | struct se_cmd *cmd = task->task_se_cmd; |
c66ac9db | 2202 | |
6708bb27 | 2203 | if (!(task->task_flags & TF_RUNNING)) |
c66ac9db NB |
2204 | return; |
2205 | ||
2206 | task->task_flags |= TF_STOP; | |
a1d8b49a | 2207 | spin_unlock_irqrestore(&cmd->t_state_lock, *flags); |
c66ac9db NB |
2208 | |
2209 | del_timer_sync(&task->task_timer); | |
2210 | ||
a1d8b49a | 2211 | spin_lock_irqsave(&cmd->t_state_lock, *flags); |
c66ac9db NB |
2212 | task->task_flags &= ~TF_RUNNING; |
2213 | task->task_flags &= ~TF_STOP; | |
2214 | } | |
2215 | ||
2216 | static void transport_stop_all_task_timers(struct se_cmd *cmd) | |
2217 | { | |
2218 | struct se_task *task = NULL, *task_tmp; | |
2219 | unsigned long flags; | |
2220 | ||
a1d8b49a | 2221 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 2222 | list_for_each_entry_safe(task, task_tmp, |
a1d8b49a | 2223 | &cmd->t_task_list, t_list) |
c66ac9db | 2224 | __transport_stop_task_timer(task, &flags); |
a1d8b49a | 2225 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2226 | } |
2227 | ||
2228 | static inline int transport_tcq_window_closed(struct se_device *dev) | |
2229 | { | |
2230 | if (dev->dev_tcq_window_closed++ < | |
2231 | PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) { | |
2232 | msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT); | |
2233 | } else | |
2234 | msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG); | |
2235 | ||
e3d6f909 | 2236 | wake_up_interruptible(&dev->dev_queue_obj.thread_wq); |
c66ac9db NB |
2237 | return 0; |
2238 | } | |
2239 | ||
2240 | /* | |
2241 | * Called from Fabric Module context from transport_execute_tasks() | |
2242 | * | |
2243 | * The return of this function determines if the tasks from struct se_cmd |
2244 | * get added to the execution queue in transport_execute_tasks(), | |
2245 | * or are added to the delayed or ordered lists here. | |
2246 | */ | |
2247 | static inline int transport_execute_task_attr(struct se_cmd *cmd) | |
2248 | { | |
5951146d | 2249 | if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) |
c66ac9db NB |
2250 | return 1; |
2251 | /* | |
25985edc | 2252 | * Check for the existence of HEAD_OF_QUEUE, and if true return 1 |
c66ac9db NB |
2253 | * to allow the passed struct se_cmd list of tasks to be added to the front of the list. |
2254 | */ | |
e66ecd50 | 2255 | if (cmd->sam_task_attr == MSG_HEAD_TAG) { |
5951146d | 2256 | atomic_inc(&cmd->se_dev->dev_hoq_count); |
c66ac9db | 2257 | smp_mb__after_atomic_inc(); |
6708bb27 | 2258 | pr_debug("Added HEAD_OF_QUEUE for CDB:" |
c66ac9db | 2259 | " 0x%02x, se_ordered_id: %u\n", |
6708bb27 | 2260 | cmd->t_task_cdb[0], |
c66ac9db NB |
2261 | cmd->se_ordered_id); |
2262 | return 1; | |
e66ecd50 | 2263 | } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { |
5951146d AG |
2264 | spin_lock(&cmd->se_dev->ordered_cmd_lock); |
2265 | list_add_tail(&cmd->se_ordered_node, | |
2266 | &cmd->se_dev->ordered_cmd_list); | |
2267 | spin_unlock(&cmd->se_dev->ordered_cmd_lock); | |
c66ac9db | 2268 | |
5951146d | 2269 | atomic_inc(&cmd->se_dev->dev_ordered_sync); |
c66ac9db NB |
2270 | smp_mb__after_atomic_inc(); |
2271 | ||
6708bb27 | 2272 | pr_debug("Added ORDERED for CDB: 0x%02x to ordered" |
c66ac9db | 2273 | " list, se_ordered_id: %u\n", |
a1d8b49a | 2274 | cmd->t_task_cdb[0], |
c66ac9db NB |
2275 | cmd->se_ordered_id); |
2276 | /* | |
2277 | * Add ORDERED command to tail of execution queue if | |
2278 | * no other older commands exist that need to be | |
2279 | * completed first. | |
2280 | */ | |
6708bb27 | 2281 | if (!atomic_read(&cmd->se_dev->simple_cmds)) |
c66ac9db NB |
2282 | return 1; |
2283 | } else { | |
2284 | /* | |
2285 | * For SIMPLE and UNTAGGED Task Attribute commands | |
2286 | */ | |
5951146d | 2287 | atomic_inc(&cmd->se_dev->simple_cmds); |
c66ac9db NB |
2288 | smp_mb__after_atomic_inc(); |
2289 | } | |
2290 | /* | |
2291 | * Otherwise if one or more outstanding ORDERED task attribute exist, | |
2292 | * add the dormant task(s) built for the passed struct se_cmd to the | |
2293 | * execution queue and become in Active state for this struct se_device. | |
2294 | */ | |
5951146d | 2295 | if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) { |
c66ac9db NB |
2296 | /* |
2297 | * Otherwise, add cmd w/ tasks to delayed cmd queue that | |
25985edc | 2298 | * will be drained upon completion of HEAD_OF_QUEUE task. |
c66ac9db | 2299 | */ |
5951146d | 2300 | spin_lock(&cmd->se_dev->delayed_cmd_lock); |
c66ac9db | 2301 | cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR; |
5951146d AG |
2302 | list_add_tail(&cmd->se_delayed_node, |
2303 | &cmd->se_dev->delayed_cmd_list); | |
2304 | spin_unlock(&cmd->se_dev->delayed_cmd_lock); | |
c66ac9db | 2305 | |
6708bb27 | 2306 | pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to" |
c66ac9db | 2307 | " delayed CMD list, se_ordered_id: %u\n", |
a1d8b49a | 2308 | cmd->t_task_cdb[0], cmd->sam_task_attr, |
c66ac9db NB |
2309 | cmd->se_ordered_id); |
2310 | /* | |
2311 | * Return zero to let transport_execute_tasks() know | |
2312 | * not to add the delayed tasks to the execution list. | |
2313 | */ | |
2314 | return 0; | |
2315 | } | |
2316 | /* | |
2317 | * Otherwise, no ORDERED task attributes exist.. | |
2318 | */ | |
2319 | return 1; | |
2320 | } | |
2321 | ||
2322 | /* | |
2323 | * Called from fabric module context in transport_generic_new_cmd() and | |
2324 | * transport_generic_process_write() | |
2325 | */ | |
2326 | static int transport_execute_tasks(struct se_cmd *cmd) | |
2327 | { | |
2328 | int add_tasks; | |
2329 | ||
db1620a2 CH |
2330 | if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) { |
2331 | cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE; | |
2332 | transport_generic_request_failure(cmd, NULL, 0, 1); | |
2333 | return 0; | |
c66ac9db | 2334 | } |
db1620a2 | 2335 | |
c66ac9db NB |
2336 | /* |
2337 | * Call transport_cmd_check_stop() to see if a fabric exception | |
25985edc | 2338 | * has occurred that prevents execution. |
c66ac9db | 2339 | */ |
6708bb27 | 2340 | if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) { |
c66ac9db NB |
2341 | /* |
2342 | * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE | |
2343 | * attribute for the tasks of the received struct se_cmd CDB | |
2344 | */ | |
2345 | add_tasks = transport_execute_task_attr(cmd); | |
e3d6f909 | 2346 | if (!add_tasks) |
c66ac9db NB |
2347 | goto execute_tasks; |
2348 | /* | |
2349 | * This calls transport_add_tasks_from_cmd() to handle | |
2350 | * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation | |
2351 | * (if enabled) in __transport_add_task_to_execute_queue() and | |
2352 | * transport_add_task_check_sam_attr(). | |
2353 | */ | |
2354 | transport_add_tasks_from_cmd(cmd); | |
2355 | } | |
2356 | /* | |
2357 | * Kick the execution queue for the cmd associated struct se_device | |
2358 | * storage object. | |
2359 | */ | |
2360 | execute_tasks: | |
5951146d | 2361 | __transport_execute_tasks(cmd->se_dev); |
c66ac9db NB |
2362 | return 0; |
2363 | } | |
2364 | ||
2365 | /* | |
2366 | * Called to check struct se_device tcq depth window, and once open pull struct se_task | |
2367 | * from struct se_device->execute_task_list and dispatch it to the backend. |
2368 | * | |
2369 | * Called from transport_processing_thread() | |
2370 | */ | |
2371 | static int __transport_execute_tasks(struct se_device *dev) | |
2372 | { | |
2373 | int error; | |
2374 | struct se_cmd *cmd = NULL; | |
e3d6f909 | 2375 | struct se_task *task = NULL; |
c66ac9db NB |
2376 | unsigned long flags; |
2377 | ||
2378 | /* | |
2379 | * Check if there is enough room in the device and HBA queue to send | |
a1d8b49a | 2380 | * struct se_tasks to the selected transport. |
c66ac9db NB |
2381 | */ |
2382 | check_depth: | |
e3d6f909 | 2383 | if (!atomic_read(&dev->depth_left)) |
c66ac9db | 2384 | return transport_tcq_window_closed(dev); |
c66ac9db | 2385 | |
e3d6f909 | 2386 | dev->dev_tcq_window_closed = 0; |
c66ac9db | 2387 | |
e3d6f909 AG |
2388 | spin_lock_irq(&dev->execute_task_lock); |
2389 | if (list_empty(&dev->execute_task_list)) { | |
2390 | spin_unlock_irq(&dev->execute_task_lock); | |
c66ac9db NB |
2391 | return 0; |
2392 | } | |
e3d6f909 AG |
2393 | task = list_first_entry(&dev->execute_task_list, |
2394 | struct se_task, t_execute_list); | |
2395 | list_del(&task->t_execute_list); | |
2396 | atomic_set(&task->task_execute_queue, 0); | |
2397 | atomic_dec(&dev->execute_tasks); | |
2398 | spin_unlock_irq(&dev->execute_task_lock); | |
c66ac9db NB |
2399 | |
2400 | atomic_dec(&dev->depth_left); | |
c66ac9db | 2401 | |
e3d6f909 | 2402 | cmd = task->task_se_cmd; |
c66ac9db | 2403 | |
a1d8b49a | 2404 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db NB |
2405 | atomic_set(&task->task_active, 1); |
2406 | atomic_set(&task->task_sent, 1); | |
a1d8b49a | 2407 | atomic_inc(&cmd->t_task_cdbs_sent); |
c66ac9db | 2408 | |
a1d8b49a AG |
2409 | if (atomic_read(&cmd->t_task_cdbs_sent) == |
2410 | cmd->t_task_list_num) | |
c66ac9db NB |
2411 | atomic_set(&cmd->transport_sent, 1); |
2412 | ||
2413 | transport_start_task_timer(task); | |
a1d8b49a | 2414 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2415 | /* |
2416 | * The struct se_cmd->transport_emulate_cdb() function pointer is used | |
e3d6f909 | 2417 | * to grab REPORT_LUNS and other CDBs we want to handle before they hit the |
c66ac9db NB |
2418 | * struct se_subsystem_api->do_task() caller below. |
2419 | */ | |
2420 | if (cmd->transport_emulate_cdb) { | |
2421 | error = cmd->transport_emulate_cdb(cmd); | |
2422 | if (error != 0) { | |
2423 | cmd->transport_error_status = error; | |
2424 | atomic_set(&task->task_active, 0); | |
2425 | atomic_set(&cmd->transport_sent, 0); | |
2426 | transport_stop_tasks_for_cmd(cmd); | |
2427 | transport_generic_request_failure(cmd, dev, 0, 1); | |
2428 | goto check_depth; | |
2429 | } | |
2430 | /* | |
2431 | * Handle the successful completion for transport_emulate_cdb() | |
2432 | * when operating synchronously, i.e. when SCF_EMULATE_CDB_ASYNC is not set. |
2433 | * Otherwise the caller is expected to complete the task with | |
2434 | * proper status. | |
2435 | */ | |
2436 | if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) { | |
2437 | cmd->scsi_status = SAM_STAT_GOOD; | |
2438 | task->task_scsi_status = GOOD; | |
2439 | transport_complete_task(task, 1); | |
2440 | } | |
2441 | } else { | |
2442 | /* | |
2443 | * Currently for all virtual TCM plugins including IBLOCK, FILEIO and | |
2444 | * RAMDISK we use the internal transport_emulate_control_cdb() logic | |
2445 | * with struct se_subsystem_api callers for the primary SPC-3 TYPE_DISK | |
2446 | * LUN emulation code. | |
2447 | * | |
2448 | * For TCM/pSCSI and all other SCF_SCSI_DATA_SG_IO_CDB I/O tasks we | |
2449 | * call ->do_task() directly and let the underlying TCM subsystem plugin | |
2450 | * code handle the CDB emulation. | |
2451 | */ | |
e3d6f909 AG |
2452 | if ((dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) && |
2453 | (!(task->task_se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))) | |
c66ac9db NB |
2454 | error = transport_emulate_control_cdb(task); |
2455 | else | |
e3d6f909 | 2456 | error = dev->transport->do_task(task); |
c66ac9db NB |
2457 | |
2458 | if (error != 0) { | |
2459 | cmd->transport_error_status = error; | |
2460 | atomic_set(&task->task_active, 0); | |
2461 | atomic_set(&cmd->transport_sent, 0); | |
2462 | transport_stop_tasks_for_cmd(cmd); | |
2463 | transport_generic_request_failure(cmd, dev, 0, 1); | |
2464 | } | |
2465 | } | |
2466 | ||
2467 | goto check_depth; | |
2468 | ||
2469 | return 0; | |
2470 | } | |
2471 | ||
2472 | void transport_new_cmd_failure(struct se_cmd *se_cmd) | |
2473 | { | |
2474 | unsigned long flags; | |
2475 | /* | |
2476 | * Any unsolicited data will get dumped for failed command inside of | |
2477 | * the fabric plugin | |
2478 | */ | |
a1d8b49a | 2479 | spin_lock_irqsave(&se_cmd->t_state_lock, flags); |
c66ac9db NB |
2480 | se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED; |
2481 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
a1d8b49a | 2482 | spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); |
c66ac9db NB |
2483 | } |
2484 | ||
c66ac9db NB |
2485 | static inline u32 transport_get_sectors_6( |
2486 | unsigned char *cdb, | |
2487 | struct se_cmd *cmd, | |
2488 | int *ret) | |
2489 | { | |
5951146d | 2490 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
2491 | |
2492 | /* | |
2493 | * Assume TYPE_DISK for non struct se_device objects. | |
2494 | * Use 8-bit sector value. | |
2495 | */ | |
2496 | if (!dev) | |
2497 | goto type_disk; | |
2498 | ||
2499 | /* | |
2500 | * Use 24-bit allocation length for TYPE_TAPE. | |
2501 | */ | |
e3d6f909 | 2502 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) |
c66ac9db NB |
2503 | return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4]; |
2504 | ||
2505 | /* | |
2506 | * Everything else assume TYPE_DISK Sector CDB location. | |
2507 | * Use 8-bit sector value. | |
2508 | */ | |
2509 | type_disk: | |
2510 | return (u32)cdb[4]; | |
2511 | } | |
2512 | ||
2513 | static inline u32 transport_get_sectors_10( | |
2514 | unsigned char *cdb, | |
2515 | struct se_cmd *cmd, | |
2516 | int *ret) | |
2517 | { | |
5951146d | 2518 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
2519 | |
2520 | /* | |
2521 | * Assume TYPE_DISK for non struct se_device objects. | |
2522 | * Use 16-bit sector value. | |
2523 | */ | |
2524 | if (!dev) | |
2525 | goto type_disk; | |
2526 | ||
2527 | /* | |
2528 | * XXX_10 is not defined in SSC, throw an exception | |
2529 | */ | |
e3d6f909 AG |
2530 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) { |
2531 | *ret = -EINVAL; | |
c66ac9db NB |
2532 | return 0; |
2533 | } | |
2534 | ||
2535 | /* | |
2536 | * Everything else assume TYPE_DISK Sector CDB location. | |
2537 | * Use 16-bit sector value. | |
2538 | */ | |
2539 | type_disk: | |
2540 | return (u32)(cdb[7] << 8) + cdb[8]; | |
2541 | } | |
2542 | ||
2543 | static inline u32 transport_get_sectors_12( | |
2544 | unsigned char *cdb, | |
2545 | struct se_cmd *cmd, | |
2546 | int *ret) | |
2547 | { | |
5951146d | 2548 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
2549 | |
2550 | /* | |
2551 | * Assume TYPE_DISK for non struct se_device objects. | |
2552 | * Use 32-bit sector value. | |
2553 | */ | |
2554 | if (!dev) | |
2555 | goto type_disk; | |
2556 | ||
2557 | /* | |
2558 | * XXX_12 is not defined in SSC, throw an exception | |
2559 | */ | |
e3d6f909 AG |
2560 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) { |
2561 | *ret = -EINVAL; | |
c66ac9db NB |
2562 | return 0; |
2563 | } | |
2564 | ||
2565 | /* | |
2566 | * Everything else assume TYPE_DISK Sector CDB location. | |
2567 | * Use 32-bit sector value. | |
2568 | */ | |
2569 | type_disk: | |
2570 | return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9]; | |
2571 | } | |
2572 | ||
2573 | static inline u32 transport_get_sectors_16( | |
2574 | unsigned char *cdb, | |
2575 | struct se_cmd *cmd, | |
2576 | int *ret) | |
2577 | { | |
5951146d | 2578 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
2579 | |
2580 | /* | |
2581 | * Assume TYPE_DISK for non struct se_device objects. | |
2582 | * Use 32-bit sector value. | |
2583 | */ | |
2584 | if (!dev) | |
2585 | goto type_disk; | |
2586 | ||
2587 | /* | |
2588 | * Use 24-bit allocation length for TYPE_TAPE. | |
2589 | */ | |
e3d6f909 | 2590 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) |
c66ac9db NB |
2591 | return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14]; |
2592 | ||
2593 | type_disk: | |
2594 | return (u32)(cdb[10] << 24) + (cdb[11] << 16) + | |
2595 | (cdb[12] << 8) + cdb[13]; | |
2596 | } | |
2597 | ||
2598 | /* | |
2599 | * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants | |
2600 | */ | |
2601 | static inline u32 transport_get_sectors_32( | |
2602 | unsigned char *cdb, | |
2603 | struct se_cmd *cmd, | |
2604 | int *ret) | |
2605 | { | |
2606 | /* | |
2607 | * Assume TYPE_DISK for non struct se_device objects. | |
2608 | * Use 32-bit sector value. | |
2609 | */ | |
2610 | return (u32)(cdb[28] << 24) + (cdb[29] << 16) + | |
2611 | (cdb[30] << 8) + cdb[31]; | |
2612 | ||
2613 | } | |
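/*
 * Reference sketch (not part of the driver) consolidating the TYPE_DISK
 * transfer-length locations handled by the helpers above; the TYPE_TAPE
 * special cases are omitted here.
 */
static inline u32 example_disk_sectors(unsigned char *cdb, u32 cdb_len)
{
	switch (cdb_len) {
	case 6:
		return (u32)cdb[4];				/* 8-bit  */
	case 10:
		return (u32)(cdb[7] << 8) + cdb[8];		/* 16-bit */
	case 12:
		return (u32)(cdb[6] << 24) + (cdb[7] << 16) +
		       (cdb[8] << 8) + cdb[9];			/* 32-bit */
	case 16:
		return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
		       (cdb[12] << 8) + cdb[13];		/* 32-bit */
	case 32:
		return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
		       (cdb[30] << 8) + cdb[31];		/* 32-bit */
	default:
		return 0;
	}
}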
2614 | ||
2615 | static inline u32 transport_get_size( | |
2616 | u32 sectors, | |
2617 | unsigned char *cdb, | |
2618 | struct se_cmd *cmd) | |
2619 | { | |
5951146d | 2620 | struct se_device *dev = cmd->se_dev; |
c66ac9db | 2621 | |
e3d6f909 | 2622 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) { |
c66ac9db | 2623 | if (cdb[1] & 1) { /* sectors */ |
e3d6f909 | 2624 | return dev->se_sub_dev->se_dev_attrib.block_size * sectors; |
c66ac9db NB |
2625 | } else /* bytes */ |
2626 | return sectors; | |
2627 | } | |
2628 | #if 0 | |
6708bb27 | 2629 | pr_debug("Returning block_size: %u, sectors: %u == %u for" |
e3d6f909 AG |
2630 | " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors, |
2631 | dev->se_sub_dev->se_dev_attrib.block_size * sectors, | |
2632 | dev->transport->name); | |
c66ac9db | 2633 | #endif |
e3d6f909 | 2634 | return dev->se_sub_dev->se_dev_attrib.block_size * sectors; |
c66ac9db NB |
2635 | } |
2636 | ||
c66ac9db NB |
2637 | static void transport_xor_callback(struct se_cmd *cmd) |
2638 | { | |
2639 | unsigned char *buf, *addr; | |
ec98f782 | 2640 | struct scatterlist *sg; |
c66ac9db NB |
2641 | unsigned int offset; |
2642 | int i; | |
ec98f782 | 2643 | int count; |
c66ac9db NB |
2644 | /* |
2645 | * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command | |
2646 | * | |
2647 | * 1) read the specified logical block(s); | |
2648 | * 2) transfer logical blocks from the data-out buffer; | |
2649 | * 3) XOR the logical blocks transferred from the data-out buffer with | |
2650 | * the logical blocks read, storing the resulting XOR data in a buffer; | |
2651 | * 4) if the DISABLE WRITE bit is set to zero, then write the logical | |
2652 | * blocks transferred from the data-out buffer; and | |
2653 | * 5) transfer the resulting XOR data to the data-in buffer. | |
2654 | */ | |
2655 | buf = kmalloc(cmd->data_length, GFP_KERNEL); | |
6708bb27 AG |
2656 | if (!buf) { |
2657 | pr_err("Unable to allocate xor_callback buf\n"); | |
c66ac9db NB |
2658 | return; |
2659 | } | |
2660 | /* | |
ec98f782 | 2661 | * Copy the scatterlist WRITE buffer located at cmd->t_data_sg |
c66ac9db NB |
2662 | * into the locally allocated *buf |
2663 | */ | |
ec98f782 AG |
2664 | sg_copy_to_buffer(cmd->t_data_sg, |
2665 | cmd->t_data_nents, | |
2666 | buf, | |
2667 | cmd->data_length); | |
2668 | ||
c66ac9db NB |
2669 | /* |
2670 | * Now perform the XOR against the BIDI read memory located at | |
a1d8b49a | 2671 | * cmd->t_bidi_data_sg |
c66ac9db NB |
2672 | */ |
2673 | ||
2674 | offset = 0; | |
ec98f782 AG |
2675 | for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) { |
2676 | addr = kmap_atomic(sg_page(sg), KM_USER0); | |
2677 | if (!addr) | |
c66ac9db NB |
2678 | goto out; |
2679 | ||
ec98f782 AG |
2680 | for (i = 0; i < sg->length; i++) |
2681 | *(addr + sg->offset + i) ^= *(buf + offset + i); | |
c66ac9db | 2682 | |
ec98f782 | 2683 | offset += sg->length; |
c66ac9db NB |
2684 | kunmap_atomic(addr, KM_USER0); |
2685 | } | |
ec98f782 | 2686 | |
c66ac9db NB |
2687 | out: |
2688 | kfree(buf); | |
2689 | } | |
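/*
 * Minimal sketch (not part of the driver) of the XOR step above, i.e.
 * step 3 of the XDWRITEREAD sequence, assuming both buffers are already
 * linear; transport_xor_callback() walks the BIDI scatterlist instead.
 */
static void example_xdwriteread_xor(unsigned char *read_buf,
				    const unsigned char *data_out,
				    unsigned int length)
{
	unsigned int i;

	for (i = 0; i < length; i++)
		read_buf[i] ^= data_out[i];	/* result goes to the data-in buffer */
}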
2690 | ||
2691 | /* | |
2692 | * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd | |
2693 | */ | |
2694 | static int transport_get_sense_data(struct se_cmd *cmd) | |
2695 | { | |
2696 | unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL; | |
2697 | struct se_device *dev; | |
2698 | struct se_task *task = NULL, *task_tmp; | |
2699 | unsigned long flags; | |
2700 | u32 offset = 0; | |
2701 | ||
e3d6f909 AG |
2702 | WARN_ON(!cmd->se_lun); |
2703 | ||
a1d8b49a | 2704 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 2705 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { |
a1d8b49a | 2706 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2707 | return 0; |
2708 | } | |
2709 | ||
2710 | list_for_each_entry_safe(task, task_tmp, | |
a1d8b49a | 2711 | &cmd->t_task_list, t_list) { |
c66ac9db NB |
2712 | |
2713 | if (!task->task_sense) | |
2714 | continue; | |
2715 | ||
2716 | dev = task->se_dev; | |
6708bb27 | 2717 | if (!dev) |
c66ac9db NB |
2718 | continue; |
2719 | ||
e3d6f909 | 2720 | if (!dev->transport->get_sense_buffer) { |
6708bb27 | 2721 | pr_err("dev->transport->get_sense_buffer" |
c66ac9db NB |
2722 | " is NULL\n"); |
2723 | continue; | |
2724 | } | |
2725 | ||
e3d6f909 | 2726 | sense_buffer = dev->transport->get_sense_buffer(task); |
6708bb27 AG |
2727 | if (!sense_buffer) { |
2728 | pr_err("ITT[0x%08x]_TASK[%d]: Unable to locate" | |
c66ac9db | 2729 | " sense buffer for task with sense\n", |
e3d6f909 | 2730 | cmd->se_tfo->get_task_tag(cmd), task->task_no); |
c66ac9db NB |
2731 | continue; |
2732 | } | |
a1d8b49a | 2733 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 2734 | |
e3d6f909 | 2735 | offset = cmd->se_tfo->set_fabric_sense_len(cmd, |
c66ac9db NB |
2736 | TRANSPORT_SENSE_BUFFER); |
2737 | ||
5951146d | 2738 | memcpy(&buffer[offset], sense_buffer, |
c66ac9db NB |
2739 | TRANSPORT_SENSE_BUFFER); |
2740 | cmd->scsi_status = task->task_scsi_status; | |
2741 | /* Automatically padded */ | |
2742 | cmd->scsi_sense_length = | |
2743 | (TRANSPORT_SENSE_BUFFER + offset); | |
2744 | ||
6708bb27 | 2745 | pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x" |
c66ac9db | 2746 | " and sense\n", |
e3d6f909 | 2747 | dev->se_hba->hba_id, dev->transport->name, |
c66ac9db NB |
2748 | cmd->scsi_status); |
2749 | return 0; | |
2750 | } | |
a1d8b49a | 2751 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2752 | |
2753 | return -1; | |
2754 | } | |
2755 | ||
c66ac9db NB |
2756 | static int |
2757 | transport_handle_reservation_conflict(struct se_cmd *cmd) | |
2758 | { | |
c66ac9db NB |
2759 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
2760 | cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT; | |
2761 | cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; | |
2762 | /* | |
2763 | * For UA Interlock Code 11b, a RESERVATION CONFLICT will | |
2764 | * establish a UNIT ATTENTION with PREVIOUS RESERVATION | |
2765 | * CONFLICT STATUS. | |
2766 | * | |
2767 | * See spc4r17, section 7.4.6 Control Mode Page, Table 349 | |
2768 | */ | |
e3d6f909 AG |
2769 | if (cmd->se_sess && |
2770 | cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) | |
2771 | core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, | |
c66ac9db NB |
2772 | cmd->orig_fe_lun, 0x2C, |
2773 | ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); | |
5951146d | 2774 | return -EINVAL; |
c66ac9db NB |
2775 | } |
2776 | ||
ec98f782 AG |
2777 | static inline long long transport_dev_end_lba(struct se_device *dev) |
2778 | { | |
2779 | return dev->transport->get_blocks(dev) + 1; | |
2780 | } | |
2781 | ||
2782 | static int transport_cmd_get_valid_sectors(struct se_cmd *cmd) | |
2783 | { | |
2784 | struct se_device *dev = cmd->se_dev; | |
2785 | u32 sectors; | |
2786 | ||
2787 | if (dev->transport->get_device_type(dev) != TYPE_DISK) | |
2788 | return 0; | |
2789 | ||
2790 | sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size); | |
2791 | ||
6708bb27 AG |
2792 | if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) { |
2793 | pr_err("LBA: %llu Sectors: %u exceeds" | |
ec98f782 AG |
2794 | " transport_dev_end_lba(): %llu\n", |
2795 | cmd->t_task_lba, sectors, | |
2796 | transport_dev_end_lba(dev)); | |
7abbe7f3 | 2797 | return -EINVAL; |
ec98f782 AG |
2798 | } |
2799 | ||
7abbe7f3 | 2800 | return 0; |
ec98f782 AG |
2801 | } |
2802 | ||
706d5860 NB |
2803 | static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev) |
2804 | { | |
2805 | /* | |
2806 | * Determine if the received WRITE_SAME is used for direct |
2807 | * passthrough into Linux/SCSI with struct request via TCM/pSCSI |
2808 | * or we are signaling the use of internal WRITE_SAME + UNMAP=1 |
2809 | * emulation for Linux/BLOCK discard with TCM/IBLOCK code. |
2810 | */ | |
2811 | int passthrough = (dev->transport->transport_type == | |
2812 | TRANSPORT_PLUGIN_PHBA_PDEV); | |
2813 | ||
2814 | if (!passthrough) { | |
2815 | if ((flags[0] & 0x04) || (flags[0] & 0x02)) { | |
2816 | pr_err("WRITE_SAME PBDATA and LBDATA" | |
2817 | " bits not supported for Block Discard" | |
2818 | " Emulation\n"); | |
2819 | return -ENOSYS; | |
2820 | } | |
2821 | /* | |
2822 | * Currently for the emulated case we only accept | |
2823 | * tpws with the UNMAP=1 bit set. | |
2824 | */ | |
2825 | if (!(flags[0] & 0x08)) { | |
2826 | pr_err("WRITE_SAME w/o UNMAP bit not" | |
2827 | " supported for Block Discard Emulation\n"); | |
2828 | return -ENOSYS; | |
2829 | } | |
2830 | } | |
2831 | ||
2832 | return 0; | |
2833 | } | |
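/*
 * For reference (illustrative defines, not used by the driver): the
 * magic masks tested above correspond to the WRITE_SAME byte-1 flag
 * bits defined by SBC-3.
 */
#define EXAMPLE_WRITE_SAME_LBDATA	0x02	/* flags[0] & 0x02 */
#define EXAMPLE_WRITE_SAME_PBDATA	0x04	/* flags[0] & 0x04 */
#define EXAMPLE_WRITE_SAME_UNMAP	0x08	/* flags[0] & 0x08 */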
2834 | ||
c66ac9db NB |
2835 | /* transport_generic_cmd_sequencer(): |
2836 | * | |
2837 | * Generic Command Sequencer that should work for most DAS transport | |
2838 | * drivers. | |
2839 | * | |
2840 | * Called from transport_generic_allocate_tasks() in the $FABRIC_MOD | |
2841 | * RX Thread. | |
2842 | * | |
2843 | * FIXME: Need to support other SCSI OPCODES where as well. | |
2844 | */ | |
2845 | static int transport_generic_cmd_sequencer( | |
2846 | struct se_cmd *cmd, | |
2847 | unsigned char *cdb) | |
2848 | { | |
5951146d | 2849 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
2850 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; |
2851 | int ret = 0, sector_ret = 0, passthrough; | |
2852 | u32 sectors = 0, size = 0, pr_reg_type = 0; | |
2853 | u16 service_action; | |
2854 | u8 alua_ascq = 0; | |
2855 | /* | |
2856 | * Check for an existing UNIT ATTENTION condition | |
2857 | */ | |
2858 | if (core_scsi3_ua_check(cmd, cdb) < 0) { | |
c66ac9db NB |
2859 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
2860 | cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION; | |
5951146d | 2861 | return -EINVAL; |
c66ac9db NB |
2862 | } |
2863 | /* | |
2864 | * Check status of Asymmetric Logical Unit Assignment port | |
2865 | */ | |
e3d6f909 | 2866 | ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq); |
c66ac9db | 2867 | if (ret != 0) { |
c66ac9db | 2868 | /* |
25985edc | 2869 | * Set SCSI additional sense code (ASC) to 'LUN Not Accessible'; |
c66ac9db NB |
2870 | * The ALUA additional sense code qualifier (ASCQ) is determined |
2871 | * by the ALUA primary or secondary access state.. | |
2872 | */ | |
2873 | if (ret > 0) { | |
2874 | #if 0 | |
6708bb27 | 2875 | pr_debug("[%s]: ALUA TG Port not available," |
c66ac9db | 2876 | " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n", |
e3d6f909 | 2877 | cmd->se_tfo->get_fabric_name(), alua_ascq); |
c66ac9db NB |
2878 | #endif |
2879 | transport_set_sense_codes(cmd, 0x04, alua_ascq); | |
2880 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
2881 | cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY; | |
5951146d | 2882 | return -EINVAL; |
c66ac9db NB |
2883 | } |
2884 | goto out_invalid_cdb_field; | |
2885 | } | |
2886 | /* | |
2887 | * Check status for SPC-3 Persistent Reservations | |
2888 | */ | |
e3d6f909 AG |
2889 | if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) { |
2890 | if (su_dev->t10_pr.pr_ops.t10_seq_non_holder( | |
c66ac9db NB |
2891 | cmd, cdb, pr_reg_type) != 0) |
2892 | return transport_handle_reservation_conflict(cmd); | |
2893 | /* | |
2894 | * This means the CDB is allowed for the SCSI Initiator port | |
2895 | * when said port is *NOT* holding the legacy SPC-2 or | |
2896 | * SPC-3 Persistent Reservation. | |
2897 | */ | |
2898 | } | |
2899 | ||
2900 | switch (cdb[0]) { | |
2901 | case READ_6: | |
2902 | sectors = transport_get_sectors_6(cdb, cmd, §or_ret); | |
2903 | if (sector_ret) | |
2904 | goto out_unsupported_cdb; | |
2905 | size = transport_get_size(sectors, cdb, cmd); | |
2906 | cmd->transport_split_cdb = &split_cdb_XX_6; | |
a1d8b49a | 2907 | cmd->t_task_lba = transport_lba_21(cdb); |
c66ac9db NB |
2908 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
2909 | break; | |
2910 | case READ_10: | |
2911 | sectors = transport_get_sectors_10(cdb, cmd, §or_ret); | |
2912 | if (sector_ret) | |
2913 | goto out_unsupported_cdb; | |
2914 | size = transport_get_size(sectors, cdb, cmd); | |
2915 | cmd->transport_split_cdb = &split_cdb_XX_10; | |
a1d8b49a | 2916 | cmd->t_task_lba = transport_lba_32(cdb); |
c66ac9db NB |
2917 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
2918 | break; | |
2919 | case READ_12: | |
2920 | sectors = transport_get_sectors_12(cdb, cmd, §or_ret); | |
2921 | if (sector_ret) | |
2922 | goto out_unsupported_cdb; | |
2923 | size = transport_get_size(sectors, cdb, cmd); | |
2924 | cmd->transport_split_cdb = &split_cdb_XX_12; | |
a1d8b49a | 2925 | cmd->t_task_lba = transport_lba_32(cdb); |
c66ac9db NB |
2926 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
2927 | break; | |
2928 | case READ_16: | |
2929 | sectors = transport_get_sectors_16(cdb, cmd, §or_ret); | |
2930 | if (sector_ret) | |
2931 | goto out_unsupported_cdb; | |
2932 | size = transport_get_size(sectors, cdb, cmd); | |
2933 | cmd->transport_split_cdb = &split_cdb_XX_16; | |
a1d8b49a | 2934 | cmd->t_task_lba = transport_lba_64(cdb); |
c66ac9db NB |
2935 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
2936 | break; | |
2937 | case WRITE_6: | |
2938 | sectors = transport_get_sectors_6(cdb, cmd, §or_ret); | |
2939 | if (sector_ret) | |
2940 | goto out_unsupported_cdb; | |
2941 | size = transport_get_size(sectors, cdb, cmd); | |
2942 | cmd->transport_split_cdb = &split_cdb_XX_6; | |
a1d8b49a | 2943 | cmd->t_task_lba = transport_lba_21(cdb); |
c66ac9db NB |
2944 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
2945 | break; | |
2946 | case WRITE_10: | |
2947 | sectors = transport_get_sectors_10(cdb, cmd, §or_ret); | |
2948 | if (sector_ret) | |
2949 | goto out_unsupported_cdb; | |
2950 | size = transport_get_size(sectors, cdb, cmd); | |
2951 | cmd->transport_split_cdb = &split_cdb_XX_10; | |
a1d8b49a AG |
2952 | cmd->t_task_lba = transport_lba_32(cdb); |
2953 | cmd->t_tasks_fua = (cdb[1] & 0x8); | |
c66ac9db NB |
2954 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
2955 | break; | |
2956 | case WRITE_12: | |
2957 | sectors = transport_get_sectors_12(cdb, cmd, §or_ret); | |
2958 | if (sector_ret) | |
2959 | goto out_unsupported_cdb; | |
2960 | size = transport_get_size(sectors, cdb, cmd); | |
2961 | cmd->transport_split_cdb = &split_cdb_XX_12; | |
a1d8b49a AG |
2962 | cmd->t_task_lba = transport_lba_32(cdb); |
2963 | cmd->t_tasks_fua = (cdb[1] & 0x8); | |
c66ac9db NB |
2964 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
2965 | break; | |
2966 | case WRITE_16: | |
2967 | sectors = transport_get_sectors_16(cdb, cmd, §or_ret); | |
2968 | if (sector_ret) | |
2969 | goto out_unsupported_cdb; | |
2970 | size = transport_get_size(sectors, cdb, cmd); | |
2971 | cmd->transport_split_cdb = &split_cdb_XX_16; | |
a1d8b49a AG |
2972 | cmd->t_task_lba = transport_lba_64(cdb); |
2973 | cmd->t_tasks_fua = (cdb[1] & 0x8); | |
c66ac9db NB |
2974 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
2975 | break; | |
2976 | case XDWRITEREAD_10: | |
2977 | if ((cmd->data_direction != DMA_TO_DEVICE) || | |
a1d8b49a | 2978 | !(cmd->t_tasks_bidi)) |
c66ac9db NB |
2979 | goto out_invalid_cdb_field; |
2980 | sectors = transport_get_sectors_10(cdb, cmd, §or_ret); | |
2981 | if (sector_ret) | |
2982 | goto out_unsupported_cdb; | |
2983 | size = transport_get_size(sectors, cdb, cmd); | |
2984 | cmd->transport_split_cdb = &split_cdb_XX_10; | |
a1d8b49a | 2985 | cmd->t_task_lba = transport_lba_32(cdb); |
c66ac9db | 2986 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
e3d6f909 | 2987 | passthrough = (dev->transport->transport_type == |
c66ac9db NB |
2988 | TRANSPORT_PLUGIN_PHBA_PDEV); |
2989 | /* | |
2990 | * Skip the remaining assignments for TCM/PSCSI passthrough | |
2991 | */ | |
2992 | if (passthrough) | |
2993 | break; | |
2994 | /* | |
2995 | * Setup BIDI XOR callback to be run during transport_generic_complete_ok() | |
2996 | */ | |
2997 | cmd->transport_complete_callback = &transport_xor_callback; | |
a1d8b49a | 2998 | cmd->t_tasks_fua = (cdb[1] & 0x8); |
c66ac9db NB |
2999 | break; |
3000 | case VARIABLE_LENGTH_CMD: | |
3001 | service_action = get_unaligned_be16(&cdb[8]); | |
3002 | /* | |
3003 | * Determine if this is TCM/PSCSI device and we should disable | |
3004 | * internal emulation for this CDB. | |
3005 | */ | |
e3d6f909 | 3006 | passthrough = (dev->transport->transport_type == |
c66ac9db NB |
3007 | TRANSPORT_PLUGIN_PHBA_PDEV); |
3008 | ||
3009 | switch (service_action) { | |
3010 | case XDWRITEREAD_32: | |
3011 | sectors = transport_get_sectors_32(cdb, cmd, §or_ret); | |
3012 | if (sector_ret) | |
3013 | goto out_unsupported_cdb; | |
3014 | size = transport_get_size(sectors, cdb, cmd); | |
3015 | /* | |
3016 | * Use WRITE_32 and READ_32 opcodes for the emulated | |
3017 | * XDWRITE_READ_32 logic. | |
3018 | */ | |
3019 | cmd->transport_split_cdb = &split_cdb_XX_32; | |
a1d8b49a | 3020 | cmd->t_task_lba = transport_lba_64_ext(cdb); |
c66ac9db NB |
3021 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3022 | ||
3023 | /* | |
3024 | * Skip the remaining assignments for TCM/PSCSI passthrough | |
3025 | */ | |
3026 | if (passthrough) | |
3027 | break; | |
3028 | ||
3029 | /* | |
3030 | * Setup BIDI XOR callback to be run during | |
3031 | * transport_generic_complete_ok() | |
3032 | */ | |
3033 | cmd->transport_complete_callback = &transport_xor_callback; | |
a1d8b49a | 3034 | cmd->t_tasks_fua = (cdb[10] & 0x8); |
c66ac9db NB |
3035 | break; |
3036 | case WRITE_SAME_32: | |
3037 | sectors = transport_get_sectors_32(cdb, cmd, §or_ret); | |
3038 | if (sector_ret) | |
3039 | goto out_unsupported_cdb; | |
dd3a5ad8 | 3040 | |
6708bb27 | 3041 | if (sectors) |
12850626 | 3042 | size = transport_get_size(1, cdb, cmd); |
6708bb27 AG |
3043 | else { |
3044 | pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not" | |
3045 | " supported\n"); | |
3046 | goto out_invalid_cdb_field; | |
3047 | } | |
dd3a5ad8 | 3048 | |
a1d8b49a | 3049 | cmd->t_task_lba = get_unaligned_be64(&cdb[12]); |
c66ac9db NB |
3050 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3051 | ||
706d5860 | 3052 | if (target_check_write_same_discard(&cdb[10], dev) < 0) |
c66ac9db | 3053 | goto out_invalid_cdb_field; |
706d5860 | 3054 | |
c66ac9db NB |
3055 | break; |
3056 | default: | |
6708bb27 | 3057 | pr_err("VARIABLE_LENGTH_CMD service action" |
c66ac9db NB |
3058 | " 0x%04x not supported\n", service_action); |
3059 | goto out_unsupported_cdb; | |
3060 | } | |
3061 | break; | |
e434f1f1 | 3062 | case MAINTENANCE_IN: |
e3d6f909 | 3063 | if (dev->transport->get_device_type(dev) != TYPE_ROM) { |
c66ac9db NB |
3064 | /* MAINTENANCE_IN from SCC-2 */ |
3065 | /* | |
3066 | * Check for emulated MI_REPORT_TARGET_PGS. | |
3067 | */ | |
3068 | if (cdb[1] == MI_REPORT_TARGET_PGS) { | |
3069 | cmd->transport_emulate_cdb = | |
e3d6f909 | 3070 | (su_dev->t10_alua.alua_type == |
c66ac9db | 3071 | SPC3_ALUA_EMULATED) ? |
e3d6f909 | 3072 | core_emulate_report_target_port_groups : |
c66ac9db NB |
3073 | NULL; |
3074 | } | |
3075 | size = (cdb[6] << 24) | (cdb[7] << 16) | | |
3076 | (cdb[8] << 8) | cdb[9]; | |
3077 | } else { | |
3078 | /* GPCMD_SEND_KEY from multi media commands */ | |
3079 | size = (cdb[8] << 8) + cdb[9]; | |
3080 | } | |
05d1c7c0 | 3081 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3082 | break; |
3083 | case MODE_SELECT: | |
3084 | size = cdb[4]; | |
3085 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | |
3086 | break; | |
3087 | case MODE_SELECT_10: | |
3088 | size = (cdb[7] << 8) + cdb[8]; | |
3089 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | |
3090 | break; | |
3091 | case MODE_SENSE: | |
3092 | size = cdb[4]; | |
05d1c7c0 | 3093 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3094 | break; |
3095 | case MODE_SENSE_10: | |
3096 | case GPCMD_READ_BUFFER_CAPACITY: | |
3097 | case GPCMD_SEND_OPC: | |
3098 | case LOG_SELECT: | |
3099 | case LOG_SENSE: | |
3100 | size = (cdb[7] << 8) + cdb[8]; | |
05d1c7c0 | 3101 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3102 | break; |
3103 | case READ_BLOCK_LIMITS: | |
3104 | size = READ_BLOCK_LEN; | |
05d1c7c0 | 3105 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3106 | break; |
3107 | case GPCMD_GET_CONFIGURATION: | |
3108 | case GPCMD_READ_FORMAT_CAPACITIES: | |
3109 | case GPCMD_READ_DISC_INFO: | |
3110 | case GPCMD_READ_TRACK_RZONE_INFO: | |
3111 | size = (cdb[7] << 8) + cdb[8]; | |
3112 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | |
3113 | break; | |
3114 | case PERSISTENT_RESERVE_IN: | |
3115 | case PERSISTENT_RESERVE_OUT: | |
3116 | cmd->transport_emulate_cdb = | |
e3d6f909 | 3117 | (su_dev->t10_pr.res_type == |
c66ac9db | 3118 | SPC3_PERSISTENT_RESERVATIONS) ? |
e3d6f909 | 3119 | core_scsi3_emulate_pr : NULL; |
c66ac9db | 3120 | size = (cdb[7] << 8) + cdb[8]; |
05d1c7c0 | 3121 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3122 | break; |
3123 | case GPCMD_MECHANISM_STATUS: | |
3124 | case GPCMD_READ_DVD_STRUCTURE: | |
3125 | size = (cdb[8] << 8) + cdb[9]; | |
3126 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | |
3127 | break; | |
3128 | case READ_POSITION: | |
3129 | size = READ_POSITION_LEN; | |
05d1c7c0 | 3130 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db | 3131 | break; |
e434f1f1 | 3132 | case MAINTENANCE_OUT: |
e3d6f909 | 3133 | if (dev->transport->get_device_type(dev) != TYPE_ROM) { |
c66ac9db NB |
3134 | /* MAINTENANCE_OUT from SCC-2 |
3135 | * | |
3136 | * Check for emulated MO_SET_TARGET_PGS. | |
3137 | */ | |
3138 | if (cdb[1] == MO_SET_TARGET_PGS) { | |
3139 | cmd->transport_emulate_cdb = | |
e3d6f909 | 3140 | (su_dev->t10_alua.alua_type == |
c66ac9db | 3141 | SPC3_ALUA_EMULATED) ? |
e3d6f909 | 3142 | core_emulate_set_target_port_groups : |
c66ac9db NB |
3143 | NULL; |
3144 | } | |
3145 | ||
3146 | size = (cdb[6] << 24) | (cdb[7] << 16) | | |
3147 | (cdb[8] << 8) | cdb[9]; | |
3148 | } else { | |
3149 | /* GPCMD_REPORT_KEY from multi media commands */ | |
3150 | size = (cdb[8] << 8) + cdb[9]; | |
3151 | } | |
05d1c7c0 | 3152 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3153 | break; |
3154 | case INQUIRY: | |
3155 | size = (cdb[3] << 8) + cdb[4]; | |
3156 | /* | |
3157 | * Do implicit HEAD_OF_QUEUE processing for INQUIRY. | |
3158 | * See spc4r17 section 5.3 | |
3159 | */ | |
5951146d | 3160 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) |
e66ecd50 | 3161 | cmd->sam_task_attr = MSG_HEAD_TAG; |
05d1c7c0 | 3162 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3163 | break; |
3164 | case READ_BUFFER: | |
3165 | size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; | |
05d1c7c0 | 3166 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3167 | break; |
3168 | case READ_CAPACITY: | |
3169 | size = READ_CAP_LEN; | |
05d1c7c0 | 3170 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3171 | break; |
3172 | case READ_MEDIA_SERIAL_NUMBER: | |
3173 | case SECURITY_PROTOCOL_IN: | |
3174 | case SECURITY_PROTOCOL_OUT: | |
3175 | size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; | |
05d1c7c0 | 3176 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3177 | break; |
3178 | case SERVICE_ACTION_IN: | |
3179 | case ACCESS_CONTROL_IN: | |
3180 | case ACCESS_CONTROL_OUT: | |
3181 | case EXTENDED_COPY: | |
3182 | case READ_ATTRIBUTE: | |
3183 | case RECEIVE_COPY_RESULTS: | |
3184 | case WRITE_ATTRIBUTE: | |
3185 | size = (cdb[10] << 24) | (cdb[11] << 16) | | |
3186 | (cdb[12] << 8) | cdb[13]; | |
05d1c7c0 | 3187 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3188 | break; |
3189 | case RECEIVE_DIAGNOSTIC: | |
3190 | case SEND_DIAGNOSTIC: | |
3191 | size = (cdb[3] << 8) | cdb[4]; | |
05d1c7c0 | 3192 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3193 | break; |
3194 | /* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */ | |
3195 | #if 0 | |
3196 | case GPCMD_READ_CD: | |
3197 | sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; | |
3198 | size = (2336 * sectors); | |
05d1c7c0 | 3199 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3200 | break; |
3201 | #endif | |
3202 | case READ_TOC: | |
3203 | size = cdb[8]; | |
05d1c7c0 | 3204 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3205 | break; |
3206 | case REQUEST_SENSE: | |
3207 | size = cdb[4]; | |
05d1c7c0 | 3208 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3209 | break; |
3210 | case READ_ELEMENT_STATUS: | |
3211 | size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9]; | |
05d1c7c0 | 3212 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3213 | break; |
3214 | case WRITE_BUFFER: | |
3215 | size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; | |
05d1c7c0 | 3216 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3217 | break; |
3218 | case RESERVE: | |
3219 | case RESERVE_10: | |
3220 | /* | |
3221 | * The SPC-2 RESERVE does not contain a size in the SCSI CDB. | |
3222 | * Assume the passthrough or $FABRIC_MOD will tell us about it. | |
3223 | */ | |
3224 | if (cdb[0] == RESERVE_10) | |
3225 | size = (cdb[7] << 8) | cdb[8]; | |
3226 | else | |
3227 | size = cmd->data_length; | |
3228 | ||
3229 | /* | |
3230 | * Setup the legacy emulated handler for SPC-2 and | |
3231 | * >= SPC-3 compatible reservation handling (CRH=1) | |
3232 | * Otherwise, we assume the underlying SCSI logic | |
3233 | * is running in SPC_PASSTHROUGH, and wants reservations | |
3234 | * emulation disabled. | |
3235 | */ | |
3236 | cmd->transport_emulate_cdb = | |
e3d6f909 | 3237 | (su_dev->t10_pr.res_type != |
c66ac9db | 3238 | SPC_PASSTHROUGH) ? |
e3d6f909 | 3239 | core_scsi2_emulate_crh : NULL; |
c66ac9db NB |
3240 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; |
3241 | break; | |
3242 | case RELEASE: | |
3243 | case RELEASE_10: | |
3244 | /* | |
3245 | * The SPC-2 RELEASE does not contain a size in the SCSI CDB. | |
3246 | * Assume the passthrough or $FABRIC_MOD will tell us about it. | |
3247 | */ | |
3248 | if (cdb[0] == RELEASE_10) | |
3249 | size = (cdb[7] << 8) | cdb[8]; | |
3250 | else | |
3251 | size = cmd->data_length; | |
3252 | ||
3253 | cmd->transport_emulate_cdb = | |
e3d6f909 | 3254 | (su_dev->t10_pr.res_type != |
c66ac9db | 3255 | SPC_PASSTHROUGH) ? |
e3d6f909 | 3256 | core_scsi2_emulate_crh : NULL; |
c66ac9db NB |
3257 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; |
3258 | break; | |
3259 | case SYNCHRONIZE_CACHE: | |
3260 | case 0x91: /* SYNCHRONIZE_CACHE_16: */ | |
3261 | /* | |
3262 | * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE | |
3263 | */ | |
3264 | if (cdb[0] == SYNCHRONIZE_CACHE) { | |
3265 | sectors = transport_get_sectors_10(cdb, cmd, §or_ret); | |
a1d8b49a | 3266 | cmd->t_task_lba = transport_lba_32(cdb); |
c66ac9db NB |
3267 | } else { |
3268 | sectors = transport_get_sectors_16(cdb, cmd, §or_ret); | |
a1d8b49a | 3269 | cmd->t_task_lba = transport_lba_64(cdb); |
c66ac9db NB |
3270 | } |
3271 | if (sector_ret) | |
3272 | goto out_unsupported_cdb; | |
3273 | ||
3274 | size = transport_get_size(sectors, cdb, cmd); | |
3275 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; | |
3276 | ||
3277 | /* | |
3278 | * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb() | |
3279 | */ | |
e3d6f909 | 3280 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) |
c66ac9db NB |
3281 | break; |
3282 | /* | |
3283 | * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation | |
3284 | * for SYNCHRONIZE_CACHE* Immed=1 case in __transport_execute_tasks() | |
3285 | */ | |
3286 | cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC; | |
3287 | /* | |
3288 | * Check to ensure that LBA + Range does not exceed past end of | |
7abbe7f3 | 3289 | * device for IBLOCK and FILEIO ->do_sync_cache() backend calls |
c66ac9db | 3290 | */ |
7abbe7f3 NB |
3291 | if ((cmd->t_task_lba != 0) || (sectors != 0)) { |
3292 | if (transport_cmd_get_valid_sectors(cmd) < 0) | |
3293 | goto out_invalid_cdb_field; | |
3294 | } | |
c66ac9db NB |
3295 | break; |
3296 | case UNMAP: | |
3297 | size = get_unaligned_be16(&cdb[7]); | |
05d1c7c0 | 3298 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3299 | break; |
3300 | case WRITE_SAME_16: | |
3301 | sectors = transport_get_sectors_16(cdb, cmd, §or_ret); | |
3302 | if (sector_ret) | |
3303 | goto out_unsupported_cdb; | |
dd3a5ad8 | 3304 | |
6708bb27 | 3305 | if (sectors) |
12850626 | 3306 | size = transport_get_size(1, cdb, cmd); |
6708bb27 AG |
3307 | else { |
3308 | pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); | |
3309 | goto out_invalid_cdb_field; | |
3310 | } | |
dd3a5ad8 | 3311 | |
5db0753b | 3312 | cmd->t_task_lba = get_unaligned_be64(&cdb[2]); |
706d5860 NB |
3313 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3314 | ||
3315 | if (target_check_write_same_discard(&cdb[1], dev) < 0) | |
3316 | goto out_invalid_cdb_field; | |
3317 | break; | |
3318 | case WRITE_SAME: | |
3319 | sectors = transport_get_sectors_10(cdb, cmd, §or_ret); | |
3320 | if (sector_ret) | |
3321 | goto out_unsupported_cdb; | |
3322 | ||
3323 | if (sectors) | |
12850626 | 3324 | size = transport_get_size(1, cdb, cmd); |
706d5860 NB |
3325 | else { |
3326 | pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); | |
3327 | goto out_invalid_cdb_field; | |
c66ac9db | 3328 | } |
706d5860 NB |
3329 | |
3330 | cmd->t_task_lba = get_unaligned_be32(&cdb[2]); | |
c66ac9db | 3331 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
706d5860 NB |
3332 | /* |
3333 | * Follow sbcr26 with WRITE_SAME (10) and check for the existence | |
3334 | * of byte 1 bit 3 UNMAP instead of the original reserved field | |
3335 | */ | |
3336 | if (target_check_write_same_discard(&cdb[1], dev) < 0) | |
3337 | goto out_invalid_cdb_field; | |
c66ac9db NB |
3338 | break; |
3339 | case ALLOW_MEDIUM_REMOVAL: | |
3340 | case GPCMD_CLOSE_TRACK: | |
3341 | case ERASE: | |
3342 | case INITIALIZE_ELEMENT_STATUS: | |
3343 | case GPCMD_LOAD_UNLOAD: | |
3344 | case REZERO_UNIT: | |
3345 | case SEEK_10: | |
3346 | case GPCMD_SET_SPEED: | |
3347 | case SPACE: | |
3348 | case START_STOP: | |
3349 | case TEST_UNIT_READY: | |
3350 | case VERIFY: | |
3351 | case WRITE_FILEMARKS: | |
3352 | case MOVE_MEDIUM: | |
3353 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; | |
3354 | break; | |
3355 | case REPORT_LUNS: | |
3356 | cmd->transport_emulate_cdb = | |
e3d6f909 | 3357 | transport_core_report_lun_response; |
c66ac9db NB |
3358 | size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; |
3359 | /* | |
3360 | * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS | |
3361 | * See spc4r17 section 5.3 | |
3362 | */ | |
5951146d | 3363 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) |
e66ecd50 | 3364 | cmd->sam_task_attr = MSG_HEAD_TAG; |
05d1c7c0 | 3365 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3366 | break; |
3367 | default: | |
6708bb27 | 3368 | pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode" |
c66ac9db | 3369 | " 0x%02x, sending CHECK_CONDITION.\n", |
e3d6f909 | 3370 | cmd->se_tfo->get_fabric_name(), cdb[0]); |
c66ac9db NB |
3371 | goto out_unsupported_cdb; |
3372 | } | |
3373 | ||
3374 | if (size != cmd->data_length) { | |
6708bb27 | 3375 | pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" |
c66ac9db | 3376 | " %u does not match SCSI CDB Length: %u for SAM Opcode:" |
e3d6f909 | 3377 | " 0x%02x\n", cmd->se_tfo->get_fabric_name(), |
c66ac9db NB |
3378 | cmd->data_length, size, cdb[0]); |
3379 | ||
3380 | cmd->cmd_spdtl = size; | |
3381 | ||
3382 | if (cmd->data_direction == DMA_TO_DEVICE) { | |
6708bb27 | 3383 | pr_err("Rejecting underflow/overflow" |
c66ac9db NB |
3384 | " WRITE data\n"); |
3385 | goto out_invalid_cdb_field; | |
3386 | } | |
3387 | /* | |
3388 | * Reject READ_* or WRITE_* with overflow/underflow for | |
3389 | * type SCF_SCSI_DATA_SG_IO_CDB. | |
3390 | */ | |
6708bb27 AG |
3391 | if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) { |
3392 | pr_err("Failing OVERFLOW/UNDERFLOW for LBA op" | |
c66ac9db | 3393 | " CDB on non 512-byte sector setup subsystem" |
e3d6f909 | 3394 | " plugin: %s\n", dev->transport->name); |
c66ac9db NB |
3395 | /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ |
3396 | goto out_invalid_cdb_field; | |
3397 | } | |
3398 | ||
3399 | if (size > cmd->data_length) { | |
3400 | cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; | |
3401 | cmd->residual_count = (size - cmd->data_length); | |
3402 | } else { | |
3403 | cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; | |
3404 | cmd->residual_count = (cmd->data_length - size); | |
3405 | } | |
3406 | cmd->data_length = size; | |
3407 | } | |
3408 | ||
d0229ae3 AG |
3409 | /* Let's limit control cdbs to a page, for simplicity's sake. */ |
3410 | if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) && | |
3411 | size > PAGE_SIZE) | |
3412 | goto out_invalid_cdb_field; | |
3413 | ||
c66ac9db NB |
3414 | transport_set_supported_SAM_opcode(cmd); |
3415 | return ret; | |
3416 | ||
3417 | out_unsupported_cdb: | |
3418 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
3419 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | |
5951146d | 3420 | return -EINVAL; |
c66ac9db NB |
3421 | out_invalid_cdb_field: |
3422 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
3423 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | |
5951146d | 3424 | return -EINVAL; |
c66ac9db NB |
3425 | } |
3426 | ||
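/*
 * Illustrative sketch (an assumption, not part of the original file): the
 * open-coded big-endian shifts in the sequencer above, e.g.
 * "(cdb[7] << 8) + cdb[8]" for MODE_SENSE_10, are equivalent to the
 * unaligned helpers already used for UNMAP and WRITE_SAME_16.  A
 * hypothetical helper for the 10-byte CDB allocation length could read:
 */
static inline u32 example_cdb_xfer_len_10(const unsigned char *cdb)
{
	/* bytes 7-8 of a 10-byte CDB carry the allocation/transfer length */
	return get_unaligned_be16(&cdb[7]);
}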
c66ac9db NB |
3427 | /* |
3428 | * Called from transport_generic_complete_ok() and | |
3429 | * transport_generic_request_failure() to determine which dormant/delayed | |
3430 | * and ordered cmds need to have their tasks added to the execution queue. | |
3431 | */ | |
3432 | static void transport_complete_task_attr(struct se_cmd *cmd) | |
3433 | { | |
5951146d | 3434 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
3435 | struct se_cmd *cmd_p, *cmd_tmp; |
3436 | int new_active_tasks = 0; | |
3437 | ||
e66ecd50 | 3438 | if (cmd->sam_task_attr == MSG_SIMPLE_TAG) { |
c66ac9db NB |
3439 | atomic_dec(&dev->simple_cmds); |
3440 | smp_mb__after_atomic_dec(); | |
3441 | dev->dev_cur_ordered_id++; | |
6708bb27 | 3442 | pr_debug("Incremented dev->dev_cur_ordered_id: %u for" |
c66ac9db NB |
3443 | " SIMPLE: %u\n", dev->dev_cur_ordered_id, |
3444 | cmd->se_ordered_id); | |
e66ecd50 | 3445 | } else if (cmd->sam_task_attr == MSG_HEAD_TAG) { |
c66ac9db NB |
3446 | atomic_dec(&dev->dev_hoq_count); |
3447 | smp_mb__after_atomic_dec(); | |
3448 | dev->dev_cur_ordered_id++; | |
6708bb27 | 3449 | pr_debug("Incremented dev_cur_ordered_id: %u for" |
c66ac9db NB |
3450 | " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, |
3451 | cmd->se_ordered_id); | |
e66ecd50 | 3452 | } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { |
c66ac9db | 3453 | spin_lock(&dev->ordered_cmd_lock); |
5951146d | 3454 | list_del(&cmd->se_ordered_node); |
c66ac9db NB |
3455 | atomic_dec(&dev->dev_ordered_sync); |
3456 | smp_mb__after_atomic_dec(); | |
3457 | spin_unlock(&dev->ordered_cmd_lock); | |
3458 | ||
3459 | dev->dev_cur_ordered_id++; | |
6708bb27 | 3460 | pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" |
c66ac9db NB |
3461 | " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); |
3462 | } | |
3463 | /* | |
3464 | * Process all commands up to the last received | |
3465 | * ORDERED task attribute which requires another blocking | |
3466 | * boundary | |
3467 | */ | |
3468 | spin_lock(&dev->delayed_cmd_lock); | |
3469 | list_for_each_entry_safe(cmd_p, cmd_tmp, | |
5951146d | 3470 | &dev->delayed_cmd_list, se_delayed_node) { |
c66ac9db | 3471 | |
5951146d | 3472 | list_del(&cmd_p->se_delayed_node); |
c66ac9db NB |
3473 | spin_unlock(&dev->delayed_cmd_lock); |
3474 | ||
6708bb27 | 3475 | pr_debug("Calling add_tasks() for" |
c66ac9db NB |
3476 | " cmd_p: 0x%02x Task Attr: 0x%02x" |
3477 | " Dormant -> Active, se_ordered_id: %u\n", | |
6708bb27 | 3478 | cmd_p->t_task_cdb[0], |
c66ac9db NB |
3479 | cmd_p->sam_task_attr, cmd_p->se_ordered_id); |
3480 | ||
3481 | transport_add_tasks_from_cmd(cmd_p); | |
3482 | new_active_tasks++; | |
3483 | ||
3484 | spin_lock(&dev->delayed_cmd_lock); | |
e66ecd50 | 3485 | if (cmd_p->sam_task_attr == MSG_ORDERED_TAG) |
c66ac9db NB |
3486 | break; |
3487 | } | |
3488 | spin_unlock(&dev->delayed_cmd_lock); | |
3489 | /* | |
3490 | * If new tasks have become active, wake up the transport thread | |
3491 | * to do the processing of the Active tasks. | |
3492 | */ | |
3493 | if (new_active_tasks != 0) | |
e3d6f909 | 3494 | wake_up_interruptible(&dev->dev_queue_obj.thread_wq); |
c66ac9db NB |
3495 | } |
3496 | ||
07bde79a NB |
3497 | static int transport_complete_qf(struct se_cmd *cmd) |
3498 | { | |
3499 | int ret = 0; | |
3500 | ||
3501 | if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) | |
3502 | return cmd->se_tfo->queue_status(cmd); | |
3503 | ||
3504 | switch (cmd->data_direction) { | |
3505 | case DMA_FROM_DEVICE: | |
3506 | ret = cmd->se_tfo->queue_data_in(cmd); | |
3507 | break; | |
3508 | case DMA_TO_DEVICE: | |
ec98f782 | 3509 | if (cmd->t_bidi_data_sg) { |
07bde79a NB |
3510 | ret = cmd->se_tfo->queue_data_in(cmd); |
3511 | if (ret < 0) | |
3512 | return ret; | |
3513 | } | |
3514 | /* Fall through for DMA_TO_DEVICE */ | |
3515 | case DMA_NONE: | |
3516 | ret = cmd->se_tfo->queue_status(cmd); | |
3517 | break; | |
3518 | default: | |
3519 | break; | |
3520 | } | |
3521 | ||
3522 | return ret; | |
3523 | } | |
3524 | ||
3525 | static void transport_handle_queue_full( | |
3526 | struct se_cmd *cmd, | |
3527 | struct se_device *dev, | |
3528 | int (*qf_callback)(struct se_cmd *)) | |
3529 | { | |
3530 | spin_lock_irq(&dev->qf_cmd_lock); | |
3531 | cmd->se_cmd_flags |= SCF_EMULATE_QUEUE_FULL; | |
3532 | cmd->transport_qf_callback = qf_callback; | |
3533 | list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); | |
3534 | atomic_inc(&dev->dev_qf_count); | |
3535 | smp_mb__after_atomic_inc(); | |
3536 | spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); | |
3537 | ||
3538 | schedule_work(&cmd->se_dev->qf_work_queue); | |
3539 | } | |
3540 | ||
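/*
 * Illustrative flow (summarising the code above, not additional behaviour):
 * when a fabric queue_data_in()/queue_status() callback returns -EAGAIN,
 * the command is parked on dev->qf_cmd_list together with a retry callback,
 * dev->dev_qf_count is bumped, and dev->qf_work_queue is scheduled so the
 * queued response can be replayed once the fabric can accept it again.
 */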
c66ac9db NB |
3541 | static void transport_generic_complete_ok(struct se_cmd *cmd) |
3542 | { | |
07bde79a | 3543 | int reason = 0, ret; |
c66ac9db NB |
3544 | /* |
3545 | * Check if we need to move delayed/dormant tasks from cmds on the | |
3546 | * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task | |
3547 | * Attribute. | |
3548 | */ | |
5951146d | 3549 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) |
c66ac9db | 3550 | transport_complete_task_attr(cmd); |
07bde79a NB |
3551 | /* |
3552 | * Check to schedule QUEUE_FULL work, or execute an existing | |
3553 | * cmd->transport_qf_callback() | |
3554 | */ | |
3555 | if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) | |
3556 | schedule_work(&cmd->se_dev->qf_work_queue); | |
3557 | ||
3558 | if (cmd->transport_qf_callback) { | |
3559 | ret = cmd->transport_qf_callback(cmd); | |
3560 | if (ret < 0) | |
3561 | goto queue_full; | |
3562 | ||
3563 | cmd->transport_qf_callback = NULL; | |
3564 | goto done; | |
3565 | } | |
c66ac9db NB |
3566 | /* |
3567 | * Check if we need to retrieve a sense buffer from | |
3568 | * the struct se_cmd in question. | |
3569 | */ | |
3570 | if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { | |
3571 | if (transport_get_sense_data(cmd) < 0) | |
3572 | reason = TCM_NON_EXISTENT_LUN; | |
3573 | ||
3574 | /* | |
3575 | * Only set when an struct se_task->task_scsi_status returned | |
3576 | * a non GOOD status. | |
3577 | */ | |
3578 | if (cmd->scsi_status) { | |
07bde79a | 3579 | ret = transport_send_check_condition_and_sense( |
c66ac9db | 3580 | cmd, reason, 1); |
07bde79a NB |
3581 | if (ret == -EAGAIN) |
3582 | goto queue_full; | |
3583 | ||
c66ac9db NB |
3584 | transport_lun_remove_cmd(cmd); |
3585 | transport_cmd_check_stop_to_fabric(cmd); | |
3586 | return; | |
3587 | } | |
3588 | } | |
3589 | /* | |
25985edc | 3590 | * Check for a callback, used by amongst other things |
c66ac9db NB |
3591 | * XDWRITE_READ_10 emulation. |
3592 | */ | |
3593 | if (cmd->transport_complete_callback) | |
3594 | cmd->transport_complete_callback(cmd); | |
3595 | ||
3596 | switch (cmd->data_direction) { | |
3597 | case DMA_FROM_DEVICE: | |
3598 | spin_lock(&cmd->se_lun->lun_sep_lock); | |
e3d6f909 AG |
3599 | if (cmd->se_lun->lun_sep) { |
3600 | cmd->se_lun->lun_sep->sep_stats.tx_data_octets += | |
c66ac9db NB |
3601 | cmd->data_length; |
3602 | } | |
3603 | spin_unlock(&cmd->se_lun->lun_sep_lock); | |
c66ac9db | 3604 | |
07bde79a NB |
3605 | ret = cmd->se_tfo->queue_data_in(cmd); |
3606 | if (ret == -EAGAIN) | |
3607 | goto queue_full; | |
c66ac9db NB |
3608 | break; |
3609 | case DMA_TO_DEVICE: | |
3610 | spin_lock(&cmd->se_lun->lun_sep_lock); | |
e3d6f909 AG |
3611 | if (cmd->se_lun->lun_sep) { |
3612 | cmd->se_lun->lun_sep->sep_stats.rx_data_octets += | |
c66ac9db NB |
3613 | cmd->data_length; |
3614 | } | |
3615 | spin_unlock(&cmd->se_lun->lun_sep_lock); | |
3616 | /* | |
3617 | * Check if we need to send READ payload for BIDI-COMMAND | |
3618 | */ | |
ec98f782 | 3619 | if (cmd->t_bidi_data_sg) { |
c66ac9db | 3620 | spin_lock(&cmd->se_lun->lun_sep_lock); |
e3d6f909 AG |
3621 | if (cmd->se_lun->lun_sep) { |
3622 | cmd->se_lun->lun_sep->sep_stats.tx_data_octets += | |
c66ac9db NB |
3623 | cmd->data_length; |
3624 | } | |
3625 | spin_unlock(&cmd->se_lun->lun_sep_lock); | |
07bde79a NB |
3626 | ret = cmd->se_tfo->queue_data_in(cmd); |
3627 | if (ret == -EAGAIN) | |
3628 | goto queue_full; | |
c66ac9db NB |
3629 | break; |
3630 | } | |
3631 | /* Fall through for DMA_TO_DEVICE */ | |
3632 | case DMA_NONE: | |
07bde79a NB |
3633 | ret = cmd->se_tfo->queue_status(cmd); |
3634 | if (ret == -EAGAIN) | |
3635 | goto queue_full; | |
c66ac9db NB |
3636 | break; |
3637 | default: | |
3638 | break; | |
3639 | } | |
3640 | ||
07bde79a | 3641 | done: |
c66ac9db NB |
3642 | transport_lun_remove_cmd(cmd); |
3643 | transport_cmd_check_stop_to_fabric(cmd); | |
07bde79a NB |
3644 | return; |
3645 | ||
3646 | queue_full: | |
6708bb27 | 3647 | pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," |
07bde79a NB |
3648 | " data_direction: %d\n", cmd, cmd->data_direction); |
3649 | transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf); | |
c66ac9db NB |
3650 | } |
3651 | ||
3652 | static void transport_free_dev_tasks(struct se_cmd *cmd) | |
3653 | { | |
3654 | struct se_task *task, *task_tmp; | |
3655 | unsigned long flags; | |
3656 | ||
a1d8b49a | 3657 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 3658 | list_for_each_entry_safe(task, task_tmp, |
a1d8b49a | 3659 | &cmd->t_task_list, t_list) { |
c66ac9db NB |
3660 | if (atomic_read(&task->task_active)) |
3661 | continue; | |
3662 | ||
3663 | kfree(task->task_sg_bidi); | |
3664 | kfree(task->task_sg); | |
3665 | ||
3666 | list_del(&task->t_list); | |
3667 | ||
a1d8b49a | 3668 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 3669 | if (task->se_dev) |
e3d6f909 | 3670 | task->se_dev->transport->free_task(task); |
c66ac9db | 3671 | else |
6708bb27 | 3672 | pr_err("task[%u] - task->se_dev is NULL\n", |
c66ac9db | 3673 | task->task_no); |
a1d8b49a | 3674 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 3675 | } |
a1d8b49a | 3676 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
3677 | } |
3678 | ||
6708bb27 | 3679 | static inline void transport_free_sgl(struct scatterlist *sgl, int nents) |
c66ac9db | 3680 | { |
ec98f782 | 3681 | struct scatterlist *sg; |
ec98f782 | 3682 | int count; |
c66ac9db | 3683 | |
6708bb27 AG |
3684 | for_each_sg(sgl, sg, nents, count) |
3685 | __free_page(sg_page(sg)); | |
c66ac9db | 3686 | |
6708bb27 AG |
3687 | kfree(sgl); |
3688 | } | |
c66ac9db | 3689 | |
6708bb27 AG |
3690 | static inline void transport_free_pages(struct se_cmd *cmd) |
3691 | { | |
3692 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) | |
3693 | return; | |
3694 | ||
3695 | transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents); | |
ec98f782 AG |
3696 | cmd->t_data_sg = NULL; |
3697 | cmd->t_data_nents = 0; | |
c66ac9db | 3698 | |
6708bb27 | 3699 | transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); |
ec98f782 AG |
3700 | cmd->t_bidi_data_sg = NULL; |
3701 | cmd->t_bidi_data_nents = 0; | |
c66ac9db NB |
3702 | } |
3703 | ||
d3df7825 CH |
3704 | /** |
3705 | * transport_put_cmd - release a reference to a command | |
3706 | * @cmd: command to release | |
3707 | * | |
3708 | * This routine releases our reference to the command and frees it if possible. | |
3709 | */ | |
39c05f32 | 3710 | static void transport_put_cmd(struct se_cmd *cmd) |
c66ac9db NB |
3711 | { |
3712 | unsigned long flags; | |
4911e3cc | 3713 | int free_tasks = 0; |
c66ac9db | 3714 | |
a1d8b49a | 3715 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
4911e3cc CH |
3716 | if (atomic_read(&cmd->t_fe_count)) { |
3717 | if (!atomic_dec_and_test(&cmd->t_fe_count)) | |
3718 | goto out_busy; | |
3719 | } | |
3720 | ||
3721 | if (atomic_read(&cmd->t_se_count)) { | |
3722 | if (!atomic_dec_and_test(&cmd->t_se_count)) | |
3723 | goto out_busy; | |
3724 | } | |
3725 | ||
3726 | if (atomic_read(&cmd->transport_dev_active)) { | |
3727 | atomic_set(&cmd->transport_dev_active, 0); | |
3728 | transport_all_task_dev_remove_state(cmd); | |
3729 | free_tasks = 1; | |
c66ac9db | 3730 | } |
a1d8b49a | 3731 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 3732 | |
4911e3cc CH |
3733 | if (free_tasks != 0) |
3734 | transport_free_dev_tasks(cmd); | |
d3df7825 | 3735 | |
c66ac9db | 3736 | transport_free_pages(cmd); |
31afc39c | 3737 | transport_release_cmd(cmd); |
39c05f32 | 3738 | return; |
4911e3cc CH |
3739 | out_busy: |
3740 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
c66ac9db NB |
3741 | } |
3742 | ||
c66ac9db | 3743 | /* |
ec98f782 AG |
3744 | * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of |
3745 | * allocating in the core. | |
c66ac9db NB |
3746 | * @cmd: Associated se_cmd descriptor |
3747 | * @sgl: SGL style memory for TCM WRITE / READ | |
3748 | * @sgl_count: Number of SGL elements | |
3749 | * @sgl_bidi: SGL style memory for TCM BIDI READ | |
3750 | * @sgl_bidi_count: Number of BIDI READ SGL elements | |
3751 | * | |
3752 | * Return: nonzero if cmd was rejected for -ENOMEM or improper usage | |
3753 | * of parameters. | |
3754 | */ | |
3755 | int transport_generic_map_mem_to_cmd( | |
3756 | struct se_cmd *cmd, | |
5951146d AG |
3757 | struct scatterlist *sgl, |
3758 | u32 sgl_count, | |
3759 | struct scatterlist *sgl_bidi, | |
3760 | u32 sgl_bidi_count) | |
c66ac9db | 3761 | { |
5951146d | 3762 | if (!sgl || !sgl_count) |
c66ac9db | 3763 | return 0; |
c66ac9db | 3764 | |
c66ac9db NB |
3765 | if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || |
3766 | (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) { | |
c66ac9db | 3767 | |
ec98f782 AG |
3768 | cmd->t_data_sg = sgl; |
3769 | cmd->t_data_nents = sgl_count; | |
c66ac9db | 3770 | |
ec98f782 AG |
3771 | if (sgl_bidi && sgl_bidi_count) { |
3772 | cmd->t_bidi_data_sg = sgl_bidi; | |
3773 | cmd->t_bidi_data_nents = sgl_bidi_count; | |
c66ac9db NB |
3774 | } |
3775 | cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; | |
c66ac9db NB |
3776 | } |
3777 | ||
3778 | return 0; | |
3779 | } | |
3780 | EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); | |
3781 | ||
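/*
 * Illustrative usage sketch (an assumption, not taken from any specific
 * fabric module): a fabric that already owns SGL memory can hand it to the
 * core before transport_generic_new_cmd() so that no core allocation takes
 * place for this descriptor.  example_fabric_setup_data() is hypothetical.
 */
static int example_fabric_setup_data(struct se_cmd *se_cmd,
				     struct scatterlist *sgl, u32 sgl_count)
{
	/* a non-zero return means the SGLs were rejected */
	return transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
						NULL, 0);
}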
c66ac9db NB |
3782 | static int transport_new_cmd_obj(struct se_cmd *cmd) |
3783 | { | |
5951146d | 3784 | struct se_device *dev = cmd->se_dev; |
01cde4d5 | 3785 | int set_counts = 1, rc, task_cdbs; |
c66ac9db | 3786 | |
ec98f782 AG |
3787 | /* |
3788 | * Setup any BIDI READ tasks and memory from | |
3789 | * cmd->t_bidi_data_sg so the READ struct se_tasks | |
3790 | * are queued first for the non pSCSI passthrough case. | |
3791 | */ | |
3792 | if (cmd->t_bidi_data_sg && | |
3793 | (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) { | |
3794 | rc = transport_allocate_tasks(cmd, | |
3795 | cmd->t_task_lba, | |
3796 | DMA_FROM_DEVICE, | |
3797 | cmd->t_bidi_data_sg, | |
3798 | cmd->t_bidi_data_nents); | |
6708bb27 | 3799 | if (rc <= 0) { |
c66ac9db NB |
3800 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
3801 | cmd->scsi_sense_reason = | |
ec98f782 | 3802 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
01cde4d5 | 3803 | return -EINVAL; |
c66ac9db | 3804 | } |
ec98f782 AG |
3805 | atomic_inc(&cmd->t_fe_count); |
3806 | atomic_inc(&cmd->t_se_count); | |
3807 | set_counts = 0; | |
3808 | } | |
3809 | /* | |
3810 | * Setup the tasks and memory from cmd->t_data_sg | |
3811 | * Note for BIDI transfers this will contain the WRITE payload | |
3812 | */ | |
3813 | task_cdbs = transport_allocate_tasks(cmd, | |
3814 | cmd->t_task_lba, | |
3815 | cmd->data_direction, | |
3816 | cmd->t_data_sg, | |
3817 | cmd->t_data_nents); | |
6708bb27 | 3818 | if (task_cdbs <= 0) { |
ec98f782 AG |
3819 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
3820 | cmd->scsi_sense_reason = | |
3821 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | |
01cde4d5 | 3822 | return -EINVAL; |
ec98f782 | 3823 | } |
c66ac9db | 3824 | |
ec98f782 AG |
3825 | if (set_counts) { |
3826 | atomic_inc(&cmd->t_fe_count); | |
3827 | atomic_inc(&cmd->t_se_count); | |
c66ac9db NB |
3828 | } |
3829 | ||
ec98f782 AG |
3830 | cmd->t_task_list_num = task_cdbs; |
3831 | ||
a1d8b49a AG |
3832 | atomic_set(&cmd->t_task_cdbs_left, task_cdbs); |
3833 | atomic_set(&cmd->t_task_cdbs_ex_left, task_cdbs); | |
3834 | atomic_set(&cmd->t_task_cdbs_timeout_left, task_cdbs); | |
c66ac9db NB |
3835 | return 0; |
3836 | } | |
3837 | ||
05d1c7c0 AG |
3838 | void *transport_kmap_first_data_page(struct se_cmd *cmd) |
3839 | { | |
ec98f782 | 3840 | struct scatterlist *sg = cmd->t_data_sg; |
05d1c7c0 | 3841 | |
ec98f782 | 3842 | BUG_ON(!sg); |
05d1c7c0 | 3843 | /* |
ec98f782 AG |
3844 | * We need to take into account a possible offset here for fabrics like |
3845 | * tcm_loop who may be using a contig buffer from the SCSI midlayer for | |
3846 | * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() | |
05d1c7c0 | 3847 | */ |
ec98f782 | 3848 | return kmap(sg_page(sg)) + sg->offset; |
05d1c7c0 AG |
3849 | } |
3850 | EXPORT_SYMBOL(transport_kmap_first_data_page); | |
3851 | ||
3852 | void transport_kunmap_first_data_page(struct se_cmd *cmd) | |
3853 | { | |
ec98f782 | 3854 | kunmap(sg_page(cmd->t_data_sg)); |
05d1c7c0 AG |
3855 | } |
3856 | EXPORT_SYMBOL(transport_kunmap_first_data_page); | |
3857 | ||
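/*
 * Illustrative usage sketch (an assumption): emulation code that builds a
 * small control-CDB response can map only the first data page, copy the
 * payload, and unmap it again.  This is only valid when the payload is
 * known to fit within the first SGL entry.  example_fill_first_page() is
 * hypothetical.
 */
static void example_fill_first_page(struct se_cmd *cmd,
				    const unsigned char *buf, u32 len)
{
	unsigned char *p = transport_kmap_first_data_page(cmd);

	memcpy(p, buf, min_t(u32, len, cmd->data_length));
	transport_kunmap_first_data_page(cmd);
}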
c66ac9db | 3858 | static int |
05d1c7c0 | 3859 | transport_generic_get_mem(struct se_cmd *cmd) |
c66ac9db | 3860 | { |
ec98f782 AG |
3861 | u32 length = cmd->data_length; |
3862 | unsigned int nents; | |
3863 | struct page *page; | |
3864 | int i = 0; | |
c66ac9db | 3865 | |
ec98f782 AG |
3866 | nents = DIV_ROUND_UP(length, PAGE_SIZE); |
3867 | cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL); | |
3868 | if (!cmd->t_data_sg) | |
3869 | return -ENOMEM; | |
c66ac9db | 3870 | |
ec98f782 AG |
3871 | cmd->t_data_nents = nents; |
3872 | sg_init_table(cmd->t_data_sg, nents); | |
c66ac9db | 3873 | |
ec98f782 AG |
3874 | while (length) { |
3875 | u32 page_len = min_t(u32, length, PAGE_SIZE); | |
3876 | page = alloc_page(GFP_KERNEL | __GFP_ZERO); | |
3877 | if (!page) | |
3878 | goto out; | |
c66ac9db | 3879 | |
ec98f782 AG |
3880 | sg_set_page(&cmd->t_data_sg[i], page, page_len, 0); |
3881 | length -= page_len; | |
3882 | i++; | |
c66ac9db | 3883 | } |
c66ac9db | 3884 | return 0; |
c66ac9db | 3885 | |
ec98f782 AG |
3886 | out: |
3887 | while (i >= 0) { | |
3888 | __free_page(sg_page(&cmd->t_data_sg[i])); | |
3889 | i--; | |
c66ac9db | 3890 | } |
ec98f782 AG |
3891 | kfree(cmd->t_data_sg); |
3892 | cmd->t_data_sg = NULL; | |
3893 | return -ENOMEM; | |
c66ac9db NB |
3894 | } |
3895 | ||
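/*
 * Worked example (illustrative): cmd->data_length = 6000 bytes with 4 kB
 * pages gives nents = DIV_ROUND_UP(6000, 4096) = 2; the first SGL entry
 * maps a full 4096-byte zeroed page and the second maps the remaining
 * 1904 bytes of a second zeroed page.
 */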
a1d8b49a AG |
3896 | /* Reduce sectors if they are too long for the device */ |
3897 | static inline sector_t transport_limit_task_sectors( | |
c66ac9db NB |
3898 | struct se_device *dev, |
3899 | unsigned long long lba, | |
a1d8b49a | 3900 | sector_t sectors) |
c66ac9db | 3901 | { |
a1d8b49a | 3902 | sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors); |
c66ac9db | 3903 | |
a1d8b49a AG |
3904 | if (dev->transport->get_device_type(dev) == TYPE_DISK) |
3905 | if ((lba + sectors) > transport_dev_end_lba(dev)) | |
3906 | sectors = ((transport_dev_end_lba(dev) - lba) + 1); | |
c66ac9db | 3907 | |
a1d8b49a | 3908 | return sectors; |
c66ac9db NB |
3909 | } |
3910 | ||
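/*
 * Worked example (illustrative, assumed attribute values): with
 * max_sectors = 1024 and transport_dev_end_lba(dev) = 10000 on a TYPE_DISK
 * device, a request for lba = 9500, sectors = 2048 is first capped to 1024
 * by max_sectors and then reduced to (10000 - 9500) + 1 = 501 sectors so
 * the task does not run past the end of the device.
 */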
c66ac9db NB |
3911 | |
3912 | /* | |
3913 | * This function can be used by HW target mode drivers to create a linked | |
3914 | * scatterlist from all contiguously allocated struct se_task->task_sg[]. | |
3915 | * This is intended to be called during the completion path by TCM Core | |
3916 | * when struct target_core_fabric_ops->check_task_sg_chaining is enabled. | |
3917 | */ | |
3918 | void transport_do_task_sg_chain(struct se_cmd *cmd) | |
3919 | { | |
ec98f782 AG |
3920 | struct scatterlist *sg_first = NULL; |
3921 | struct scatterlist *sg_prev = NULL; | |
3922 | int sg_prev_nents = 0; | |
3923 | struct scatterlist *sg; | |
c66ac9db | 3924 | struct se_task *task; |
ec98f782 | 3925 | u32 chained_nents = 0; |
c66ac9db NB |
3926 | int i; |
3927 | ||
ec98f782 AG |
3928 | BUG_ON(!cmd->se_tfo->task_sg_chaining); |
3929 | ||
c66ac9db NB |
3930 | /* |
3931 | * Walk the struct se_task list and setup scatterlist chains | |
a1d8b49a | 3932 | * for each contiguously allocated struct se_task->task_sg[]. |
c66ac9db | 3933 | */ |
a1d8b49a | 3934 | list_for_each_entry(task, &cmd->t_task_list, t_list) { |
ec98f782 | 3935 | if (!task->task_sg) |
c66ac9db NB |
3936 | continue; |
3937 | ||
ec98f782 AG |
3938 | if (!sg_first) { |
3939 | sg_first = task->task_sg; | |
6708bb27 | 3940 | chained_nents = task->task_sg_nents; |
97868c89 | 3941 | } else { |
ec98f782 | 3942 | sg_chain(sg_prev, sg_prev_nents, task->task_sg); |
6708bb27 | 3943 | chained_nents += task->task_sg_nents; |
97868c89 | 3944 | } |
c3c74c7a NB |
3945 | /* |
3946 | * For the padded tasks, use the extra SGL vector allocated | |
3947 | * in transport_allocate_data_tasks() for the sg_prev_nents | |
3948 | * offset into sg_chain() above. The last task of a | |
3949 | * multi-task list, or a single task will not have | |
3950 | * task->task_padded_sg set. | |
3951 | */ | |
3952 | if (task->task_padded_sg) | |
3953 | sg_prev_nents = (task->task_sg_nents + 1); | |
3954 | else | |
3955 | sg_prev_nents = task->task_sg_nents; | |
ec98f782 AG |
3956 | |
3957 | sg_prev = task->task_sg; | |
c66ac9db NB |
3958 | } |
3959 | /* | |
3960 | * Setup the starting pointer and total t_tasks_sg_chained_no including | |
3961 | * padding SGs for linking and to mark the end. | |
3962 | */ | |
a1d8b49a | 3963 | cmd->t_tasks_sg_chained = sg_first; |
ec98f782 | 3964 | cmd->t_tasks_sg_chained_no = chained_nents; |
c66ac9db | 3965 | |
6708bb27 | 3966 | pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and" |
a1d8b49a AG |
3967 | " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained, |
3968 | cmd->t_tasks_sg_chained_no); | |
c66ac9db | 3969 | |
a1d8b49a AG |
3970 | for_each_sg(cmd->t_tasks_sg_chained, sg, |
3971 | cmd->t_tasks_sg_chained_no, i) { | |
c66ac9db | 3972 | |
6708bb27 | 3973 | pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n", |
5951146d | 3974 | i, sg, sg_page(sg), sg->length, sg->offset); |
c66ac9db | 3975 | if (sg_is_chain(sg)) |
6708bb27 | 3976 | pr_debug("SG: %p sg_is_chain=1\n", sg); |
c66ac9db | 3977 | if (sg_is_last(sg)) |
6708bb27 | 3978 | pr_debug("SG: %p sg_is_last=1\n", sg); |
c66ac9db | 3979 | } |
c66ac9db NB |
3980 | } |
3981 | EXPORT_SYMBOL(transport_do_task_sg_chain); | |
3982 | ||
a1d8b49a AG |
3983 | /* |
3984 | * Break up cmd into chunks transport can handle | |
3985 | */ | |
ec98f782 | 3986 | static int transport_allocate_data_tasks( |
c66ac9db NB |
3987 | struct se_cmd *cmd, |
3988 | unsigned long long lba, | |
c66ac9db | 3989 | enum dma_data_direction data_direction, |
ec98f782 AG |
3990 | struct scatterlist *sgl, |
3991 | unsigned int sgl_nents) | |
c66ac9db NB |
3992 | { |
3993 | unsigned char *cdb = NULL; | |
3994 | struct se_task *task; | |
5951146d | 3995 | struct se_device *dev = cmd->se_dev; |
ec98f782 | 3996 | unsigned long flags; |
a3eedc22 | 3997 | int task_count, i; |
277c5f27 | 3998 | sector_t sectors, dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors; |
ec98f782 AG |
3999 | u32 sector_size = dev->se_sub_dev->se_dev_attrib.block_size; |
4000 | struct scatterlist *sg; | |
4001 | struct scatterlist *cmd_sg; | |
a1d8b49a | 4002 | |
ec98f782 AG |
4003 | WARN_ON(cmd->data_length % sector_size); |
4004 | sectors = DIV_ROUND_UP(cmd->data_length, sector_size); | |
277c5f27 NB |
4005 | task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors); |
4006 | ||
ec98f782 AG |
4007 | cmd_sg = sgl; |
4008 | for (i = 0; i < task_count; i++) { | |
c3c74c7a | 4009 | unsigned int task_size, task_sg_nents_padded; |
ec98f782 | 4010 | int count; |
a1d8b49a | 4011 | |
c66ac9db | 4012 | task = transport_generic_get_task(cmd, data_direction); |
a1d8b49a | 4013 | if (!task) |
ec98f782 | 4014 | return -ENOMEM; |
c66ac9db | 4015 | |
c66ac9db | 4016 | task->task_lba = lba; |
ec98f782 AG |
4017 | task->task_sectors = min(sectors, dev_max_sectors); |
4018 | task->task_size = task->task_sectors * sector_size; | |
c66ac9db | 4019 | |
e3d6f909 | 4020 | cdb = dev->transport->get_cdb(task); |
a1d8b49a AG |
4021 | BUG_ON(!cdb); |
4022 | ||
4023 | memcpy(cdb, cmd->t_task_cdb, | |
4024 | scsi_command_size(cmd->t_task_cdb)); | |
4025 | ||
4026 | /* Update new cdb with updated lba/sectors */ | |
3a867205 | 4027 | cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb); |
525a48a2 NB |
4028 | /* |
4029 | * This now assumes that passed sg_ents are in PAGE_SIZE chunks | |
4030 | * in order to calculate the number per task SGL entries | |
4031 | */ | |
4032 | task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE); | |
c66ac9db | 4033 | /* |
ec98f782 AG |
4034 | * Check if the fabric module driver is requesting that all |
4035 | * struct se_task->task_sg[] be chained together.. If so, | |
4036 | * then allocate an extra padding SG entry for linking and | |
c3c74c7a NB |
4037 | * marking the end of the chained SGL for every task except |
4038 | * the last one for (task_count > 1) operation, or skipping | |
4039 | * the extra padding for the (task_count == 1) case. | |
c66ac9db | 4040 | */ |
c3c74c7a NB |
4041 | if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) { |
4042 | task_sg_nents_padded = (task->task_sg_nents + 1); | |
ec98f782 | 4043 | task->task_padded_sg = 1; |
c3c74c7a NB |
4044 | } else |
4045 | task_sg_nents_padded = task->task_sg_nents; | |
c66ac9db | 4046 | |
1d20bb61 | 4047 | task->task_sg = kmalloc(sizeof(struct scatterlist) * |
c3c74c7a | 4048 | task_sg_nents_padded, GFP_KERNEL); |
ec98f782 AG |
4049 | if (!task->task_sg) { |
4050 | cmd->se_dev->transport->free_task(task); | |
4051 | return -ENOMEM; | |
4052 | } | |
4053 | ||
c3c74c7a | 4054 | sg_init_table(task->task_sg, task_sg_nents_padded); |
c66ac9db | 4055 | |
ec98f782 AG |
4056 | task_size = task->task_size; |
4057 | ||
4058 | /* Build new sgl, only up to task_size */ | |
6708bb27 | 4059 | for_each_sg(task->task_sg, sg, task->task_sg_nents, count) { |
ec98f782 AG |
4060 | if (cmd_sg->length > task_size) |
4061 | break; | |
4062 | ||
4063 | *sg = *cmd_sg; | |
4064 | task_size -= cmd_sg->length; | |
4065 | cmd_sg = sg_next(cmd_sg); | |
c66ac9db | 4066 | } |
c66ac9db | 4067 | |
ec98f782 AG |
4068 | lba += task->task_sectors; |
4069 | sectors -= task->task_sectors; | |
c66ac9db | 4070 | |
ec98f782 AG |
4071 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
4072 | list_add_tail(&task->t_list, &cmd->t_task_list); | |
4073 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
c66ac9db NB |
4074 | } |
4075 | ||
ec98f782 | 4076 | return task_count; |
c66ac9db NB |
4077 | } |
4078 | ||
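/*
 * Worked example (illustrative, assumed attribute values): a 1 MB WRITE on
 * a 512-byte block_size device gives sectors = DIV_ROUND_UP(1048576, 512)
 * = 2048.  With max_sectors = 512 that becomes task_count =
 * DIV_ROUND_UP_SECTOR_T(2048, 512) = 4 struct se_task entries, each
 * covering 512 sectors (256 kB) and each built from
 * DIV_ROUND_UP(262144, PAGE_SIZE) = 64 SGL entries with 4 kB pages.
 */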
4079 | static int | |
ec98f782 | 4080 | transport_allocate_control_task(struct se_cmd *cmd) |
c66ac9db | 4081 | { |
5951146d | 4082 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
4083 | unsigned char *cdb; |
4084 | struct se_task *task; | |
ec98f782 | 4085 | unsigned long flags; |
c66ac9db NB |
4086 | |
4087 | task = transport_generic_get_task(cmd, cmd->data_direction); | |
4088 | if (!task) | |
ec98f782 | 4089 | return -ENOMEM; |
c66ac9db | 4090 | |
e3d6f909 | 4091 | cdb = dev->transport->get_cdb(task); |
a1d8b49a AG |
4092 | BUG_ON(!cdb); |
4093 | memcpy(cdb, cmd->t_task_cdb, | |
4094 | scsi_command_size(cmd->t_task_cdb)); | |
c66ac9db | 4095 | |
ec98f782 AG |
4096 | task->task_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents, |
4097 | GFP_KERNEL); | |
4098 | if (!task->task_sg) { | |
4099 | cmd->se_dev->transport->free_task(task); | |
4100 | return -ENOMEM; | |
4101 | } | |
4102 | ||
4103 | memcpy(task->task_sg, cmd->t_data_sg, | |
4104 | sizeof(struct scatterlist) * cmd->t_data_nents); | |
c66ac9db | 4105 | task->task_size = cmd->data_length; |
6708bb27 | 4106 | task->task_sg_nents = cmd->t_data_nents; |
c66ac9db | 4107 | |
ec98f782 AG |
4108 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
4109 | list_add_tail(&task->t_list, &cmd->t_task_list); | |
4110 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
c66ac9db | 4111 | |
6708bb27 | 4112 | /* Success! Return number of tasks allocated */ |
a3eedc22 | 4113 | return 1; |
ec98f782 AG |
4114 | } |
4115 | ||
4116 | static u32 transport_allocate_tasks( | |
4117 | struct se_cmd *cmd, | |
4118 | unsigned long long lba, | |
4119 | enum dma_data_direction data_direction, | |
4120 | struct scatterlist *sgl, | |
4121 | unsigned int sgl_nents) | |
4122 | { | |
01cde4d5 NB |
4123 | if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { |
4124 | if (transport_cmd_get_valid_sectors(cmd) < 0) | |
4125 | return -EINVAL; | |
4126 | ||
ec98f782 AG |
4127 | return transport_allocate_data_tasks(cmd, lba, data_direction, |
4128 | sgl, sgl_nents); | |
01cde4d5 | 4129 | } else |
6708bb27 AG |
4130 | return transport_allocate_control_task(cmd); |
4131 | ||
c66ac9db NB |
4132 | } |
4133 | ||
ec98f782 | 4134 | |
c66ac9db NB |
4135 | /* transport_generic_new_cmd(): Called from transport_processing_thread() |
4136 | * | |
4137 | * Allocate storage transport resources from a set of values predefined | |
4138 | * by transport_generic_cmd_sequencer() from the iSCSI Target RX process. | |
4139 | * Any non zero return here is treated as an "out of resource" op. | |
4140 | */ | |
4141 | /* | |
4142 | * Generate struct se_task(s) and/or their payloads for this CDB. | |
4143 | */ | |
a1d8b49a | 4144 | int transport_generic_new_cmd(struct se_cmd *cmd) |
c66ac9db | 4145 | { |
c66ac9db NB |
4146 | int ret = 0; |
4147 | ||
4148 | /* | |
4149 | * Determine if the TCM fabric module has already allocated physical | |
4150 | * memory, and is directly calling transport_generic_map_mem_to_cmd() | |
ec98f782 | 4151 | * beforehand. |
c66ac9db | 4152 | */ |
ec98f782 AG |
4153 | if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && |
4154 | cmd->data_length) { | |
05d1c7c0 | 4155 | ret = transport_generic_get_mem(cmd); |
c66ac9db NB |
4156 | if (ret < 0) |
4157 | return ret; | |
4158 | } | |
1d20bb61 NB |
4159 | /* |
4160 | * Call transport_new_cmd_obj() to invoke transport_allocate_tasks() for | |
4161 | * control or data CDB types, and perform the map to backend subsystem | |
4162 | * code from SGL memory allocated here by transport_generic_get_mem(), or | |
4164 | * via pre-existing SGL memory setup explicitly by fabric module code with | |
4164 | * transport_generic_map_mem_to_cmd(). | |
4165 | */ | |
c66ac9db NB |
4166 | ret = transport_new_cmd_obj(cmd); |
4167 | if (ret < 0) | |
4168 | return ret; | |
c66ac9db | 4169 | /* |
a1d8b49a | 4170 | * For WRITEs, let the fabric know its buffer is ready.. |
c66ac9db NB |
4171 | * This WRITE struct se_cmd (and all of its associated struct se_task's) |
4172 | * will be added to the struct se_device execution queue after its WRITE | |
4173 | * data has arrived. (ie: It gets handled by the transport processing | |
4174 | * thread a second time) | |
4175 | */ | |
4176 | if (cmd->data_direction == DMA_TO_DEVICE) { | |
4177 | transport_add_tasks_to_state_queue(cmd); | |
4178 | return transport_generic_write_pending(cmd); | |
4179 | } | |
4180 | /* | |
4181 | * Everything else but a WRITE, add the struct se_cmd's struct se_task's | |
4182 | * to the execution queue. | |
4183 | */ | |
4184 | transport_execute_tasks(cmd); | |
4185 | return 0; | |
4186 | } | |
a1d8b49a | 4187 | EXPORT_SYMBOL(transport_generic_new_cmd); |
c66ac9db NB |
4188 | |
4189 | /* transport_generic_process_write(): | |
4190 | * | |
4191 | * | |
4192 | */ | |
4193 | void transport_generic_process_write(struct se_cmd *cmd) | |
4194 | { | |
c66ac9db NB |
4195 | transport_execute_tasks(cmd); |
4196 | } | |
4197 | EXPORT_SYMBOL(transport_generic_process_write); | |
4198 | ||
07bde79a NB |
4199 | static int transport_write_pending_qf(struct se_cmd *cmd) |
4200 | { | |
4201 | return cmd->se_tfo->write_pending(cmd); | |
4202 | } | |
4203 | ||
c66ac9db NB |
4204 | /* transport_generic_write_pending(): |
4205 | * | |
4206 | * | |
4207 | */ | |
4208 | static int transport_generic_write_pending(struct se_cmd *cmd) | |
4209 | { | |
4210 | unsigned long flags; | |
4211 | int ret; | |
4212 | ||
a1d8b49a | 4213 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 4214 | cmd->t_state = TRANSPORT_WRITE_PENDING; |
a1d8b49a | 4215 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
07bde79a NB |
4216 | |
4217 | if (cmd->transport_qf_callback) { | |
4218 | ret = cmd->transport_qf_callback(cmd); | |
4219 | if (ret == -EAGAIN) | |
4220 | goto queue_full; | |
4221 | else if (ret < 0) | |
4222 | return ret; | |
4223 | ||
4224 | cmd->transport_qf_callback = NULL; | |
4225 | return 0; | |
4226 | } | |
05d1c7c0 | 4227 | |
c66ac9db NB |
4228 | /* |
4229 | * Clear the se_cmd for WRITE_PENDING status in order to set | |
a1d8b49a | 4230 | * cmd->t_transport_active=0 so that transport_generic_handle_data |
c66ac9db | 4231 | * can be called from HW target mode interrupt code. This is safe |
e3d6f909 | 4232 | * to be called with transport_off=1 before the cmd->se_tfo->write_pending |
c66ac9db NB |
4233 | * because the se_cmd->se_lun pointer is not being cleared. |
4234 | */ | |
4235 | transport_cmd_check_stop(cmd, 1, 0); | |
4236 | ||
4237 | /* | |
4238 | * Call the fabric write_pending function here to let the | |
4239 | * frontend know that WRITE buffers are ready. | |
4240 | */ | |
e3d6f909 | 4241 | ret = cmd->se_tfo->write_pending(cmd); |
07bde79a NB |
4242 | if (ret == -EAGAIN) |
4243 | goto queue_full; | |
4244 | else if (ret < 0) | |
c66ac9db NB |
4245 | return ret; |
4246 | ||
4247 | return PYX_TRANSPORT_WRITE_PENDING; | |
07bde79a NB |
4248 | |
4249 | queue_full: | |
6708bb27 | 4250 | pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd); |
07bde79a NB |
4251 | cmd->t_state = TRANSPORT_COMPLETE_QF_WP; |
4252 | transport_handle_queue_full(cmd, cmd->se_dev, | |
4253 | transport_write_pending_qf); | |
4254 | return ret; | |
c66ac9db NB |
4255 | } |
4256 | ||
2dbc43d2 CH |
4257 | /** |
4258 | * transport_release_cmd - free a command | |
4259 | * @cmd: command to free | |
4260 | * | |
4261 | * This routine unconditionally frees a command, and reference counting | |
4262 | * or list removal must be done in the caller. | |
4263 | */ | |
35462975 | 4264 | void transport_release_cmd(struct se_cmd *cmd) |
c66ac9db | 4265 | { |
e3d6f909 | 4266 | BUG_ON(!cmd->se_tfo); |
c66ac9db | 4267 | |
2dbc43d2 CH |
4268 | if (cmd->se_tmr_req) |
4269 | core_tmr_release_req(cmd->se_tmr_req); | |
4270 | if (cmd->t_task_cdb != cmd->__t_task_cdb) | |
4271 | kfree(cmd->t_task_cdb); | |
35462975 | 4272 | cmd->se_tfo->release_cmd(cmd); |
c66ac9db | 4273 | } |
35462975 | 4274 | EXPORT_SYMBOL(transport_release_cmd); |
c66ac9db | 4275 | |
39c05f32 | 4276 | void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) |
c66ac9db | 4277 | { |
d14921d6 NB |
4278 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { |
4279 | if (wait_for_tasks && cmd->se_tmr_req) | |
4280 | transport_wait_for_tasks(cmd); | |
4281 | ||
35462975 | 4282 | transport_release_cmd(cmd); |
d14921d6 NB |
4283 | } else { |
4284 | if (wait_for_tasks) | |
4285 | transport_wait_for_tasks(cmd); | |
4286 | ||
c66ac9db NB |
4287 | core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd); |
4288 | ||
82f1c8a4 | 4289 | if (cmd->se_lun) |
c66ac9db | 4290 | transport_lun_remove_cmd(cmd); |
c66ac9db | 4291 | |
f4366772 NB |
4292 | transport_free_dev_tasks(cmd); |
4293 | ||
39c05f32 | 4294 | transport_put_cmd(cmd); |
c66ac9db NB |
4295 | } |
4296 | } | |
4297 | EXPORT_SYMBOL(transport_generic_free_cmd); | |
4298 | ||
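/*
 * Illustrative sketch, hypothetical fabric helper: the common release pattern
 * once a response has been sent.  Passing wait_for_tasks = 1 instead makes the
 * core block in transport_wait_for_tasks() before tearing the command down.
 */
static void example_fabric_release_cmd(struct se_cmd *cmd)
{
	/* Normal completion path: nothing left to wait for */
	transport_generic_free_cmd(cmd, 0);
}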
c66ac9db NB |
4299 | /* transport_lun_wait_for_tasks(): |
4300 | * | |
4301 | * Called from ConfigFS context to stop the passed struct se_cmd to allow | |
4302 | * a struct se_lun to be successfully shut down. | |
4303 | */ | |
4304 | static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) | |
4305 | { | |
4306 | unsigned long flags; | |
4307 | int ret; | |
4308 | /* | |
4309 | * If the frontend has already requested this struct se_cmd to | |
4310 | * be stopped, we can safely ignore this struct se_cmd. | |
4311 | */ | |
a1d8b49a AG |
4312 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
4313 | if (atomic_read(&cmd->t_transport_stop)) { | |
4314 | atomic_set(&cmd->transport_lun_stop, 0); | |
6708bb27 | 4315 | pr_debug("ConfigFS ITT[0x%08x] - t_transport_stop ==" |
e3d6f909 | 4316 | " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd)); |
a1d8b49a | 4317 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 4318 | transport_cmd_check_stop(cmd, 1, 0); |
e3d6f909 | 4319 | return -EPERM; |
c66ac9db | 4320 | } |
a1d8b49a AG |
4321 | atomic_set(&cmd->transport_lun_fe_stop, 1); |
4322 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
c66ac9db | 4323 | |
5951146d | 4324 | wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq); |
c66ac9db NB |
4325 | |
4326 | ret = transport_stop_tasks_for_cmd(cmd); | |
4327 | ||
6708bb27 AG |
4328 | pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:" |
4329 | " %d\n", cmd, cmd->t_task_list_num, ret); | |
c66ac9db | 4330 | if (!ret) { |
6708bb27 | 4331 | pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n", |
e3d6f909 | 4332 | cmd->se_tfo->get_task_tag(cmd)); |
a1d8b49a | 4333 | wait_for_completion(&cmd->transport_lun_stop_comp); |
6708bb27 | 4334 | pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n", |
e3d6f909 | 4335 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db | 4336 | } |
5951146d | 4337 | transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); |
c66ac9db NB |
4338 | |
4339 | return 0; | |
4340 | } | |
4341 | ||
c66ac9db NB |
4342 | static void __transport_clear_lun_from_sessions(struct se_lun *lun) |
4343 | { | |
4344 | struct se_cmd *cmd = NULL; | |
4345 | unsigned long lun_flags, cmd_flags; | |
4346 | /* | |
4347 | * Do exception processing and return CHECK_CONDITION status to the | |
4348 | * Initiator Port. | |
4349 | */ | |
4350 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); | |
5951146d AG |
4351 | while (!list_empty(&lun->lun_cmd_list)) { |
4352 | cmd = list_first_entry(&lun->lun_cmd_list, | |
4353 | struct se_cmd, se_lun_node); | |
4354 | list_del(&cmd->se_lun_node); | |
4355 | ||
a1d8b49a | 4356 | atomic_set(&cmd->transport_lun_active, 0); |
c66ac9db NB |
4357 | /* |
4358 | * This will notify target_core_transport.c: | |
4359 | * transport_cmd_check_stop() that a LUN shutdown is in | |
4360 | * progress for the iscsi_cmd_t. | |
4361 | */ | |
a1d8b49a | 4362 | spin_lock(&cmd->t_state_lock); |
6708bb27 | 4363 | pr_debug("SE_LUN[%d] - Setting cmd->transport" |
c66ac9db | 4364 | "_lun_stop for ITT: 0x%08x\n", |
e3d6f909 AG |
4365 | cmd->se_lun->unpacked_lun, |
4366 | cmd->se_tfo->get_task_tag(cmd)); | |
a1d8b49a AG |
4367 | atomic_set(&cmd->transport_lun_stop, 1); |
4368 | spin_unlock(&cmd->t_state_lock); | |
c66ac9db NB |
4369 | |
4370 | spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); | |
4371 | ||
6708bb27 AG |
4372 | if (!cmd->se_lun) { |
4373 | pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n", | |
e3d6f909 AG |
4374 | cmd->se_tfo->get_task_tag(cmd), |
4375 | cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); | |
c66ac9db NB |
4376 | BUG(); |
4377 | } | |
4378 | /* | |
4379 | * If the Storage engine still owns the iscsi_cmd_t, determine | |
4380 | * and/or stop its context. | |
4381 | */ | |
6708bb27 | 4382 | pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport" |
e3d6f909 AG |
4383 | "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun, |
4384 | cmd->se_tfo->get_task_tag(cmd)); | |
c66ac9db | 4385 | |
e3d6f909 | 4386 | if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) { |
c66ac9db NB |
4387 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); |
4388 | continue; | |
4389 | } | |
4390 | ||
6708bb27 | 4391 | pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun" |
c66ac9db | 4392 | "_wait_for_tasks(): SUCCESS\n", |
e3d6f909 AG |
4393 | cmd->se_lun->unpacked_lun, |
4394 | cmd->se_tfo->get_task_tag(cmd)); | |
c66ac9db | 4395 | |
a1d8b49a | 4396 | spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); |
6708bb27 | 4397 | if (!atomic_read(&cmd->transport_dev_active)) { |
a1d8b49a | 4398 | spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); |
c66ac9db NB |
4399 | goto check_cond; |
4400 | } | |
a1d8b49a | 4401 | atomic_set(&cmd->transport_dev_active, 0); |
c66ac9db | 4402 | transport_all_task_dev_remove_state(cmd); |
a1d8b49a | 4403 | spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); |
c66ac9db NB |
4404 | |
4405 | transport_free_dev_tasks(cmd); | |
4406 | /* | |
4407 | * The Storage engine stopped this struct se_cmd before it was | |
4408 | * sent to the fabric frontend for delivery back to the | |
4409 | * Initiator Node. Return this SCSI CDB back with a | |
4410 | * CHECK_CONDITION status. | |
4411 | */ | |
4412 | check_cond: | |
4413 | transport_send_check_condition_and_sense(cmd, | |
4414 | TCM_NON_EXISTENT_LUN, 0); | |
4415 | /* | |
4416 | * If the fabric frontend is waiting for this iscsi_cmd_t to | |
4417 | * be released, notify the waiting thread now that LU has | |
4418 | * finished accessing it. | |
4419 | */ | |
a1d8b49a AG |
4420 | spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); |
4421 | if (atomic_read(&cmd->transport_lun_fe_stop)) { | |
6708bb27 | 4422 | pr_debug("SE_LUN[%d] - Detected FE stop for" |
c66ac9db NB |
4423 | " struct se_cmd: %p ITT: 0x%08x\n", |
4424 | lun->unpacked_lun, | |
e3d6f909 | 4425 | cmd, cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db | 4426 | |
a1d8b49a | 4427 | spin_unlock_irqrestore(&cmd->t_state_lock, |
c66ac9db NB |
4428 | cmd_flags); |
4429 | transport_cmd_check_stop(cmd, 1, 0); | |
a1d8b49a | 4430 | complete(&cmd->transport_lun_fe_stop_comp); |
c66ac9db NB |
4431 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); |
4432 | continue; | |
4433 | } | |
6708bb27 | 4434 | pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n", |
e3d6f909 | 4435 | lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db | 4436 | |
a1d8b49a | 4437 | spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); |
c66ac9db NB |
4438 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); |
4439 | } | |
4440 | spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); | |
4441 | } | |
4442 | ||
4443 | static int transport_clear_lun_thread(void *p) | |
4444 | { | |
4445 | struct se_lun *lun = (struct se_lun *)p; | |
4446 | ||
4447 | __transport_clear_lun_from_sessions(lun); | |
4448 | complete(&lun->lun_shutdown_comp); | |
4449 | ||
4450 | return 0; | |
4451 | } | |
4452 | ||
4453 | int transport_clear_lun_from_sessions(struct se_lun *lun) | |
4454 | { | |
4455 | struct task_struct *kt; | |
4456 | ||
5951146d | 4457 | kt = kthread_run(transport_clear_lun_thread, lun, |
c66ac9db NB |
4458 | "tcm_cl_%u", lun->unpacked_lun); |
4459 | if (IS_ERR(kt)) { | |
6708bb27 | 4460 | pr_err("Unable to start clear_lun thread\n"); |
e3d6f909 | 4461 | return PTR_ERR(kt); |
c66ac9db NB |
4462 | } |
4463 | wait_for_completion(&lun->lun_shutdown_comp); | |
4464 | ||
4465 | return 0; | |
4466 | } | |
4467 | ||
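/*
 * Illustrative sketch, hypothetical ConfigFS-side caller: LUN removal quiesces
 * all in-flight commands through the helper above before the struct se_lun
 * itself is torn down.
 */
static void example_core_lun_shutdown(struct se_lun *lun)
{
	/* Spawns the tcm_cl_%u kthread and blocks on lun_shutdown_comp */
	transport_clear_lun_from_sessions(lun);
}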
d14921d6 NB |
4468 | /** |
4469 | * transport_wait_for_tasks - wait for completion to occur | |
4470 | * @cmd: command to wait for | |
c66ac9db | 4471 | * |
d14921d6 NB |
4472 | * Called from frontend fabric context to wait for storage engine |
4473 | * to pause and/or release frontend generated struct se_cmd. | |
c66ac9db | 4474 | */ |
d14921d6 | 4475 | void transport_wait_for_tasks(struct se_cmd *cmd) |
c66ac9db NB |
4476 | { |
4477 | unsigned long flags; | |
4478 | ||
a1d8b49a | 4479 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
d14921d6 NB |
4480 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) { |
4481 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
4482 | return; | |
4483 | } | |
4484 | /* | |
4485 | * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE | |
4486 | * has been set in transport_set_supported_SAM_opcode(). | |
4487 | */ | |
4488 | if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && !cmd->se_tmr_req) { | |
4489 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
4490 | return; | |
4491 | } | |
c66ac9db NB |
4492 | /* |
4493 | * If we are already stopped due to an external event (ie: LUN shutdown) | |
4494 | * sleep until the connection can have the passed struct se_cmd back. | |
a1d8b49a | 4495 | * The cmd->transport_lun_fe_stop_comp will be completed by | |
c66ac9db NB |
4496 | * transport_clear_lun_from_sessions() once the ConfigFS context caller |
4497 | * has completed its operation on the struct se_cmd. | |
4498 | */ | |
a1d8b49a | 4499 | if (atomic_read(&cmd->transport_lun_stop)) { |
c66ac9db | 4500 | |
6708bb27 | 4501 | pr_debug("wait_for_tasks: Stopping" |
e3d6f909 | 4502 | " wait_for_completion(&cmd->t_tasktransport_lun_fe" |
c66ac9db | 4503 | "_stop_comp); for ITT: 0x%08x\n", |
e3d6f909 | 4504 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db NB |
4505 | /* |
4506 | * There is a special case for WRITES where a FE exception + | |
4507 | * LUN shutdown means ConfigFS context is still sleeping on | |
4508 | * transport_lun_stop_comp in transport_lun_wait_for_tasks(). | |
4509 | * We go ahead and up transport_lun_stop_comp just to be sure | |
4510 | * here. | |
4511 | */ | |
a1d8b49a AG |
4512 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
4513 | complete(&cmd->transport_lun_stop_comp); | |
4514 | wait_for_completion(&cmd->transport_lun_fe_stop_comp); | |
4515 | spin_lock_irqsave(&cmd->t_state_lock, flags); | |
c66ac9db NB |
4516 | |
4517 | transport_all_task_dev_remove_state(cmd); | |
4518 | /* | |
4519 | * At this point, the frontend that originated this struct se_cmd | |
4520 | * owns the structure again, and it can be released through | |
4521 | * normal means below. | |
4522 | */ | |
6708bb27 | 4523 | pr_debug("wait_for_tasks: Stopped" |
e3d6f909 | 4524 | " wait_for_completion(&cmd->t_tasktransport_lun_fe_" |
c66ac9db | 4525 | "stop_comp); for ITT: 0x%08x\n", |
e3d6f909 | 4526 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db | 4527 | |
a1d8b49a | 4528 | atomic_set(&cmd->transport_lun_stop, 0); |
c66ac9db | 4529 | } |
a1d8b49a | 4530 | if (!atomic_read(&cmd->t_transport_active) || |
d14921d6 NB |
4531 | atomic_read(&cmd->t_transport_aborted)) { |
4532 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
4533 | return; | |
4534 | } | |
c66ac9db | 4535 | |
a1d8b49a | 4536 | atomic_set(&cmd->t_transport_stop, 1); |
c66ac9db | 4537 | |
6708bb27 | 4538 | pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x" |
c66ac9db | 4539 | " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop" |
e3d6f909 AG |
4540 | " = TRUE\n", cmd, cmd->se_tfo->get_task_tag(cmd), |
4541 | cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, | |
c66ac9db NB |
4542 | cmd->deferred_t_state); |
4543 | ||
a1d8b49a | 4544 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 4545 | |
5951146d | 4546 | wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq); |
c66ac9db | 4547 | |
a1d8b49a | 4548 | wait_for_completion(&cmd->t_transport_stop_comp); |
c66ac9db | 4549 | |
a1d8b49a AG |
4550 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
4551 | atomic_set(&cmd->t_transport_active, 0); | |
4552 | atomic_set(&cmd->t_transport_stop, 0); | |
c66ac9db | 4553 | |
6708bb27 | 4554 | pr_debug("wait_for_tasks: Stopped wait_for_compltion(" |
a1d8b49a | 4555 | "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n", |
e3d6f909 | 4556 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db | 4557 | |
d14921d6 | 4558 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 4559 | } |
d14921d6 | 4560 | EXPORT_SYMBOL(transport_wait_for_tasks); |
c66ac9db NB |
4561 | |
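/*
 * Illustrative sketch, hypothetical fabric teardown path: pairing
 * transport_wait_for_tasks() with the release helpers above when a command
 * must be quiesced before it is freed, e.g. on connection failure.
 */
static void example_fabric_quiesce_and_free(struct se_cmd *cmd)
{
	/* Block until the storage engine has paused or stopped this command */
	transport_wait_for_tasks(cmd);
	/* Then release it through the normal path */
	transport_generic_free_cmd(cmd, 0);
}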
4562 | static int transport_get_sense_codes( | |
4563 | struct se_cmd *cmd, | |
4564 | u8 *asc, | |
4565 | u8 *ascq) | |
4566 | { | |
4567 | *asc = cmd->scsi_asc; | |
4568 | *ascq = cmd->scsi_ascq; | |
4569 | ||
4570 | return 0; | |
4571 | } | |
4572 | ||
4573 | static int transport_set_sense_codes( | |
4574 | struct se_cmd *cmd, | |
4575 | u8 asc, | |
4576 | u8 ascq) | |
4577 | { | |
4578 | cmd->scsi_asc = asc; | |
4579 | cmd->scsi_ascq = ascq; | |
4580 | ||
4581 | return 0; | |
4582 | } | |
4583 | ||
4584 | int transport_send_check_condition_and_sense( | |
4585 | struct se_cmd *cmd, | |
4586 | u8 reason, | |
4587 | int from_transport) | |
4588 | { | |
4589 | unsigned char *buffer = cmd->sense_buffer; | |
4590 | unsigned long flags; | |
4591 | int offset; | |
4592 | u8 asc = 0, ascq = 0; | |
4593 | ||
a1d8b49a | 4594 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 4595 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { |
a1d8b49a | 4596 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
4597 | return 0; |
4598 | } | |
4599 | cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; | |
a1d8b49a | 4600 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
4601 | |
4602 | if (!reason && from_transport) | |
4603 | goto after_reason; | |
4604 | ||
4605 | if (!from_transport) | |
4606 | cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; | |
4607 | /* | |
4608 | * Data Segment and SenseLength of the fabric response PDU. | |
4609 | * | |
4610 | * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE | |
4611 | * from include/scsi/scsi_cmnd.h | |
4612 | */ | |
e3d6f909 | 4613 | offset = cmd->se_tfo->set_fabric_sense_len(cmd, |
c66ac9db NB |
4614 | TRANSPORT_SENSE_BUFFER); |
4615 | /* | |
4616 | * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses | |
4617 | * SENSE KEY values from include/scsi/scsi.h | |
4618 | */ | |
4619 | switch (reason) { | |
4620 | case TCM_NON_EXISTENT_LUN: | |
eb39d340 NB |
4621 | /* CURRENT ERROR */ |
4622 | buffer[offset] = 0x70; | |
4623 | /* ILLEGAL REQUEST */ | |
4624 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | |
4625 | /* LOGICAL UNIT NOT SUPPORTED */ | |
4626 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25; | |
4627 | break; | |
c66ac9db NB |
4628 | case TCM_UNSUPPORTED_SCSI_OPCODE: |
4629 | case TCM_SECTOR_COUNT_TOO_MANY: | |
4630 | /* CURRENT ERROR */ | |
4631 | buffer[offset] = 0x70; | |
4632 | /* ILLEGAL REQUEST */ | |
4633 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | |
4634 | /* INVALID COMMAND OPERATION CODE */ | |
4635 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20; | |
4636 | break; | |
4637 | case TCM_UNKNOWN_MODE_PAGE: | |
4638 | /* CURRENT ERROR */ | |
4639 | buffer[offset] = 0x70; | |
4640 | /* ILLEGAL REQUEST */ | |
4641 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | |
4642 | /* INVALID FIELD IN CDB */ | |
4643 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24; | |
4644 | break; | |
4645 | case TCM_CHECK_CONDITION_ABORT_CMD: | |
4646 | /* CURRENT ERROR */ | |
4647 | buffer[offset] = 0x70; | |
4648 | /* ABORTED COMMAND */ | |
4649 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
4650 | /* BUS DEVICE RESET FUNCTION OCCURRED */ | |
4651 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29; | |
4652 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03; | |
4653 | break; | |
4654 | case TCM_INCORRECT_AMOUNT_OF_DATA: | |
4655 | /* CURRENT ERROR */ | |
4656 | buffer[offset] = 0x70; | |
4657 | /* ABORTED COMMAND */ | |
4658 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
4659 | /* WRITE ERROR */ | |
4660 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c; | |
4661 | /* NOT ENOUGH UNSOLICITED DATA */ | |
4662 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d; | |
4663 | break; | |
4664 | case TCM_INVALID_CDB_FIELD: | |
4665 | /* CURRENT ERROR */ | |
4666 | buffer[offset] = 0x70; | |
4667 | /* ABORTED COMMAND */ | |
4668 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
4669 | /* INVALID FIELD IN CDB */ | |
4670 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24; | |
4671 | break; | |
4672 | case TCM_INVALID_PARAMETER_LIST: | |
4673 | /* CURRENT ERROR */ | |
4674 | buffer[offset] = 0x70; | |
4675 | /* ABORTED COMMAND */ | |
4676 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
4677 | /* INVALID FIELD IN PARAMETER LIST */ | |
4678 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26; | |
4679 | break; | |
4680 | case TCM_UNEXPECTED_UNSOLICITED_DATA: | |
4681 | /* CURRENT ERROR */ | |
4682 | buffer[offset] = 0x70; | |
4683 | /* ABORTED COMMAND */ | |
4684 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
4685 | /* WRITE ERROR */ | |
4686 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c; | |
4687 | /* UNEXPECTED_UNSOLICITED_DATA */ | |
4688 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c; | |
4689 | break; | |
4690 | case TCM_SERVICE_CRC_ERROR: | |
4691 | /* CURRENT ERROR */ | |
4692 | buffer[offset] = 0x70; | |
4693 | /* ABORTED COMMAND */ | |
4694 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
4695 | /* PROTOCOL SERVICE CRC ERROR */ | |
4696 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47; | |
4697 | /* N/A */ | |
4698 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05; | |
4699 | break; | |
4700 | case TCM_SNACK_REJECTED: | |
4701 | /* CURRENT ERROR */ | |
4702 | buffer[offset] = 0x70; | |
4703 | /* ABORTED COMMAND */ | |
4704 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
4705 | /* READ ERROR */ | |
4706 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11; | |
4707 | /* FAILED RETRANSMISSION REQUEST */ | |
4708 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13; | |
4709 | break; | |
4710 | case TCM_WRITE_PROTECTED: | |
4711 | /* CURRENT ERROR */ | |
4712 | buffer[offset] = 0x70; | |
4713 | /* DATA PROTECT */ | |
4714 | buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT; | |
4715 | /* WRITE PROTECTED */ | |
4716 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27; | |
4717 | break; | |
4718 | case TCM_CHECK_CONDITION_UNIT_ATTENTION: | |
4719 | /* CURRENT ERROR */ | |
4720 | buffer[offset] = 0x70; | |
4721 | /* UNIT ATTENTION */ | |
4722 | buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION; | |
4723 | core_scsi3_ua_for_check_condition(cmd, &asc, &ascq); | |
4724 | buffer[offset+SPC_ASC_KEY_OFFSET] = asc; | |
4725 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq; | |
4726 | break; | |
4727 | case TCM_CHECK_CONDITION_NOT_READY: | |
4728 | /* CURRENT ERROR */ | |
4729 | buffer[offset] = 0x70; | |
4730 | /* Not Ready */ | |
4731 | buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY; | |
4732 | transport_get_sense_codes(cmd, &asc, &ascq); | |
4733 | buffer[offset+SPC_ASC_KEY_OFFSET] = asc; | |
4734 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq; | |
4735 | break; | |
4736 | case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: | |
4737 | default: | |
4738 | /* CURRENT ERROR */ | |
4739 | buffer[offset] = 0x70; | |
4740 | /* ILLEGAL REQUEST */ | |
4741 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | |
4742 | /* LOGICAL UNIT COMMUNICATION FAILURE */ | |
4743 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80; | |
4744 | break; | |
4745 | } | |
4746 | /* | |
4747 | * This code uses linux/include/scsi/scsi.h SAM status codes! | |
4748 | */ | |
4749 | cmd->scsi_status = SAM_STAT_CHECK_CONDITION; | |
4750 | /* | |
4751 | * Automatically padded, this value is encoded in the fabric's | |
4752 | * data_length response PDU containing the SCSI defined sense data. | |
4753 | */ | |
4754 | cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset; | |
4755 | ||
4756 | after_reason: | |
07bde79a | 4757 | return cmd->se_tfo->queue_status(cmd); |
c66ac9db NB |
4758 | } |
4759 | EXPORT_SYMBOL(transport_send_check_condition_and_sense); | |
4760 | ||
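/*
 * Illustrative sketch, hypothetical caller: surfacing a CHECK CONDITION to the
 * initiator with the helper above.  TCM_UNSUPPORTED_SCSI_OPCODE is translated
 * by the switch in transport_send_check_condition_and_sense() into ILLEGAL
 * REQUEST with ASC 0x20 (INVALID COMMAND OPERATION CODE).
 */
static int example_reject_unsupported_opcode(struct se_cmd *cmd)
{
	/* from_transport = 0: the core emulates the sense data itself */
	return transport_send_check_condition_and_sense(cmd,
			TCM_UNSUPPORTED_SCSI_OPCODE, 0);
}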
4761 | int transport_check_aborted_status(struct se_cmd *cmd, int send_status) | |
4762 | { | |
4763 | int ret = 0; | |
4764 | ||
a1d8b49a | 4765 | if (atomic_read(&cmd->t_transport_aborted) != 0) { |
6708bb27 | 4766 | if (!send_status || |
c66ac9db NB |
4767 | (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) |
4768 | return 1; | |
4769 | #if 0 | |
6708bb27 | 4770 | pr_debug("Sending delayed SAM_STAT_TASK_ABORTED" |
c66ac9db | 4771 | " status for CDB: 0x%02x ITT: 0x%08x\n", |
a1d8b49a | 4772 | cmd->t_task_cdb[0], |
e3d6f909 | 4773 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db NB |
4774 | #endif |
4775 | cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; | |
e3d6f909 | 4776 | cmd->se_tfo->queue_status(cmd); |
c66ac9db NB |
4777 | ret = 1; |
4778 | } | |
4779 | return ret; | |
4780 | } | |
4781 | EXPORT_SYMBOL(transport_check_aborted_status); | |
4782 | ||
4783 | void transport_send_task_abort(struct se_cmd *cmd) | |
4784 | { | |
c252f003 NB |
4785 | unsigned long flags; |
4786 | ||
4787 | spin_lock_irqsave(&cmd->t_state_lock, flags); | |
4788 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { | |
4789 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
4790 | return; | |
4791 | } | |
4792 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
4793 | ||
c66ac9db NB |
4794 | /* |
4795 | * If there are still expected incoming fabric WRITEs, we wait | |
4796 | * until they have completed before sending a TASK_ABORTED | |
4797 | * response. This response with TASK_ABORTED status will be | |
4798 | * queued back to the fabric module by transport_check_aborted_status(). | |
4799 | */ | |
4800 | if (cmd->data_direction == DMA_TO_DEVICE) { | |
e3d6f909 | 4801 | if (cmd->se_tfo->write_pending_status(cmd) != 0) { |
a1d8b49a | 4802 | atomic_inc(&cmd->t_transport_aborted); |
c66ac9db NB |
4803 | smp_mb__after_atomic_inc(); |
4804 | cmd->scsi_status = SAM_STAT_TASK_ABORTED; | |
4805 | transport_new_cmd_failure(cmd); | |
4806 | return; | |
4807 | } | |
4808 | } | |
4809 | cmd->scsi_status = SAM_STAT_TASK_ABORTED; | |
4810 | #if 0 | |
6708bb27 | 4811 | pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x," |
a1d8b49a | 4812 | " ITT: 0x%08x\n", cmd->t_task_cdb[0], |
e3d6f909 | 4813 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db | 4814 | #endif |
e3d6f909 | 4815 | cmd->se_tfo->queue_status(cmd); |
c66ac9db NB |
4816 | } |
4817 | ||
4818 | /* transport_generic_do_tmr(): | |
4819 | * | |
4820 | * | |
4821 | */ | |
4822 | int transport_generic_do_tmr(struct se_cmd *cmd) | |
4823 | { | |
5951146d | 4824 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
4825 | struct se_tmr_req *tmr = cmd->se_tmr_req; |
4826 | int ret; | |
4827 | ||
4828 | switch (tmr->function) { | |
5c6cd613 | 4829 | case TMR_ABORT_TASK: |
c66ac9db NB |
4830 | tmr->response = TMR_FUNCTION_REJECTED; |
4831 | break; | |
5c6cd613 NB |
4832 | case TMR_ABORT_TASK_SET: |
4833 | case TMR_CLEAR_ACA: | |
4834 | case TMR_CLEAR_TASK_SET: | |
c66ac9db NB |
4835 | tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; |
4836 | break; | |
5c6cd613 | 4837 | case TMR_LUN_RESET: |
c66ac9db NB |
4838 | ret = core_tmr_lun_reset(dev, tmr, NULL, NULL); |
4839 | tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE : | |
4840 | TMR_FUNCTION_REJECTED; | |
4841 | break; | |
5c6cd613 | 4842 | case TMR_TARGET_WARM_RESET: |
c66ac9db NB |
4843 | tmr->response = TMR_FUNCTION_REJECTED; |
4844 | break; | |
5c6cd613 | 4845 | case TMR_TARGET_COLD_RESET: |
c66ac9db NB |
4846 | tmr->response = TMR_FUNCTION_REJECTED; |
4847 | break; | |
c66ac9db | 4848 | default: |
6708bb27 | 4849 | pr_err("Uknown TMR function: 0x%02x.\n", |
c66ac9db NB |
4850 | tmr->function); |
4851 | tmr->response = TMR_FUNCTION_REJECTED; | |
4852 | break; | |
4853 | } | |
4854 | ||
4855 | cmd->t_state = TRANSPORT_ISTATE_PROCESSING; | |
e3d6f909 | 4856 | cmd->se_tfo->queue_tm_rsp(cmd); |
c66ac9db NB |
4857 | |
4858 | transport_cmd_check_stop(cmd, 2, 0); | |
4859 | return 0; | |
4860 | } | |
4861 | ||
4862 | /* | |
4863 | * Called with spin_lock_irq(&dev->execute_task_lock); held | |
4864 | * | |
4865 | */ | |
4866 | static struct se_task * | |
4867 | transport_get_task_from_state_list(struct se_device *dev) | |
4868 | { | |
4869 | struct se_task *task; | |
4870 | ||
4871 | if (list_empty(&dev->state_task_list)) | |
4872 | return NULL; | |
4873 | ||
4874 | list_for_each_entry(task, &dev->state_task_list, t_state_list) | |
4875 | break; | |
4876 | ||
4877 | list_del(&task->t_state_list); | |
4878 | atomic_set(&task->task_state_active, 0); | |
4879 | ||
4880 | return task; | |
4881 | } | |
4882 | ||
4883 | static void transport_processing_shutdown(struct se_device *dev) | |
4884 | { | |
4885 | struct se_cmd *cmd; | |
c66ac9db | 4886 | struct se_task *task; |
c66ac9db NB |
4887 | unsigned long flags; |
4888 | /* | |
4889 | * Empty the struct se_device's struct se_task state list. | |
4890 | */ | |
4891 | spin_lock_irqsave(&dev->execute_task_lock, flags); | |
4892 | while ((task = transport_get_task_from_state_list(dev))) { | |
e3d6f909 | 4893 | if (!task->task_se_cmd) { |
6708bb27 | 4894 | pr_err("task->task_se_cmd is NULL!\n"); |
c66ac9db NB |
4895 | continue; |
4896 | } | |
e3d6f909 | 4897 | cmd = task->task_se_cmd; |
c66ac9db | 4898 | |
c66ac9db NB |
4899 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); |
4900 | ||
a1d8b49a | 4901 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 4902 | |
6708bb27 AG |
4903 | pr_debug("PT: cmd: %p task: %p ITT: 0x%08x," |
4904 | " i_state: %d, t_state/def_t_state:" | |
c66ac9db | 4905 | " %d/%d cdb: 0x%02x\n", cmd, task, |
6708bb27 AG |
4906 | cmd->se_tfo->get_task_tag(cmd), |
4907 | cmd->se_tfo->get_cmd_state(cmd), | |
c66ac9db | 4908 | cmd->t_state, cmd->deferred_t_state, |
a1d8b49a | 4909 | cmd->t_task_cdb[0]); |
6708bb27 | 4910 | pr_debug("PT: ITT[0x%08x] - t_tasks: %d t_task_cdbs_left:" |
c66ac9db NB |
4911 | " %d t_task_cdbs_sent: %d -- t_transport_active: %d" |
4912 | " t_transport_stop: %d t_transport_sent: %d\n", | |
e3d6f909 | 4913 | cmd->se_tfo->get_task_tag(cmd), |
6708bb27 | 4914 | cmd->t_task_list_num, |
a1d8b49a AG |
4915 | atomic_read(&cmd->t_task_cdbs_left), |
4916 | atomic_read(&cmd->t_task_cdbs_sent), | |
4917 | atomic_read(&cmd->t_transport_active), | |
4918 | atomic_read(&cmd->t_transport_stop), | |
4919 | atomic_read(&cmd->t_transport_sent)); | |
c66ac9db NB |
4920 | |
4921 | if (atomic_read(&task->task_active)) { | |
4922 | atomic_set(&task->task_stop, 1); | |
4923 | spin_unlock_irqrestore( | |
a1d8b49a | 4924 | &cmd->t_state_lock, flags); |
c66ac9db | 4925 | |
6708bb27 | 4926 | pr_debug("Waiting for task: %p to shutdown for dev:" |
c66ac9db NB |
4927 | " %p\n", task, dev); |
4928 | wait_for_completion(&task->task_stop_comp); | |
6708bb27 | 4929 | pr_debug("Completed task: %p shutdown for dev: %p\n", |
c66ac9db NB |
4930 | task, dev); |
4931 | ||
a1d8b49a AG |
4932 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
4933 | atomic_dec(&cmd->t_task_cdbs_left); | |
c66ac9db NB |
4934 | |
4935 | atomic_set(&task->task_active, 0); | |
4936 | atomic_set(&task->task_stop, 0); | |
52208ae3 NB |
4937 | } else { |
4938 | if (atomic_read(&task->task_execute_queue) != 0) | |
4939 | transport_remove_task_from_execute_queue(task, dev); | |
c66ac9db NB |
4940 | } |
4941 | __transport_stop_task_timer(task, &flags); | |
4942 | ||
6708bb27 | 4943 | if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) { |
c66ac9db | 4944 | spin_unlock_irqrestore( |
a1d8b49a | 4945 | &cmd->t_state_lock, flags); |
c66ac9db | 4946 | |
6708bb27 | 4947 | pr_debug("Skipping task: %p, dev: %p for" |
c66ac9db | 4948 | " t_task_cdbs_ex_left: %d\n", task, dev, |
a1d8b49a | 4949 | atomic_read(&cmd->t_task_cdbs_ex_left)); |
c66ac9db NB |
4950 | |
4951 | spin_lock_irqsave(&dev->execute_task_lock, flags); | |
4952 | continue; | |
4953 | } | |
4954 | ||
a1d8b49a | 4955 | if (atomic_read(&cmd->t_transport_active)) { |
6708bb27 | 4956 | pr_debug("got t_transport_active = 1 for task: %p, dev:" |
c66ac9db NB |
4957 | " %p\n", task, dev); |
4958 | ||
a1d8b49a | 4959 | if (atomic_read(&cmd->t_fe_count)) { |
c66ac9db | 4960 | spin_unlock_irqrestore( |
a1d8b49a | 4961 | &cmd->t_state_lock, flags); |
c66ac9db NB |
4962 | transport_send_check_condition_and_sense( |
4963 | cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, | |
4964 | 0); | |
4965 | transport_remove_cmd_from_queue(cmd, | |
5951146d | 4966 | &cmd->se_dev->dev_queue_obj); |
c66ac9db NB |
4967 | |
4968 | transport_lun_remove_cmd(cmd); | |
4969 | transport_cmd_check_stop(cmd, 1, 0); | |
4970 | } else { | |
4971 | spin_unlock_irqrestore( | |
a1d8b49a | 4972 | &cmd->t_state_lock, flags); |
c66ac9db NB |
4973 | |
4974 | transport_remove_cmd_from_queue(cmd, | |
5951146d | 4975 | &cmd->se_dev->dev_queue_obj); |
c66ac9db NB |
4976 | |
4977 | transport_lun_remove_cmd(cmd); | |
4978 | ||
4979 | if (transport_cmd_check_stop(cmd, 1, 0)) | |
e6a2573f | 4980 | transport_put_cmd(cmd); |
c66ac9db NB |
4981 | } |
4982 | ||
4983 | spin_lock_irqsave(&dev->execute_task_lock, flags); | |
4984 | continue; | |
4985 | } | |
6708bb27 | 4986 | pr_debug("Got t_transport_active = 0 for task: %p, dev: %p\n", |
c66ac9db NB |
4987 | task, dev); |
4988 | ||
a1d8b49a | 4989 | if (atomic_read(&cmd->t_fe_count)) { |
c66ac9db | 4990 | spin_unlock_irqrestore( |
a1d8b49a | 4991 | &cmd->t_state_lock, flags); |
c66ac9db NB |
4992 | transport_send_check_condition_and_sense(cmd, |
4993 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); | |
4994 | transport_remove_cmd_from_queue(cmd, | |
5951146d | 4995 | &cmd->se_dev->dev_queue_obj); |
c66ac9db NB |
4996 | |
4997 | transport_lun_remove_cmd(cmd); | |
4998 | transport_cmd_check_stop(cmd, 1, 0); | |
4999 | } else { | |
5000 | spin_unlock_irqrestore( | |
a1d8b49a | 5001 | &cmd->t_state_lock, flags); |
c66ac9db NB |
5002 | |
5003 | transport_remove_cmd_from_queue(cmd, | |
5951146d | 5004 | &cmd->se_dev->dev_queue_obj); |
c66ac9db NB |
5005 | transport_lun_remove_cmd(cmd); |
5006 | ||
5007 | if (transport_cmd_check_stop(cmd, 1, 0)) | |
e6a2573f | 5008 | transport_put_cmd(cmd); |
c66ac9db NB |
5009 | } |
5010 | ||
5011 | spin_lock_irqsave(&dev->execute_task_lock, flags); | |
5012 | } | |
5013 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | |
5014 | /* | |
5015 | * Empty the struct se_device's struct se_cmd list. | |
5016 | */ | |
5951146d | 5017 | while ((cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj))) { |
c66ac9db | 5018 | |
6708bb27 | 5019 | pr_debug("From Device Queue: cmd: %p t_state: %d\n", |
5951146d | 5020 | cmd, cmd->t_state); |
c66ac9db | 5021 | |
a1d8b49a | 5022 | if (atomic_read(&cmd->t_fe_count)) { |
c66ac9db NB |
5023 | transport_send_check_condition_and_sense(cmd, |
5024 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); | |
5025 | ||
5026 | transport_lun_remove_cmd(cmd); | |
5027 | transport_cmd_check_stop(cmd, 1, 0); | |
5028 | } else { | |
5029 | transport_lun_remove_cmd(cmd); | |
5030 | if (transport_cmd_check_stop(cmd, 1, 0)) | |
e6a2573f | 5031 | transport_put_cmd(cmd); |
c66ac9db | 5032 | } |
c66ac9db | 5033 | } |
c66ac9db NB |
5034 | } |
5035 | ||
5036 | /* transport_processing_thread(): | |
5037 | * | |
5038 | * | |
5039 | */ | |
5040 | static int transport_processing_thread(void *param) | |
5041 | { | |
5951146d | 5042 | int ret; |
c66ac9db NB |
5043 | struct se_cmd *cmd; |
5044 | struct se_device *dev = (struct se_device *) param; | |
c66ac9db NB |
5045 | |
5046 | set_user_nice(current, -20); | |
5047 | ||
5048 | while (!kthread_should_stop()) { | |
e3d6f909 AG |
5049 | ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq, |
5050 | atomic_read(&dev->dev_queue_obj.queue_cnt) || | |
c66ac9db NB |
5051 | kthread_should_stop()); |
5052 | if (ret < 0) | |
5053 | goto out; | |
5054 | ||
5055 | spin_lock_irq(&dev->dev_status_lock); | |
5056 | if (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) { | |
5057 | spin_unlock_irq(&dev->dev_status_lock); | |
5058 | transport_processing_shutdown(dev); | |
5059 | continue; | |
5060 | } | |
5061 | spin_unlock_irq(&dev->dev_status_lock); | |
5062 | ||
5063 | get_cmd: | |
5064 | __transport_execute_tasks(dev); | |
5065 | ||
5951146d AG |
5066 | cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj); |
5067 | if (!cmd) | |
c66ac9db NB |
5068 | continue; |
5069 | ||
5951146d | 5070 | switch (cmd->t_state) { |
680b73c5 CH |
5071 | case TRANSPORT_NEW_CMD: |
5072 | BUG(); | |
5073 | break; | |
c66ac9db | 5074 | case TRANSPORT_NEW_CMD_MAP: |
6708bb27 AG |
5075 | if (!cmd->se_tfo->new_cmd_map) { |
5076 | pr_err("cmd->se_tfo->new_cmd_map is" | |
c66ac9db NB |
5077 | " NULL for TRANSPORT_NEW_CMD_MAP\n"); |
5078 | BUG(); | |
5079 | } | |
e3d6f909 | 5080 | ret = cmd->se_tfo->new_cmd_map(cmd); |
c66ac9db NB |
5081 | if (ret < 0) { |
5082 | cmd->transport_error_status = ret; | |
5083 | transport_generic_request_failure(cmd, NULL, | |
5084 | 0, (cmd->data_direction != | |
5085 | DMA_TO_DEVICE)); | |
5086 | break; | |
5087 | } | |
c66ac9db | 5088 | ret = transport_generic_new_cmd(cmd); |
07bde79a NB |
5089 | if (ret == -EAGAIN) |
5090 | break; | |
5091 | else if (ret < 0) { | |
c66ac9db NB |
5092 | cmd->transport_error_status = ret; |
5093 | transport_generic_request_failure(cmd, NULL, | |
5094 | 0, (cmd->data_direction != | |
5095 | DMA_TO_DEVICE)); | |
5096 | } | |
5097 | break; | |
5098 | case TRANSPORT_PROCESS_WRITE: | |
5099 | transport_generic_process_write(cmd); | |
5100 | break; | |
5101 | case TRANSPORT_COMPLETE_OK: | |
5102 | transport_stop_all_task_timers(cmd); | |
5103 | transport_generic_complete_ok(cmd); | |
5104 | break; | |
5105 | case TRANSPORT_REMOVE: | |
e6a2573f | 5106 | transport_put_cmd(cmd); |
c66ac9db | 5107 | break; |
f4366772 | 5108 | case TRANSPORT_FREE_CMD_INTR: |
82f1c8a4 | 5109 | transport_generic_free_cmd(cmd, 0); |
f4366772 | 5110 | break; |
c66ac9db NB |
5111 | case TRANSPORT_PROCESS_TMR: |
5112 | transport_generic_do_tmr(cmd); | |
5113 | break; | |
5114 | case TRANSPORT_COMPLETE_FAILURE: | |
5115 | transport_generic_request_failure(cmd, NULL, 1, 1); | |
5116 | break; | |
5117 | case TRANSPORT_COMPLETE_TIMEOUT: | |
5118 | transport_stop_all_task_timers(cmd); | |
5119 | transport_generic_request_timeout(cmd); | |
5120 | break; | |
07bde79a NB |
5121 | case TRANSPORT_COMPLETE_QF_WP: |
5122 | transport_generic_write_pending(cmd); | |
5123 | break; | |
c66ac9db | 5124 | default: |
6708bb27 | 5125 | pr_err("Unknown t_state: %d deferred_t_state:" |
c66ac9db | 5126 | " %d for ITT: 0x%08x i_state: %d on SE LUN:" |
5951146d | 5127 | " %u\n", cmd->t_state, cmd->deferred_t_state, |
e3d6f909 AG |
5128 | cmd->se_tfo->get_task_tag(cmd), |
5129 | cmd->se_tfo->get_cmd_state(cmd), | |
5130 | cmd->se_lun->unpacked_lun); | |
c66ac9db NB |
5131 | BUG(); |
5132 | } | |
5133 | ||
5134 | goto get_cmd; | |
5135 | } | |
5136 | ||
5137 | out: | |
5138 | transport_release_all_cmds(dev); | |
5139 | dev->process_thread = NULL; | |
5140 | return 0; | |
5141 | } |