Commit | Line | Data |
---|---|---|
c66ac9db NB |
1 | /******************************************************************************* |
2 | * Filename: target_core_transport.c | |
3 | * | |
4 | * This file contains the Generic Target Engine Core. | |
5 | * | |
6 | * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc. | |
7 | * Copyright (c) 2005, 2006, 2007 SBE, Inc. | |
8 | * Copyright (c) 2007-2010 Rising Tide Systems | |
9 | * Copyright (c) 2008-2010 Linux-iSCSI.org | |
10 | * | |
11 | * Nicholas A. Bellinger <nab@kernel.org> | |
12 | * | |
13 | * This program is free software; you can redistribute it and/or modify | |
14 | * it under the terms of the GNU General Public License as published by | |
15 | * the Free Software Foundation; either version 2 of the License, or | |
16 | * (at your option) any later version. | |
17 | * | |
18 | * This program is distributed in the hope that it will be useful, | |
19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
21 | * GNU General Public License for more details. | |
22 | * | |
23 | * You should have received a copy of the GNU General Public License | |
24 | * along with this program; if not, write to the Free Software | |
25 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
26 | * | |
27 | ******************************************************************************/ | |
28 | ||
c66ac9db NB |
29 | #include <linux/net.h> |
30 | #include <linux/delay.h> | |
31 | #include <linux/string.h> | |
32 | #include <linux/timer.h> | |
33 | #include <linux/slab.h> | |
34 | #include <linux/blkdev.h> | |
35 | #include <linux/spinlock.h> | |
c66ac9db NB |
36 | #include <linux/kthread.h> |
37 | #include <linux/in.h> | |
38 | #include <linux/cdrom.h> | |
39 | #include <asm/unaligned.h> | |
40 | #include <net/sock.h> | |
41 | #include <net/tcp.h> | |
42 | #include <scsi/scsi.h> | |
43 | #include <scsi/scsi_cmnd.h> | |
e66ecd50 | 44 | #include <scsi/scsi_tcq.h> |
c66ac9db NB |
45 | |
46 | #include <target/target_core_base.h> | |
47 | #include <target/target_core_device.h> | |
48 | #include <target/target_core_tmr.h> | |
49 | #include <target/target_core_tpg.h> | |
50 | #include <target/target_core_transport.h> | |
51 | #include <target/target_core_fabric_ops.h> | |
52 | #include <target/target_core_configfs.h> | |
53 | ||
54 | #include "target_core_alua.h" | |
55 | #include "target_core_hba.h" | |
56 | #include "target_core_pr.h" | |
57 | #include "target_core_scdb.h" | |
58 | #include "target_core_ua.h" | |
59 | ||
e3d6f909 | 60 | static int sub_api_initialized; |
c66ac9db NB |
61 | |
62 | static struct kmem_cache *se_cmd_cache; | |
63 | static struct kmem_cache *se_sess_cache; | |
64 | struct kmem_cache *se_tmr_req_cache; | |
65 | struct kmem_cache *se_ua_cache; | |
c66ac9db NB |
66 | struct kmem_cache *t10_pr_reg_cache; |
67 | struct kmem_cache *t10_alua_lu_gp_cache; | |
68 | struct kmem_cache *t10_alua_lu_gp_mem_cache; | |
69 | struct kmem_cache *t10_alua_tg_pt_gp_cache; | |
70 | struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; | |
71 | ||
72 | /* Used for transport_dev_get_map_*() */ | |
73 | typedef int (*map_func_t)(struct se_task *, u32); | |
74 | ||
75 | static int transport_generic_write_pending(struct se_cmd *); | |
5951146d | 76 | static int transport_processing_thread(void *param); |
c66ac9db NB |
77 | static int __transport_execute_tasks(struct se_device *dev); |
78 | static void transport_complete_task_attr(struct se_cmd *cmd); | |
07bde79a NB |
79 | static int transport_complete_qf(struct se_cmd *cmd); |
80 | static void transport_handle_queue_full(struct se_cmd *cmd, | |
81 | struct se_device *dev, int (*qf_callback)(struct se_cmd *)); | |
c66ac9db NB |
82 | static void transport_direct_request_timeout(struct se_cmd *cmd); |
83 | static void transport_free_dev_tasks(struct se_cmd *cmd); | |
a1d8b49a | 84 | static u32 transport_allocate_tasks(struct se_cmd *cmd, |
ec98f782 | 85 | unsigned long long starting_lba, |
c66ac9db | 86 | enum dma_data_direction data_direction, |
ec98f782 | 87 | struct scatterlist *sgl, unsigned int nents); |
05d1c7c0 | 88 | static int transport_generic_get_mem(struct se_cmd *cmd); |
c66ac9db | 89 | static int transport_generic_remove(struct se_cmd *cmd, |
35462975 | 90 | int session_reinstatement); |
c66ac9db NB |
91 | static void transport_release_fe_cmd(struct se_cmd *cmd); |
92 | static void transport_remove_cmd_from_queue(struct se_cmd *cmd, | |
93 | struct se_queue_obj *qobj); | |
94 | static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); | |
95 | static void transport_stop_all_task_timers(struct se_cmd *cmd); | |
96 | ||
e3d6f909 | 97 | int init_se_kmem_caches(void) |
c66ac9db | 98 | { |
c66ac9db NB |
99 | se_cmd_cache = kmem_cache_create("se_cmd_cache", |
100 | sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL); | |
6708bb27 AG |
101 | if (!se_cmd_cache) { |
102 | pr_err("kmem_cache_create for struct se_cmd failed\n"); | |
c66ac9db NB |
103 | goto out; |
104 | } | |
105 | se_tmr_req_cache = kmem_cache_create("se_tmr_cache", | |
106 | sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req), | |
107 | 0, NULL); | |
6708bb27 AG |
108 | if (!se_tmr_req_cache) { |
109 | pr_err("kmem_cache_create() for struct se_tmr_req" | |
c66ac9db NB |
110 | " failed\n"); |
111 | goto out; | |
112 | } | |
113 | se_sess_cache = kmem_cache_create("se_sess_cache", | |
114 | sizeof(struct se_session), __alignof__(struct se_session), | |
115 | 0, NULL); | |
6708bb27 AG |
116 | if (!se_sess_cache) { |
117 | pr_err("kmem_cache_create() for struct se_session" | |
c66ac9db NB |
118 | " failed\n"); |
119 | goto out; | |
120 | } | |
121 | se_ua_cache = kmem_cache_create("se_ua_cache", | |
122 | sizeof(struct se_ua), __alignof__(struct se_ua), | |
123 | 0, NULL); | |
6708bb27 AG |
124 | if (!se_ua_cache) { |
125 | pr_err("kmem_cache_create() for struct se_ua failed\n"); | |
c66ac9db NB |
126 | goto out; |
127 | } | |
c66ac9db NB |
128 | t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", |
129 | sizeof(struct t10_pr_registration), | |
130 | __alignof__(struct t10_pr_registration), 0, NULL); | |
6708bb27 AG |
131 | if (!t10_pr_reg_cache) { |
132 | pr_err("kmem_cache_create() for struct t10_pr_registration" | |
c66ac9db NB |
133 | " failed\n"); |
134 | goto out; | |
135 | } | |
136 | t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", | |
137 | sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp), | |
138 | 0, NULL); | |
6708bb27 AG |
139 | if (!t10_alua_lu_gp_cache) { |
140 | pr_err("kmem_cache_create() for t10_alua_lu_gp_cache" | |
c66ac9db NB |
141 | " failed\n"); |
142 | goto out; | |
143 | } | |
144 | t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", | |
145 | sizeof(struct t10_alua_lu_gp_member), | |
146 | __alignof__(struct t10_alua_lu_gp_member), 0, NULL); | |
6708bb27 AG |
147 | if (!t10_alua_lu_gp_mem_cache) { |
148 | pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_" | |
c66ac9db NB |
149 | "cache failed\n"); |
150 | goto out; | |
151 | } | |
152 | t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", | |
153 | sizeof(struct t10_alua_tg_pt_gp), | |
154 | __alignof__(struct t10_alua_tg_pt_gp), 0, NULL); | |
6708bb27 AG |
155 | if (!t10_alua_tg_pt_gp_cache) { |
156 | pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" | |
c66ac9db NB |
157 | "cache failed\n"); |
158 | goto out; | |
159 | } | |
160 | t10_alua_tg_pt_gp_mem_cache = kmem_cache_create( | |
161 | "t10_alua_tg_pt_gp_mem_cache", | |
162 | sizeof(struct t10_alua_tg_pt_gp_member), | |
163 | __alignof__(struct t10_alua_tg_pt_gp_member), | |
164 | 0, NULL); | |
6708bb27 AG |
165 | if (!t10_alua_tg_pt_gp_mem_cache) { |
166 | pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" | |
c66ac9db NB |
167 | "mem_t failed\n"); |
168 | goto out; | |
169 | } | |
170 | ||
c66ac9db NB |
171 | return 0; |
172 | out: | |
173 | if (se_cmd_cache) | |
174 | kmem_cache_destroy(se_cmd_cache); | |
175 | if (se_tmr_req_cache) | |
176 | kmem_cache_destroy(se_tmr_req_cache); | |
177 | if (se_sess_cache) | |
178 | kmem_cache_destroy(se_sess_cache); | |
179 | if (se_ua_cache) | |
180 | kmem_cache_destroy(se_ua_cache); | |
c66ac9db NB |
181 | if (t10_pr_reg_cache) |
182 | kmem_cache_destroy(t10_pr_reg_cache); | |
183 | if (t10_alua_lu_gp_cache) | |
184 | kmem_cache_destroy(t10_alua_lu_gp_cache); | |
185 | if (t10_alua_lu_gp_mem_cache) | |
186 | kmem_cache_destroy(t10_alua_lu_gp_mem_cache); | |
187 | if (t10_alua_tg_pt_gp_cache) | |
188 | kmem_cache_destroy(t10_alua_tg_pt_gp_cache); | |
189 | if (t10_alua_tg_pt_gp_mem_cache) | |
190 | kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); | |
e3d6f909 | 191 | return -ENOMEM; |
c66ac9db NB |
192 | } |
193 | ||
e3d6f909 | 194 | void release_se_kmem_caches(void) |
c66ac9db | 195 | { |
c66ac9db NB |
196 | kmem_cache_destroy(se_cmd_cache); |
197 | kmem_cache_destroy(se_tmr_req_cache); | |
198 | kmem_cache_destroy(se_sess_cache); | |
199 | kmem_cache_destroy(se_ua_cache); | |
c66ac9db NB |
200 | kmem_cache_destroy(t10_pr_reg_cache); |
201 | kmem_cache_destroy(t10_alua_lu_gp_cache); | |
202 | kmem_cache_destroy(t10_alua_lu_gp_mem_cache); | |
203 | kmem_cache_destroy(t10_alua_tg_pt_gp_cache); | |
204 | kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); | |
c66ac9db NB |
205 | } |
206 | ||
e3d6f909 AG |
207 | /* This code ensures unique mib indexes are handed out. */ |
208 | static DEFINE_SPINLOCK(scsi_mib_index_lock); | |
209 | static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; | |
e89d15ee NB |
210 | |
211 | /* | |
212 | * Allocate a new row index for the entry type specified | |
213 | */ | |
214 | u32 scsi_get_new_index(scsi_index_t type) | |
215 | { | |
216 | u32 new_index; | |
217 | ||
e3d6f909 | 218 | BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)); |
e89d15ee | 219 | |
e3d6f909 AG |
220 | spin_lock(&scsi_mib_index_lock); |
221 | new_index = ++scsi_mib_index[type]; | |
222 | spin_unlock(&scsi_mib_index_lock); | |
e89d15ee NB |
223 | |
224 | return new_index; | |
225 | } | |
226 | ||
c66ac9db NB |
227 | void transport_init_queue_obj(struct se_queue_obj *qobj) |
228 | { | |
229 | atomic_set(&qobj->queue_cnt, 0); | |
230 | INIT_LIST_HEAD(&qobj->qobj_list); | |
231 | init_waitqueue_head(&qobj->thread_wq); | |
232 | spin_lock_init(&qobj->cmd_queue_lock); | |
233 | } | |
234 | EXPORT_SYMBOL(transport_init_queue_obj); | |
235 | ||
236 | static int transport_subsystem_reqmods(void) | |
237 | { | |
238 | int ret; | |
239 | ||
240 | ret = request_module("target_core_iblock"); | |
241 | if (ret != 0) | |
6708bb27 | 242 | pr_err("Unable to load target_core_iblock\n"); |
c66ac9db NB |
243 | |
244 | ret = request_module("target_core_file"); | |
245 | if (ret != 0) | |
6708bb27 | 246 | pr_err("Unable to load target_core_file\n"); |
c66ac9db NB |
247 | |
248 | ret = request_module("target_core_pscsi"); | |
249 | if (ret != 0) | |
6708bb27 | 250 | pr_err("Unable to load target_core_pscsi\n"); |
c66ac9db NB |
251 | |
252 | ret = request_module("target_core_stgt"); | |
253 | if (ret != 0) | |
6708bb27 | 254 | pr_err("Unable to load target_core_stgt\n"); |
c66ac9db NB |
255 | |
256 | return 0; | |
257 | } | |
258 | ||
259 | int transport_subsystem_check_init(void) | |
260 | { | |
e3d6f909 AG |
261 | int ret; |
262 | ||
263 | if (sub_api_initialized) | |
c66ac9db NB |
264 | return 0; |
265 | /* | |
266 | * Request the loading of known TCM subsystem plugins.. | |
267 | */ | |
e3d6f909 AG |
268 | ret = transport_subsystem_reqmods(); |
269 | if (ret < 0) | |
270 | return ret; | |
c66ac9db | 271 | |
e3d6f909 | 272 | sub_api_initialized = 1; |
c66ac9db NB |
273 | return 0; |
274 | } | |
275 | ||
276 | struct se_session *transport_init_session(void) | |
277 | { | |
278 | struct se_session *se_sess; | |
279 | ||
280 | se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); | |
6708bb27 AG |
281 | if (!se_sess) { |
282 | pr_err("Unable to allocate struct se_session from" | |
c66ac9db NB |
283 | " se_sess_cache\n"); |
284 | return ERR_PTR(-ENOMEM); | |
285 | } | |
286 | INIT_LIST_HEAD(&se_sess->sess_list); | |
287 | INIT_LIST_HEAD(&se_sess->sess_acl_list); | |
c66ac9db NB |
288 | |
289 | return se_sess; | |
290 | } | |
291 | EXPORT_SYMBOL(transport_init_session); | |
292 | ||
293 | /* | |
294 | * Called with spin_lock_bh(&struct se_portal_group->session_lock) held. | |
295 | */ | |
296 | void __transport_register_session( | |
297 | struct se_portal_group *se_tpg, | |
298 | struct se_node_acl *se_nacl, | |
299 | struct se_session *se_sess, | |
300 | void *fabric_sess_ptr) | |
301 | { | |
302 | unsigned char buf[PR_REG_ISID_LEN]; | |
303 | ||
304 | se_sess->se_tpg = se_tpg; | |
305 | se_sess->fabric_sess_ptr = fabric_sess_ptr; | |
306 | /* | |
307 | * Used by struct se_node_acl's under ConfigFS to locate an active struct se_session | |
308 | * | |
309 | * Only set for struct se_session's that will actually be moving I/O. | |
310 | * eg: *NOT* discovery sessions. | |
311 | */ | |
312 | if (se_nacl) { | |
313 | /* | |
314 | * If the fabric module supports an ISID based TransportID, | |
315 | * save this value in binary from the fabric I_T Nexus now. | |
316 | */ | |
e3d6f909 | 317 | if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { |
c66ac9db | 318 | memset(&buf[0], 0, PR_REG_ISID_LEN); |
e3d6f909 | 319 | se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, |
c66ac9db NB |
320 | &buf[0], PR_REG_ISID_LEN); |
321 | se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); | |
322 | } | |
323 | spin_lock_irq(&se_nacl->nacl_sess_lock); | |
324 | /* | |
325 | * The se_nacl->nacl_sess pointer will be set to the | |
326 | * last active I_T Nexus for each struct se_node_acl. | |
327 | */ | |
328 | se_nacl->nacl_sess = se_sess; | |
329 | ||
330 | list_add_tail(&se_sess->sess_acl_list, | |
331 | &se_nacl->acl_sess_list); | |
332 | spin_unlock_irq(&se_nacl->nacl_sess_lock); | |
333 | } | |
334 | list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); | |
335 | ||
6708bb27 | 336 | pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", |
e3d6f909 | 337 | se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr); |
c66ac9db NB |
338 | } |
339 | EXPORT_SYMBOL(__transport_register_session); | |
340 | ||
341 | void transport_register_session( | |
342 | struct se_portal_group *se_tpg, | |
343 | struct se_node_acl *se_nacl, | |
344 | struct se_session *se_sess, | |
345 | void *fabric_sess_ptr) | |
346 | { | |
347 | spin_lock_bh(&se_tpg->session_lock); | |
348 | __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr); | |
349 | spin_unlock_bh(&se_tpg->session_lock); | |
350 | } | |
351 | EXPORT_SYMBOL(transport_register_session); | |
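/*
 * Editor's sketch (not part of this file): a minimal example, under stated
 * assumptions, of how a fabric module might pair the session exports above
 * when bringing up an I_T nexus. The function name is hypothetical, and the
 * se_nacl and fabric_sess_ptr values are assumed to come from fabric-specific
 * code.
 */
static int example_fabric_make_nexus(struct se_portal_group *se_tpg,
				     struct se_node_acl *se_nacl,
				     void *fabric_sess_ptr)
{
	struct se_session *se_sess;

	se_sess = transport_init_session();
	if (IS_ERR(se_sess))
		return PTR_ERR(se_sess);
	/* Fabrics set the backpointer read by transport_deregister_session_configfs() */
	se_sess->se_node_acl = se_nacl;
	/* Links the session onto se_tpg->tpg_sess_list under se_tpg->session_lock */
	transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	return 0;
}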
352 | ||
353 | void transport_deregister_session_configfs(struct se_session *se_sess) | |
354 | { | |
355 | struct se_node_acl *se_nacl; | |
23388864 | 356 | unsigned long flags; |
c66ac9db NB |
357 | /* |
358 | * Used by struct se_node_acl's under ConfigFS to locate active struct se_session | |
359 | */ | |
360 | se_nacl = se_sess->se_node_acl; | |
6708bb27 | 361 | if (se_nacl) { |
23388864 | 362 | spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); |
c66ac9db NB |
363 | list_del(&se_sess->sess_acl_list); |
364 | /* | |
365 | * If the session list is empty, then clear the pointer. | |
366 | * Otherwise, set the struct se_session pointer from the tail | |
367 | * element of the per struct se_node_acl active session list. | |
368 | */ | |
369 | if (list_empty(&se_nacl->acl_sess_list)) | |
370 | se_nacl->nacl_sess = NULL; | |
371 | else { | |
372 | se_nacl->nacl_sess = container_of( | |
373 | se_nacl->acl_sess_list.prev, | |
374 | struct se_session, sess_acl_list); | |
375 | } | |
23388864 | 376 | spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); |
c66ac9db NB |
377 | } |
378 | } | |
379 | EXPORT_SYMBOL(transport_deregister_session_configfs); | |
380 | ||
381 | void transport_free_session(struct se_session *se_sess) | |
382 | { | |
383 | kmem_cache_free(se_sess_cache, se_sess); | |
384 | } | |
385 | EXPORT_SYMBOL(transport_free_session); | |
386 | ||
387 | void transport_deregister_session(struct se_session *se_sess) | |
388 | { | |
389 | struct se_portal_group *se_tpg = se_sess->se_tpg; | |
390 | struct se_node_acl *se_nacl; | |
e63a8e19 | 391 | unsigned long flags; |
c66ac9db | 392 | |
6708bb27 | 393 | if (!se_tpg) { |
c66ac9db NB |
394 | transport_free_session(se_sess); |
395 | return; | |
396 | } | |
c66ac9db | 397 | |
e63a8e19 | 398 | spin_lock_irqsave(&se_tpg->session_lock, flags); |
c66ac9db NB |
399 | list_del(&se_sess->sess_list); |
400 | se_sess->se_tpg = NULL; | |
401 | se_sess->fabric_sess_ptr = NULL; | |
e63a8e19 | 402 | spin_unlock_irqrestore(&se_tpg->session_lock, flags); |
c66ac9db NB |
403 | |
404 | /* | |
405 | * Determine if we need to do extra work for this initiator node's | |
406 | * struct se_node_acl if it had been previously dynamically generated. | |
407 | */ | |
408 | se_nacl = se_sess->se_node_acl; | |
6708bb27 | 409 | if (se_nacl) { |
e63a8e19 | 410 | spin_lock_irqsave(&se_tpg->acl_node_lock, flags); |
c66ac9db | 411 | if (se_nacl->dynamic_node_acl) { |
6708bb27 AG |
412 | if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache( |
413 | se_tpg)) { | |
c66ac9db NB |
414 | list_del(&se_nacl->acl_list); |
415 | se_tpg->num_node_acls--; | |
e63a8e19 | 416 | spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); |
c66ac9db NB |
417 | |
418 | core_tpg_wait_for_nacl_pr_ref(se_nacl); | |
c66ac9db | 419 | core_free_device_list_for_node(se_nacl, se_tpg); |
e3d6f909 | 420 | se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, |
c66ac9db | 421 | se_nacl); |
e63a8e19 | 422 | spin_lock_irqsave(&se_tpg->acl_node_lock, flags); |
c66ac9db NB |
423 | } |
424 | } | |
e63a8e19 | 425 | spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); |
c66ac9db NB |
426 | } |
427 | ||
428 | transport_free_session(se_sess); | |
429 | ||
6708bb27 | 430 | pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", |
e3d6f909 | 431 | se_tpg->se_tpg_tfo->get_fabric_name()); |
c66ac9db NB |
432 | } |
433 | EXPORT_SYMBOL(transport_deregister_session); | |
434 | ||
435 | /* | |
a1d8b49a | 436 | * Called with cmd->t_state_lock held. |
c66ac9db NB |
437 | */ |
438 | static void transport_all_task_dev_remove_state(struct se_cmd *cmd) | |
439 | { | |
440 | struct se_device *dev; | |
441 | struct se_task *task; | |
442 | unsigned long flags; | |
443 | ||
a1d8b49a | 444 | list_for_each_entry(task, &cmd->t_task_list, t_list) { |
c66ac9db | 445 | dev = task->se_dev; |
6708bb27 | 446 | if (!dev) |
c66ac9db NB |
447 | continue; |
448 | ||
449 | if (atomic_read(&task->task_active)) | |
450 | continue; | |
451 | ||
6708bb27 | 452 | if (!atomic_read(&task->task_state_active)) |
c66ac9db NB |
453 | continue; |
454 | ||
455 | spin_lock_irqsave(&dev->execute_task_lock, flags); | |
456 | list_del(&task->t_state_list); | |
6708bb27 AG |
457 | pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n", |
458 | cmd->se_tfo->get_task_tag(cmd), dev, task); | |
c66ac9db NB |
459 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); |
460 | ||
461 | atomic_set(&task->task_state_active, 0); | |
a1d8b49a | 462 | atomic_dec(&cmd->t_task_cdbs_ex_left); |
c66ac9db NB |
463 | } |
464 | } | |
465 | ||
466 | /* transport_cmd_check_stop(): | |
467 | * | |
468 | * 'transport_off = 1' determines if t_transport_active should be cleared. | |
469 | * 'transport_off = 2' determines if task_dev_state should be removed. | |
470 | * | |
471 | * A non-zero u8 t_state sets cmd->t_state. | |
472 | * Returns 1 when command is stopped, else 0. | |
473 | */ | |
474 | static int transport_cmd_check_stop( | |
475 | struct se_cmd *cmd, | |
476 | int transport_off, | |
477 | u8 t_state) | |
478 | { | |
479 | unsigned long flags; | |
480 | ||
a1d8b49a | 481 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db NB |
482 | /* |
483 | * Determine if IOCTL context caller is requesting the stopping of this | |
484 | * command for LUN shutdown purposes. | |
485 | */ | |
a1d8b49a | 486 | if (atomic_read(&cmd->transport_lun_stop)) { |
6708bb27 | 487 | pr_debug("%s:%d atomic_read(&cmd->transport_lun_stop)" |
c66ac9db | 488 | " == TRUE for ITT: 0x%08x\n", __func__, __LINE__, |
e3d6f909 | 489 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db NB |
490 | |
491 | cmd->deferred_t_state = cmd->t_state; | |
492 | cmd->t_state = TRANSPORT_DEFERRED_CMD; | |
a1d8b49a | 493 | atomic_set(&cmd->t_transport_active, 0); |
c66ac9db NB |
494 | if (transport_off == 2) |
495 | transport_all_task_dev_remove_state(cmd); | |
a1d8b49a | 496 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 497 | |
a1d8b49a | 498 | complete(&cmd->transport_lun_stop_comp); |
c66ac9db NB |
499 | return 1; |
500 | } | |
501 | /* | |
502 | * Determine if frontend context caller is requesting the stopping of | |
e3d6f909 | 503 | * this command for frontend exceptions. |
c66ac9db | 504 | */ |
a1d8b49a | 505 | if (atomic_read(&cmd->t_transport_stop)) { |
6708bb27 | 506 | pr_debug("%s:%d atomic_read(&cmd->t_transport_stop) ==" |
c66ac9db | 507 | " TRUE for ITT: 0x%08x\n", __func__, __LINE__, |
e3d6f909 | 508 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db NB |
509 | |
510 | cmd->deferred_t_state = cmd->t_state; | |
511 | cmd->t_state = TRANSPORT_DEFERRED_CMD; | |
512 | if (transport_off == 2) | |
513 | transport_all_task_dev_remove_state(cmd); | |
514 | ||
515 | /* | |
516 | * Clear struct se_cmd->se_lun before the transport_off == 2 handoff | |
517 | * to FE. | |
518 | */ | |
519 | if (transport_off == 2) | |
520 | cmd->se_lun = NULL; | |
a1d8b49a | 521 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 522 | |
a1d8b49a | 523 | complete(&cmd->t_transport_stop_comp); |
c66ac9db NB |
524 | return 1; |
525 | } | |
526 | if (transport_off) { | |
a1d8b49a | 527 | atomic_set(&cmd->t_transport_active, 0); |
c66ac9db NB |
528 | if (transport_off == 2) { |
529 | transport_all_task_dev_remove_state(cmd); | |
530 | /* | |
531 | * Clear struct se_cmd->se_lun before the transport_off == 2 | |
532 | * handoff to fabric module. | |
533 | */ | |
534 | cmd->se_lun = NULL; | |
535 | /* | |
536 | * Some fabric modules like tcm_loop can release | |
25985edc | 537 | * their internally allocated I/O reference now and |
c66ac9db NB |
538 | * the struct se_cmd now. | |
539 | */ | |
e3d6f909 | 540 | if (cmd->se_tfo->check_stop_free != NULL) { |
c66ac9db | 541 | spin_unlock_irqrestore( |
a1d8b49a | 542 | &cmd->t_state_lock, flags); |
c66ac9db | 543 | |
e3d6f909 | 544 | cmd->se_tfo->check_stop_free(cmd); |
c66ac9db NB |
545 | return 1; |
546 | } | |
547 | } | |
a1d8b49a | 548 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
549 | |
550 | return 0; | |
551 | } else if (t_state) | |
552 | cmd->t_state = t_state; | |
a1d8b49a | 553 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
554 | |
555 | return 0; | |
556 | } | |
557 | ||
558 | static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) | |
559 | { | |
560 | return transport_cmd_check_stop(cmd, 2, 0); | |
561 | } | |
562 | ||
563 | static void transport_lun_remove_cmd(struct se_cmd *cmd) | |
564 | { | |
e3d6f909 | 565 | struct se_lun *lun = cmd->se_lun; |
c66ac9db NB |
566 | unsigned long flags; |
567 | ||
568 | if (!lun) | |
569 | return; | |
570 | ||
a1d8b49a | 571 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
6708bb27 | 572 | if (!atomic_read(&cmd->transport_dev_active)) { |
a1d8b49a | 573 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
574 | goto check_lun; |
575 | } | |
a1d8b49a | 576 | atomic_set(&cmd->transport_dev_active, 0); |
c66ac9db | 577 | transport_all_task_dev_remove_state(cmd); |
a1d8b49a | 578 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 579 | |
c66ac9db NB |
580 | |
581 | check_lun: | |
582 | spin_lock_irqsave(&lun->lun_cmd_lock, flags); | |
a1d8b49a | 583 | if (atomic_read(&cmd->transport_lun_active)) { |
5951146d | 584 | list_del(&cmd->se_lun_node); |
a1d8b49a | 585 | atomic_set(&cmd->transport_lun_active, 0); |
c66ac9db | 586 | #if 0 |
6708bb27 | 587 | pr_debug("Removed ITT: 0x%08x from LUN LIST[%d]\n" |
e3d6f909 | 588 | cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun); |
c66ac9db NB |
589 | #endif |
590 | } | |
591 | spin_unlock_irqrestore(&lun->lun_cmd_lock, flags); | |
592 | } | |
593 | ||
594 | void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) | |
595 | { | |
c66ac9db NB |
596 | transport_lun_remove_cmd(cmd); |
597 | ||
598 | if (transport_cmd_check_stop_to_fabric(cmd)) | |
599 | return; | |
77039d1e NB |
600 | if (remove) { |
601 | transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); | |
35462975 | 602 | transport_generic_remove(cmd, 0); |
77039d1e | 603 | } |
c66ac9db NB |
604 | } |
605 | ||
606 | void transport_cmd_finish_abort_tmr(struct se_cmd *cmd) | |
607 | { | |
5951146d | 608 | transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); |
c66ac9db NB |
609 | |
610 | if (transport_cmd_check_stop_to_fabric(cmd)) | |
611 | return; | |
612 | ||
35462975 | 613 | transport_generic_remove(cmd, 0); |
c66ac9db NB |
614 | } |
615 | ||
5951146d | 616 | static void transport_add_cmd_to_queue( |
c66ac9db NB |
617 | struct se_cmd *cmd, |
618 | int t_state) | |
619 | { | |
620 | struct se_device *dev = cmd->se_dev; | |
e3d6f909 | 621 | struct se_queue_obj *qobj = &dev->dev_queue_obj; |
c66ac9db NB |
622 | unsigned long flags; |
623 | ||
c66ac9db | 624 | if (t_state) { |
a1d8b49a | 625 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 626 | cmd->t_state = t_state; |
a1d8b49a AG |
627 | atomic_set(&cmd->t_transport_active, 1); |
628 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
c66ac9db NB |
629 | } |
630 | ||
631 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | |
79a7fef2 RD |
632 | |
633 | /* If the cmd is already on the list, remove it before we add it */ | |
634 | if (!list_empty(&cmd->se_queue_node)) | |
635 | list_del(&cmd->se_queue_node); | |
636 | else | |
637 | atomic_inc(&qobj->queue_cnt); | |
638 | ||
07bde79a NB |
639 | if (cmd->se_cmd_flags & SCF_EMULATE_QUEUE_FULL) { |
640 | cmd->se_cmd_flags &= ~SCF_EMULATE_QUEUE_FULL; | |
641 | list_add(&cmd->se_queue_node, &qobj->qobj_list); | |
642 | } else | |
643 | list_add_tail(&cmd->se_queue_node, &qobj->qobj_list); | |
79a7fef2 | 644 | atomic_set(&cmd->t_transport_queue_active, 1); |
c66ac9db NB |
645 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); |
646 | ||
c66ac9db | 647 | wake_up_interruptible(&qobj->thread_wq); |
c66ac9db NB |
648 | } |
649 | ||
5951146d AG |
650 | static struct se_cmd * |
651 | transport_get_cmd_from_queue(struct se_queue_obj *qobj) | |
c66ac9db | 652 | { |
5951146d | 653 | struct se_cmd *cmd; |
c66ac9db NB |
654 | unsigned long flags; |
655 | ||
656 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | |
657 | if (list_empty(&qobj->qobj_list)) { | |
658 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | |
659 | return NULL; | |
660 | } | |
5951146d | 661 | cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node); |
c66ac9db | 662 | |
79a7fef2 | 663 | atomic_set(&cmd->t_transport_queue_active, 0); |
c66ac9db | 664 | |
79a7fef2 | 665 | list_del_init(&cmd->se_queue_node); |
c66ac9db NB |
666 | atomic_dec(&qobj->queue_cnt); |
667 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | |
668 | ||
5951146d | 669 | return cmd; |
c66ac9db NB |
670 | } |
671 | ||
672 | static void transport_remove_cmd_from_queue(struct se_cmd *cmd, | |
673 | struct se_queue_obj *qobj) | |
674 | { | |
c66ac9db NB |
675 | unsigned long flags; |
676 | ||
677 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | |
6708bb27 | 678 | if (!atomic_read(&cmd->t_transport_queue_active)) { |
c66ac9db NB |
679 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); |
680 | return; | |
681 | } | |
79a7fef2 RD |
682 | atomic_set(&cmd->t_transport_queue_active, 0); |
683 | atomic_dec(&qobj->queue_cnt); | |
684 | list_del_init(&cmd->se_queue_node); | |
c66ac9db NB |
685 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); |
686 | ||
a1d8b49a | 687 | if (atomic_read(&cmd->t_transport_queue_active)) { |
6708bb27 | 688 | pr_err("ITT: 0x%08x t_transport_queue_active: %d\n", |
e3d6f909 | 689 | cmd->se_tfo->get_task_tag(cmd), |
a1d8b49a | 690 | atomic_read(&cmd->t_transport_queue_active)); |
c66ac9db NB |
691 | } |
692 | } | |
693 | ||
694 | /* | |
695 | * Completion function used by TCM subsystem plugins (such as FILEIO) | |
696 | * for queueing up response from struct se_subsystem_api->do_task() | |
697 | */ | |
698 | void transport_complete_sync_cache(struct se_cmd *cmd, int good) | |
699 | { | |
a1d8b49a | 700 | struct se_task *task = list_entry(cmd->t_task_list.next, |
c66ac9db NB |
701 | struct se_task, t_list); |
702 | ||
703 | if (good) { | |
704 | cmd->scsi_status = SAM_STAT_GOOD; | |
705 | task->task_scsi_status = GOOD; | |
706 | } else { | |
707 | task->task_scsi_status = SAM_STAT_CHECK_CONDITION; | |
708 | task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST; | |
e3d6f909 | 709 | task->task_se_cmd->transport_error_status = |
c66ac9db NB |
710 | PYX_TRANSPORT_ILLEGAL_REQUEST; |
711 | } | |
712 | ||
713 | transport_complete_task(task, good); | |
714 | } | |
715 | EXPORT_SYMBOL(transport_complete_sync_cache); | |
716 | ||
717 | /* transport_complete_task(): | |
718 | * | |
719 | * Called from interrupt and non interrupt context depending | |
720 | * on the transport plugin. | |
721 | */ | |
722 | void transport_complete_task(struct se_task *task, int success) | |
723 | { | |
e3d6f909 | 724 | struct se_cmd *cmd = task->task_se_cmd; |
c66ac9db NB |
725 | struct se_device *dev = task->se_dev; |
726 | int t_state; | |
727 | unsigned long flags; | |
728 | #if 0 | |
6708bb27 | 729 | pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task, |
a1d8b49a | 730 | cmd->t_task_cdb[0], dev); |
c66ac9db | 731 | #endif |
e3d6f909 | 732 | if (dev) |
c66ac9db | 733 | atomic_inc(&dev->depth_left); |
c66ac9db | 734 | |
a1d8b49a | 735 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db NB |
736 | atomic_set(&task->task_active, 0); |
737 | ||
738 | /* | |
739 | * See if any sense data exists, if so set the TASK_SENSE flag. | |
740 | * Also check for any other post completion work that needs to be | |
741 | * done by the plugins. | |
742 | */ | |
743 | if (dev && dev->transport->transport_complete) { | |
744 | if (dev->transport->transport_complete(task) != 0) { | |
745 | cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE; | |
746 | task->task_sense = 1; | |
747 | success = 1; | |
748 | } | |
749 | } | |
750 | ||
751 | /* | |
752 | * See if we are waiting for outstanding struct se_task | |
753 | * to complete for an exception condition | |
754 | */ | |
755 | if (atomic_read(&task->task_stop)) { | |
756 | /* | |
a1d8b49a | 757 | * Decrement cmd->t_se_count if this task had |
c66ac9db NB |
758 | * previously thrown its timeout exception handler. |
759 | */ | |
760 | if (atomic_read(&task->task_timeout)) { | |
a1d8b49a | 761 | atomic_dec(&cmd->t_se_count); |
c66ac9db NB |
762 | atomic_set(&task->task_timeout, 0); |
763 | } | |
a1d8b49a | 764 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
765 | |
766 | complete(&task->task_stop_comp); | |
767 | return; | |
768 | } | |
769 | /* | |
770 | * If the task's timeout handler has fired, use the t_task_cdbs_timeout | |
771 | * left counter to determine when the struct se_cmd is ready to be queued to | |
772 | * the processing thread. | |
773 | */ | |
774 | if (atomic_read(&task->task_timeout)) { | |
6708bb27 AG |
775 | if (!atomic_dec_and_test( |
776 | &cmd->t_task_cdbs_timeout_left)) { | |
a1d8b49a | 777 | spin_unlock_irqrestore(&cmd->t_state_lock, |
c66ac9db NB |
778 | flags); |
779 | return; | |
780 | } | |
781 | t_state = TRANSPORT_COMPLETE_TIMEOUT; | |
a1d8b49a | 782 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
783 | |
784 | transport_add_cmd_to_queue(cmd, t_state); | |
785 | return; | |
786 | } | |
a1d8b49a | 787 | atomic_dec(&cmd->t_task_cdbs_timeout_left); |
c66ac9db NB |
788 | |
789 | /* | |
790 | * Decrement the outstanding t_task_cdbs_left count. The last | |
791 | * struct se_task from struct se_cmd will complete itself into the | |
792 | * device queue depending upon int success. | |
793 | */ | |
6708bb27 | 794 | if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) { |
c66ac9db | 795 | if (!success) |
a1d8b49a | 796 | cmd->t_tasks_failed = 1; |
c66ac9db | 797 | |
a1d8b49a | 798 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
799 | return; |
800 | } | |
801 | ||
a1d8b49a | 802 | if (!success || cmd->t_tasks_failed) { |
c66ac9db NB |
803 | t_state = TRANSPORT_COMPLETE_FAILURE; |
804 | if (!task->task_error_status) { | |
805 | task->task_error_status = | |
806 | PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | |
807 | cmd->transport_error_status = | |
808 | PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | |
809 | } | |
810 | } else { | |
a1d8b49a | 811 | atomic_set(&cmd->t_transport_complete, 1); |
c66ac9db NB |
812 | t_state = TRANSPORT_COMPLETE_OK; |
813 | } | |
a1d8b49a | 814 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
815 | |
816 | transport_add_cmd_to_queue(cmd, t_state); | |
817 | } | |
818 | EXPORT_SYMBOL(transport_complete_task); | |
819 | ||
820 | /* | |
821 | * Called by transport_add_tasks_from_cmd() once a struct se_cmd's | |
822 | * struct se_task list is ready to be added to the active execution list | |
823 | * of a struct se_device. | |
824 | * | |
825 | * Called with se_dev_t->execute_task_lock held. | |
826 | */ | |
827 | static inline int transport_add_task_check_sam_attr( | |
828 | struct se_task *task, | |
829 | struct se_task *task_prev, | |
830 | struct se_device *dev) | |
831 | { | |
832 | /* | |
833 | * No SAM Task attribute emulation enabled, add to tail of | |
834 | * execution queue | |
835 | */ | |
836 | if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) { | |
837 | list_add_tail(&task->t_execute_list, &dev->execute_task_list); | |
838 | return 0; | |
839 | } | |
840 | /* | |
841 | * HEAD_OF_QUEUE attribute for received CDB, which means | |
842 | * the first task that is associated with a struct se_cmd goes to | |
843 | * head of the struct se_device->execute_task_list, and task_prev | |
844 | * after that for each subsequent task | |
845 | */ | |
e66ecd50 | 846 | if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) { |
c66ac9db NB |
847 | list_add(&task->t_execute_list, |
848 | (task_prev != NULL) ? | |
849 | &task_prev->t_execute_list : | |
850 | &dev->execute_task_list); | |
851 | ||
6708bb27 | 852 | pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x" |
c66ac9db | 853 | " in execution queue\n", |
6708bb27 | 854 | task->task_se_cmd->t_task_cdb[0]); |
c66ac9db NB |
855 | return 1; |
856 | } | |
857 | /* | |
858 | * For ORDERED, SIMPLE or UNTAGGED attribute tasks once they have been | |
859 | * transitioned from Dormant -> Active state, and are added to the end | |
860 | * of the struct se_device->execute_task_list | |
861 | */ | |
862 | list_add_tail(&task->t_execute_list, &dev->execute_task_list); | |
863 | return 0; | |
864 | } | |
865 | ||
866 | /* __transport_add_task_to_execute_queue(): | |
867 | * | |
868 | * Called with se_dev_t->execute_task_lock held. | |
869 | */ | |
870 | static void __transport_add_task_to_execute_queue( | |
871 | struct se_task *task, | |
872 | struct se_task *task_prev, | |
873 | struct se_device *dev) | |
874 | { | |
875 | int head_of_queue; | |
876 | ||
877 | head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev); | |
878 | atomic_inc(&dev->execute_tasks); | |
879 | ||
880 | if (atomic_read(&task->task_state_active)) | |
881 | return; | |
882 | /* | |
883 | * Determine if this task needs to go to HEAD_OF_QUEUE for the | |
884 | * state list as well. Running with SAM Task Attribute emulation | |
885 | * will always return head_of_queue == 0 here | |
886 | */ | |
887 | if (head_of_queue) | |
888 | list_add(&task->t_state_list, (task_prev) ? | |
889 | &task_prev->t_state_list : | |
890 | &dev->state_task_list); | |
891 | else | |
892 | list_add_tail(&task->t_state_list, &dev->state_task_list); | |
893 | ||
894 | atomic_set(&task->task_state_active, 1); | |
895 | ||
6708bb27 | 896 | pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n", |
e3d6f909 | 897 | task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd), |
c66ac9db NB |
898 | task, dev); |
899 | } | |
900 | ||
901 | static void transport_add_tasks_to_state_queue(struct se_cmd *cmd) | |
902 | { | |
903 | struct se_device *dev; | |
904 | struct se_task *task; | |
905 | unsigned long flags; | |
906 | ||
a1d8b49a AG |
907 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
908 | list_for_each_entry(task, &cmd->t_task_list, t_list) { | |
c66ac9db NB |
909 | dev = task->se_dev; |
910 | ||
911 | if (atomic_read(&task->task_state_active)) | |
912 | continue; | |
913 | ||
914 | spin_lock(&dev->execute_task_lock); | |
915 | list_add_tail(&task->t_state_list, &dev->state_task_list); | |
916 | atomic_set(&task->task_state_active, 1); | |
917 | ||
6708bb27 AG |
918 | pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n", |
919 | task->task_se_cmd->se_tfo->get_task_tag( | |
c66ac9db NB |
920 | task->task_se_cmd), task, dev); |
921 | ||
922 | spin_unlock(&dev->execute_task_lock); | |
923 | } | |
a1d8b49a | 924 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
925 | } |
926 | ||
927 | static void transport_add_tasks_from_cmd(struct se_cmd *cmd) | |
928 | { | |
5951146d | 929 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
930 | struct se_task *task, *task_prev = NULL; |
931 | unsigned long flags; | |
932 | ||
933 | spin_lock_irqsave(&dev->execute_task_lock, flags); | |
a1d8b49a | 934 | list_for_each_entry(task, &cmd->t_task_list, t_list) { |
c66ac9db NB |
935 | if (atomic_read(&task->task_execute_queue)) |
936 | continue; | |
937 | /* | |
938 | * __transport_add_task_to_execute_queue() handles the | |
939 | * SAM Task Attribute emulation if enabled | |
940 | */ | |
941 | __transport_add_task_to_execute_queue(task, task_prev, dev); | |
942 | atomic_set(&task->task_execute_queue, 1); | |
943 | task_prev = task; | |
944 | } | |
945 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | |
c66ac9db NB |
946 | } |
947 | ||
948 | /* transport_remove_task_from_execute_queue(): | |
949 | * | |
950 | * | |
951 | */ | |
52208ae3 | 952 | void transport_remove_task_from_execute_queue( |
c66ac9db NB |
953 | struct se_task *task, |
954 | struct se_device *dev) | |
955 | { | |
956 | unsigned long flags; | |
957 | ||
af57c3ac NB |
958 | if (atomic_read(&task->task_execute_queue) == 0) { |
959 | dump_stack(); | |
960 | return; | |
961 | } | |
962 | ||
c66ac9db NB |
963 | spin_lock_irqsave(&dev->execute_task_lock, flags); |
964 | list_del(&task->t_execute_list); | |
af57c3ac | 965 | atomic_set(&task->task_execute_queue, 0); |
c66ac9db NB |
966 | atomic_dec(&dev->execute_tasks); |
967 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | |
968 | } | |
969 | ||
07bde79a NB |
970 | /* |
971 | * Handle QUEUE_FULL / -EAGAIN status | |
972 | */ | |
973 | ||
974 | static void target_qf_do_work(struct work_struct *work) | |
975 | { | |
976 | struct se_device *dev = container_of(work, struct se_device, | |
977 | qf_work_queue); | |
bcac364a | 978 | LIST_HEAD(qf_cmd_list); |
07bde79a NB |
979 | struct se_cmd *cmd, *cmd_tmp; |
980 | ||
981 | spin_lock_irq(&dev->qf_cmd_lock); | |
bcac364a RD |
982 | list_splice_init(&dev->qf_cmd_list, &qf_cmd_list); |
983 | spin_unlock_irq(&dev->qf_cmd_lock); | |
07bde79a | 984 | |
bcac364a | 985 | list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { |
07bde79a NB |
986 | list_del(&cmd->se_qf_node); |
987 | atomic_dec(&dev->dev_qf_count); | |
988 | smp_mb__after_atomic_dec(); | |
07bde79a | 989 | |
6708bb27 | 990 | pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" |
07bde79a NB |
991 | " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, |
992 | (cmd->t_state == TRANSPORT_COMPLETE_OK) ? "COMPLETE_OK" : | |
993 | (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING" | |
994 | : "UNKNOWN"); | |
995 | /* | |
996 | * The SCF_EMULATE_QUEUE_FULL flag will be cleared once se_cmd | |
997 | * has been added to head of queue | |
998 | */ | |
999 | transport_add_cmd_to_queue(cmd, cmd->t_state); | |
07bde79a | 1000 | } |
07bde79a NB |
1001 | } |
1002 | ||
c66ac9db NB |
1003 | unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) |
1004 | { | |
1005 | switch (cmd->data_direction) { | |
1006 | case DMA_NONE: | |
1007 | return "NONE"; | |
1008 | case DMA_FROM_DEVICE: | |
1009 | return "READ"; | |
1010 | case DMA_TO_DEVICE: | |
1011 | return "WRITE"; | |
1012 | case DMA_BIDIRECTIONAL: | |
1013 | return "BIDI"; | |
1014 | default: | |
1015 | break; | |
1016 | } | |
1017 | ||
1018 | return "UNKNOWN"; | |
1019 | } | |
1020 | ||
1021 | void transport_dump_dev_state( | |
1022 | struct se_device *dev, | |
1023 | char *b, | |
1024 | int *bl) | |
1025 | { | |
1026 | *bl += sprintf(b + *bl, "Status: "); | |
1027 | switch (dev->dev_status) { | |
1028 | case TRANSPORT_DEVICE_ACTIVATED: | |
1029 | *bl += sprintf(b + *bl, "ACTIVATED"); | |
1030 | break; | |
1031 | case TRANSPORT_DEVICE_DEACTIVATED: | |
1032 | *bl += sprintf(b + *bl, "DEACTIVATED"); | |
1033 | break; | |
1034 | case TRANSPORT_DEVICE_SHUTDOWN: | |
1035 | *bl += sprintf(b + *bl, "SHUTDOWN"); | |
1036 | break; | |
1037 | case TRANSPORT_DEVICE_OFFLINE_ACTIVATED: | |
1038 | case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED: | |
1039 | *bl += sprintf(b + *bl, "OFFLINE"); | |
1040 | break; | |
1041 | default: | |
1042 | *bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status); | |
1043 | break; | |
1044 | } | |
1045 | ||
1046 | *bl += sprintf(b + *bl, " Execute/Left/Max Queue Depth: %d/%d/%d", | |
1047 | atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left), | |
1048 | dev->queue_depth); | |
1049 | *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n", | |
e3d6f909 | 1050 | dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors); |
c66ac9db NB |
1051 | *bl += sprintf(b + *bl, " "); |
1052 | } | |
1053 | ||
1054 | /* transport_release_all_cmds(): | |
1055 | * | |
1056 | * | |
1057 | */ | |
1058 | static void transport_release_all_cmds(struct se_device *dev) | |
1059 | { | |
5951146d | 1060 | struct se_cmd *cmd, *tcmd; |
c66ac9db NB |
1061 | int bug_out = 0, t_state; |
1062 | unsigned long flags; | |
1063 | ||
e3d6f909 | 1064 | spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags); |
5951146d AG |
1065 | list_for_each_entry_safe(cmd, tcmd, &dev->dev_queue_obj.qobj_list, |
1066 | se_queue_node) { | |
1067 | t_state = cmd->t_state; | |
79a7fef2 | 1068 | list_del_init(&cmd->se_queue_node); |
e3d6f909 | 1069 | spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, |
c66ac9db NB |
1070 | flags); |
1071 | ||
6708bb27 | 1072 | pr_err("Releasing ITT: 0x%08x, i_state: %u," |
c66ac9db | 1073 | " t_state: %u directly\n", |
e3d6f909 AG |
1074 | cmd->se_tfo->get_task_tag(cmd), |
1075 | cmd->se_tfo->get_cmd_state(cmd), t_state); | |
c66ac9db NB |
1076 | |
1077 | transport_release_fe_cmd(cmd); | |
1078 | bug_out = 1; | |
1079 | ||
e3d6f909 | 1080 | spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags); |
c66ac9db | 1081 | } |
e3d6f909 | 1082 | spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, flags); |
c66ac9db NB |
1083 | #if 0 |
1084 | if (bug_out) | |
1085 | BUG(); | |
1086 | #endif | |
1087 | } | |
1088 | ||
1089 | void transport_dump_vpd_proto_id( | |
1090 | struct t10_vpd *vpd, | |
1091 | unsigned char *p_buf, | |
1092 | int p_buf_len) | |
1093 | { | |
1094 | unsigned char buf[VPD_TMP_BUF_SIZE]; | |
1095 | int len; | |
1096 | ||
1097 | memset(buf, 0, VPD_TMP_BUF_SIZE); | |
1098 | len = sprintf(buf, "T10 VPD Protocol Identifier: "); | |
1099 | ||
1100 | switch (vpd->protocol_identifier) { | |
1101 | case 0x00: | |
1102 | sprintf(buf+len, "Fibre Channel\n"); | |
1103 | break; | |
1104 | case 0x10: | |
1105 | sprintf(buf+len, "Parallel SCSI\n"); | |
1106 | break; | |
1107 | case 0x20: | |
1108 | sprintf(buf+len, "SSA\n"); | |
1109 | break; | |
1110 | case 0x30: | |
1111 | sprintf(buf+len, "IEEE 1394\n"); | |
1112 | break; | |
1113 | case 0x40: | |
1114 | sprintf(buf+len, "SCSI Remote Direct Memory Access" | |
1115 | " Protocol\n"); | |
1116 | break; | |
1117 | case 0x50: | |
1118 | sprintf(buf+len, "Internet SCSI (iSCSI)\n"); | |
1119 | break; | |
1120 | case 0x60: | |
1121 | sprintf(buf+len, "SAS Serial SCSI Protocol\n"); | |
1122 | break; | |
1123 | case 0x70: | |
1124 | sprintf(buf+len, "Automation/Drive Interface Transport" | |
1125 | " Protocol\n"); | |
1126 | break; | |
1127 | case 0x80: | |
1128 | sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n"); | |
1129 | break; | |
1130 | default: | |
1131 | sprintf(buf+len, "Unknown 0x%02x\n", | |
1132 | vpd->protocol_identifier); | |
1133 | break; | |
1134 | } | |
1135 | ||
1136 | if (p_buf) | |
1137 | strncpy(p_buf, buf, p_buf_len); | |
1138 | else | |
6708bb27 | 1139 | pr_debug("%s", buf); |
c66ac9db NB |
1140 | } |
1141 | ||
1142 | void | |
1143 | transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83) | |
1144 | { | |
1145 | /* | |
1146 | * Check if the Protocol Identifier Valid (PIV) bit is set.. | |
1147 | * | |
1148 | * from spc3r23.pdf section 7.5.1 | |
1149 | */ | |
1150 | if (page_83[1] & 0x80) { | |
1151 | vpd->protocol_identifier = (page_83[0] & 0xf0); | |
1152 | vpd->protocol_identifier_set = 1; | |
1153 | transport_dump_vpd_proto_id(vpd, NULL, 0); | |
1154 | } | |
1155 | } | |
1156 | EXPORT_SYMBOL(transport_set_vpd_proto_id); | |
1157 | ||
1158 | int transport_dump_vpd_assoc( | |
1159 | struct t10_vpd *vpd, | |
1160 | unsigned char *p_buf, | |
1161 | int p_buf_len) | |
1162 | { | |
1163 | unsigned char buf[VPD_TMP_BUF_SIZE]; | |
e3d6f909 AG |
1164 | int ret = 0; |
1165 | int len; | |
c66ac9db NB |
1166 | |
1167 | memset(buf, 0, VPD_TMP_BUF_SIZE); | |
1168 | len = sprintf(buf, "T10 VPD Identifier Association: "); | |
1169 | ||
1170 | switch (vpd->association) { | |
1171 | case 0x00: | |
1172 | sprintf(buf+len, "addressed logical unit\n"); | |
1173 | break; | |
1174 | case 0x10: | |
1175 | sprintf(buf+len, "target port\n"); | |
1176 | break; | |
1177 | case 0x20: | |
1178 | sprintf(buf+len, "SCSI target device\n"); | |
1179 | break; | |
1180 | default: | |
1181 | sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); | |
e3d6f909 | 1182 | ret = -EINVAL; |
c66ac9db NB |
1183 | break; |
1184 | } | |
1185 | ||
1186 | if (p_buf) | |
1187 | strncpy(p_buf, buf, p_buf_len); | |
1188 | else | |
6708bb27 | 1189 | pr_debug("%s", buf); |
c66ac9db NB |
1190 | |
1191 | return ret; | |
1192 | } | |
1193 | ||
1194 | int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83) | |
1195 | { | |
1196 | /* | |
1197 | * The VPD identification association.. | |
1198 | * | |
1199 | * from spc3r23.pdf Section 7.6.3.1 Table 297 | |
1200 | */ | |
1201 | vpd->association = (page_83[1] & 0x30); | |
1202 | return transport_dump_vpd_assoc(vpd, NULL, 0); | |
1203 | } | |
1204 | EXPORT_SYMBOL(transport_set_vpd_assoc); | |
1205 | ||
1206 | int transport_dump_vpd_ident_type( | |
1207 | struct t10_vpd *vpd, | |
1208 | unsigned char *p_buf, | |
1209 | int p_buf_len) | |
1210 | { | |
1211 | unsigned char buf[VPD_TMP_BUF_SIZE]; | |
e3d6f909 AG |
1212 | int ret = 0; |
1213 | int len; | |
c66ac9db NB |
1214 | |
1215 | memset(buf, 0, VPD_TMP_BUF_SIZE); | |
1216 | len = sprintf(buf, "T10 VPD Identifier Type: "); | |
1217 | ||
1218 | switch (vpd->device_identifier_type) { | |
1219 | case 0x00: | |
1220 | sprintf(buf+len, "Vendor specific\n"); | |
1221 | break; | |
1222 | case 0x01: | |
1223 | sprintf(buf+len, "T10 Vendor ID based\n"); | |
1224 | break; | |
1225 | case 0x02: | |
1226 | sprintf(buf+len, "EUI-64 based\n"); | |
1227 | break; | |
1228 | case 0x03: | |
1229 | sprintf(buf+len, "NAA\n"); | |
1230 | break; | |
1231 | case 0x04: | |
1232 | sprintf(buf+len, "Relative target port identifier\n"); | |
1233 | break; | |
1234 | case 0x08: | |
1235 | sprintf(buf+len, "SCSI name string\n"); | |
1236 | break; | |
1237 | default: | |
1238 | sprintf(buf+len, "Unsupported: 0x%02x\n", | |
1239 | vpd->device_identifier_type); | |
e3d6f909 | 1240 | ret = -EINVAL; |
c66ac9db NB |
1241 | break; |
1242 | } | |
1243 | ||
e3d6f909 AG |
1244 | if (p_buf) { |
1245 | if (p_buf_len < strlen(buf)+1) | |
1246 | return -EINVAL; | |
c66ac9db | 1247 | strncpy(p_buf, buf, p_buf_len); |
e3d6f909 | 1248 | } else { |
6708bb27 | 1249 | pr_debug("%s", buf); |
e3d6f909 | 1250 | } |
c66ac9db NB |
1251 | |
1252 | return ret; | |
1253 | } | |
1254 | ||
1255 | int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83) | |
1256 | { | |
1257 | /* | |
1258 | * The VPD identifier type.. | |
1259 | * | |
1260 | * from spc3r23.pdf Section 7.6.3.1 Table 298 | |
1261 | */ | |
1262 | vpd->device_identifier_type = (page_83[1] & 0x0f); | |
1263 | return transport_dump_vpd_ident_type(vpd, NULL, 0); | |
1264 | } | |
1265 | EXPORT_SYMBOL(transport_set_vpd_ident_type); | |
1266 | ||
1267 | int transport_dump_vpd_ident( | |
1268 | struct t10_vpd *vpd, | |
1269 | unsigned char *p_buf, | |
1270 | int p_buf_len) | |
1271 | { | |
1272 | unsigned char buf[VPD_TMP_BUF_SIZE]; | |
1273 | int ret = 0; | |
1274 | ||
1275 | memset(buf, 0, VPD_TMP_BUF_SIZE); | |
1276 | ||
1277 | switch (vpd->device_identifier_code_set) { | |
1278 | case 0x01: /* Binary */ | |
1279 | sprintf(buf, "T10 VPD Binary Device Identifier: %s\n", | |
1280 | &vpd->device_identifier[0]); | |
1281 | break; | |
1282 | case 0x02: /* ASCII */ | |
1283 | sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n", | |
1284 | &vpd->device_identifier[0]); | |
1285 | break; | |
1286 | case 0x03: /* UTF-8 */ | |
1287 | sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n", | |
1288 | &vpd->device_identifier[0]); | |
1289 | break; | |
1290 | default: | |
1291 | sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" | |
1292 | " 0x%02x", vpd->device_identifier_code_set); | |
e3d6f909 | 1293 | ret = -EINVAL; |
c66ac9db NB |
1294 | break; |
1295 | } | |
1296 | ||
1297 | if (p_buf) | |
1298 | strncpy(p_buf, buf, p_buf_len); | |
1299 | else | |
6708bb27 | 1300 | pr_debug("%s", buf); |
c66ac9db NB |
1301 | |
1302 | return ret; | |
1303 | } | |
1304 | ||
1305 | int | |
1306 | transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83) | |
1307 | { | |
1308 | static const char hex_str[] = "0123456789abcdef"; | |
1309 | int j = 0, i = 4; /* offset to start of the identifier */ | |
1310 | ||
1311 | /* | |
1312 | * The VPD Code Set (encoding) | |
1313 | * | |
1314 | * from spc3r23.pdf Section 7.6.3.1 Table 296 | |
1315 | */ | |
1316 | vpd->device_identifier_code_set = (page_83[0] & 0x0f); | |
1317 | switch (vpd->device_identifier_code_set) { | |
1318 | case 0x01: /* Binary */ | |
1319 | vpd->device_identifier[j++] = | |
1320 | hex_str[vpd->device_identifier_type]; | |
1321 | while (i < (4 + page_83[3])) { | |
1322 | vpd->device_identifier[j++] = | |
1323 | hex_str[(page_83[i] & 0xf0) >> 4]; | |
1324 | vpd->device_identifier[j++] = | |
1325 | hex_str[page_83[i] & 0x0f]; | |
1326 | i++; | |
1327 | } | |
1328 | break; | |
1329 | case 0x02: /* ASCII */ | |
1330 | case 0x03: /* UTF-8 */ | |
1331 | while (i < (4 + page_83[3])) | |
1332 | vpd->device_identifier[j++] = page_83[i++]; | |
1333 | break; | |
1334 | default: | |
1335 | break; | |
1336 | } | |
1337 | ||
1338 | return transport_dump_vpd_ident(vpd, NULL, 0); | |
1339 | } | |
1340 | EXPORT_SYMBOL(transport_set_vpd_ident); | |
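/*
 * Editor's sketch (not part of this file): a minimal example, under stated
 * assumptions, of how the transport_set_vpd_*() helpers above could be
 * applied to a single INQUIRY EVPD 0x83 designation descriptor. The function
 * name is hypothetical; a real caller (e.g. a pSCSI-style backend) would
 * iterate over every descriptor in the returned page and keep the resulting
 * struct t10_vpd entries on a per-device list.
 */
static void example_parse_vpd_descriptor(struct t10_vpd *vpd,
					 unsigned char *page_83)
{
	memset(vpd, 0, sizeof(struct t10_vpd));
	transport_set_vpd_proto_id(vpd, page_83);
	transport_set_vpd_assoc(vpd, page_83);
	transport_set_vpd_ident_type(vpd, page_83);
	transport_set_vpd_ident(vpd, page_83);
}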
1341 | ||
1342 | static void core_setup_task_attr_emulation(struct se_device *dev) | |
1343 | { | |
1344 | /* | |
1345 | * If this device is from Target_Core_Mod/pSCSI, disable the | |
1346 | * SAM Task Attribute emulation. | |
1347 | * | |
1348 | * This is currently not available in upstream Linux/SCSI Target | |
1349 | * mode code, and is assumed to be disabled while using TCM/pSCSI. | |
1350 | */ | |
e3d6f909 | 1351 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { |
c66ac9db NB |
1352 | dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH; |
1353 | return; | |
1354 | } | |
1355 | ||
1356 | dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED; | |
6708bb27 | 1357 | pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x" |
e3d6f909 AG |
1358 | " device\n", dev->transport->name, |
1359 | dev->transport->get_device_rev(dev)); | |
c66ac9db NB |
1360 | } |
1361 | ||
1362 | static void scsi_dump_inquiry(struct se_device *dev) | |
1363 | { | |
e3d6f909 | 1364 | struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn; |
c66ac9db NB |
1365 | int i, device_type; |
1366 | /* | |
1367 | * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer | |
1368 | */ | |
6708bb27 | 1369 | pr_debug(" Vendor: "); |
c66ac9db NB |
1370 | for (i = 0; i < 8; i++) |
1371 | if (wwn->vendor[i] >= 0x20) | |
6708bb27 | 1372 | pr_debug("%c", wwn->vendor[i]); |
c66ac9db | 1373 | else |
6708bb27 | 1374 | pr_debug(" "); |
c66ac9db | 1375 | |
6708bb27 | 1376 | pr_debug(" Model: "); |
c66ac9db NB |
1377 | for (i = 0; i < 16; i++) |
1378 | if (wwn->model[i] >= 0x20) | |
6708bb27 | 1379 | pr_debug("%c", wwn->model[i]); |
c66ac9db | 1380 | else |
6708bb27 | 1381 | pr_debug(" "); |
c66ac9db | 1382 | |
6708bb27 | 1383 | pr_debug(" Revision: "); |
c66ac9db NB |
1384 | for (i = 0; i < 4; i++) |
1385 | if (wwn->revision[i] >= 0x20) | |
6708bb27 | 1386 | pr_debug("%c", wwn->revision[i]); |
c66ac9db | 1387 | else |
6708bb27 | 1388 | pr_debug(" "); |
c66ac9db | 1389 | |
6708bb27 | 1390 | pr_debug("\n"); |
c66ac9db | 1391 | |
e3d6f909 | 1392 | device_type = dev->transport->get_device_type(dev); |
6708bb27 AG |
1393 | pr_debug(" Type: %s ", scsi_device_type(device_type)); |
1394 | pr_debug(" ANSI SCSI revision: %02x\n", | |
e3d6f909 | 1395 | dev->transport->get_device_rev(dev)); |
c66ac9db NB |
1396 | } |
1397 | ||
1398 | struct se_device *transport_add_device_to_core_hba( | |
1399 | struct se_hba *hba, | |
1400 | struct se_subsystem_api *transport, | |
1401 | struct se_subsystem_dev *se_dev, | |
1402 | u32 device_flags, | |
1403 | void *transport_dev, | |
1404 | struct se_dev_limits *dev_limits, | |
1405 | const char *inquiry_prod, | |
1406 | const char *inquiry_rev) | |
1407 | { | |
12a18bdc | 1408 | int force_pt; |
c66ac9db NB |
1409 | struct se_device *dev; |
1410 | ||
1411 | dev = kzalloc(sizeof(struct se_device), GFP_KERNEL); | |
6708bb27 AG |
1412 | if (!dev) { |
1413 | pr_err("Unable to allocate memory for se_dev_t\n"); | |
c66ac9db NB |
1414 | return NULL; |
1415 | } | |
c66ac9db | 1416 | |
e3d6f909 | 1417 | transport_init_queue_obj(&dev->dev_queue_obj); |
c66ac9db NB |
1418 | dev->dev_flags = device_flags; |
1419 | dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; | |
5951146d | 1420 | dev->dev_ptr = transport_dev; |
c66ac9db NB |
1421 | dev->se_hba = hba; |
1422 | dev->se_sub_dev = se_dev; | |
1423 | dev->transport = transport; | |
1424 | atomic_set(&dev->active_cmds, 0); | |
1425 | INIT_LIST_HEAD(&dev->dev_list); | |
1426 | INIT_LIST_HEAD(&dev->dev_sep_list); | |
1427 | INIT_LIST_HEAD(&dev->dev_tmr_list); | |
1428 | INIT_LIST_HEAD(&dev->execute_task_list); | |
1429 | INIT_LIST_HEAD(&dev->delayed_cmd_list); | |
1430 | INIT_LIST_HEAD(&dev->ordered_cmd_list); | |
1431 | INIT_LIST_HEAD(&dev->state_task_list); | |
07bde79a | 1432 | INIT_LIST_HEAD(&dev->qf_cmd_list); |
c66ac9db NB |
1433 | spin_lock_init(&dev->execute_task_lock); |
1434 | spin_lock_init(&dev->delayed_cmd_lock); | |
1435 | spin_lock_init(&dev->ordered_cmd_lock); | |
1436 | spin_lock_init(&dev->state_task_lock); | |
1437 | spin_lock_init(&dev->dev_alua_lock); | |
1438 | spin_lock_init(&dev->dev_reservation_lock); | |
1439 | spin_lock_init(&dev->dev_status_lock); | |
1440 | spin_lock_init(&dev->dev_status_thr_lock); | |
1441 | spin_lock_init(&dev->se_port_lock); | |
1442 | spin_lock_init(&dev->se_tmr_lock); | |
07bde79a | 1443 | spin_lock_init(&dev->qf_cmd_lock); |
c66ac9db NB |
1444 | |
1445 | dev->queue_depth = dev_limits->queue_depth; | |
1446 | atomic_set(&dev->depth_left, dev->queue_depth); | |
1447 | atomic_set(&dev->dev_ordered_id, 0); | |
1448 | ||
1449 | se_dev_set_default_attribs(dev, dev_limits); | |
1450 | ||
1451 | dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX); | |
1452 | dev->creation_time = get_jiffies_64(); | |
1453 | spin_lock_init(&dev->stats_lock); | |
1454 | ||
1455 | spin_lock(&hba->device_lock); | |
1456 | list_add_tail(&dev->dev_list, &hba->hba_dev_list); | |
1457 | hba->dev_count++; | |
1458 | spin_unlock(&hba->device_lock); | |
1459 | /* | |
1460 | * Setup the SAM Task Attribute emulation for struct se_device | |
1461 | */ | |
1462 | core_setup_task_attr_emulation(dev); | |
1463 | /* | |
1464 | * Force PR and ALUA passthrough emulation with internal object use. | |
1465 | */ | |
1466 | force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE); | |
1467 | /* | |
1468 | * Setup the Reservations infrastructure for struct se_device | |
1469 | */ | |
1470 | core_setup_reservations(dev, force_pt); | |
1471 | /* | |
1472 | * Setup the Asymmetric Logical Unit Assignment for struct se_device | |
1473 | */ | |
1474 | if (core_setup_alua(dev, force_pt) < 0) | |
1475 | goto out; | |
1476 | ||
1477 | /* | |
1478 | * Startup the struct se_device processing thread | |
1479 | */ | |
1480 | dev->process_thread = kthread_run(transport_processing_thread, dev, | |
e3d6f909 | 1481 | "LIO_%s", dev->transport->name); |
c66ac9db | 1482 | if (IS_ERR(dev->process_thread)) { |
6708bb27 | 1483 | pr_err("Unable to create kthread: LIO_%s\n", |
e3d6f909 | 1484 | dev->transport->name); |
c66ac9db NB |
1485 | goto out; |
1486 | } | |
07bde79a NB |
1487 | /* |
1488 | * Setup work_queue for QUEUE_FULL | |
1489 | */ | |
1490 | INIT_WORK(&dev->qf_work_queue, target_qf_do_work); | |
c66ac9db NB |
1491 | /* |
1492 | * Preload the initial INQUIRY const values if we are doing | |
1493 | * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI | |
1494 | * passthrough because this is being provided by the backend LLD. | |
1495 | * This is required so that transport_get_inquiry() copies these | |
1496 | * originals once back into DEV_T10_WWN(dev) for the virtual device | |
1497 | * setup. | |
1498 | */ | |
e3d6f909 | 1499 | if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { |
f22c1196 | 1500 | if (!inquiry_prod || !inquiry_rev) { |
6708bb27 | 1501 | pr_err("All non TCM/pSCSI plugins require" |
c66ac9db NB |
1502 | " INQUIRY consts\n"); |
1503 | goto out; | |
1504 | } | |
1505 | ||
e3d6f909 AG |
1506 | strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8); |
1507 | strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16); | |
1508 | strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4); | |
c66ac9db NB |
1509 | } |
1510 | scsi_dump_inquiry(dev); | |
1511 | ||
12a18bdc | 1512 | return dev; |
c66ac9db | 1513 | out: |
c66ac9db NB |
1514 | kthread_stop(dev->process_thread); |
1515 | ||
1516 | spin_lock(&hba->device_lock); | |
1517 | list_del(&dev->dev_list); | |
1518 | hba->dev_count--; | |
1519 | spin_unlock(&hba->device_lock); | |
1520 | ||
1521 | se_release_vpd_for_dev(dev); | |
1522 | ||
c66ac9db NB |
1523 | kfree(dev); |
1524 | ||
1525 | return NULL; | |
1526 | } | |
1527 | EXPORT_SYMBOL(transport_add_device_to_core_hba); | |
1528 | ||
1529 | /* transport_generic_prepare_cdb(): | |
1530 | * | |
1531 | * Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will | |
1532 | * contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2. | |
1533 | * The point of this is that since we are mapping iSCSI LUNs to | 
1534 | * SCSI Target IDs, a non-zero LUN in the CDB will throw the | 
1535 | * devices and HBAs for a loop. | 
1536 | */ | |
1537 | static inline void transport_generic_prepare_cdb( | |
1538 | unsigned char *cdb) | |
1539 | { | |
1540 | switch (cdb[0]) { | |
1541 | case READ_10: /* SBC - RDProtect */ | |
1542 | case READ_12: /* SBC - RDProtect */ | |
1543 | case READ_16: /* SBC - RDProtect */ | |
1544 | case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */ | |
1545 | case VERIFY: /* SBC - VRProtect */ | |
1546 | case VERIFY_16: /* SBC - VRProtect */ | |
1547 | case WRITE_VERIFY: /* SBC - VRProtect */ | |
1548 | case WRITE_VERIFY_12: /* SBC - VRProtect */ | |
1549 | break; | |
1550 | default: | |
1551 | cdb[1] &= 0x1f; /* clear logical unit number */ | |
1552 | break; | |
1553 | } | |
1554 | } | |
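
A minimal stand-alone sketch (not part of this source) of the byte-1 masking above: for opcodes that do not carry protection/self-test bits in byte 1, the legacy SAM-2 LUN field in bits 7-5 is simply cleared.

	/* User-space sketch of the cdb[1] &= 0x1f masking performed above;
	 * 0x40 stands for LUN 2 encoded in bits 7-5 of CDB byte 1. */
	#include <stdio.h>

	int main(void)
	{
		unsigned char cdb[6] = { 0x00, 0x40, 0x00, 0x00, 0x00, 0x00 };

		cdb[1] &= 0x1f;			/* clear the legacy LUN field */
		printf("byte 1 after masking: 0x%02x\n", cdb[1]);	/* prints 0x00 */
		return 0;
	}
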
1555 | ||
1556 | static struct se_task * | |
1557 | transport_generic_get_task(struct se_cmd *cmd, | |
1558 | enum dma_data_direction data_direction) | |
1559 | { | |
1560 | struct se_task *task; | |
5951146d | 1561 | struct se_device *dev = cmd->se_dev; |
c66ac9db | 1562 | |
6708bb27 | 1563 | task = dev->transport->alloc_task(cmd->t_task_cdb); |
c66ac9db | 1564 | if (!task) { |
6708bb27 | 1565 | pr_err("Unable to allocate struct se_task\n"); |
c66ac9db NB |
1566 | return NULL; |
1567 | } | |
1568 | ||
1569 | INIT_LIST_HEAD(&task->t_list); | |
1570 | INIT_LIST_HEAD(&task->t_execute_list); | |
1571 | INIT_LIST_HEAD(&task->t_state_list); | |
1572 | init_completion(&task->task_stop_comp); | |
c66ac9db NB |
1573 | task->task_se_cmd = cmd; |
1574 | task->se_dev = dev; | |
1575 | task->task_data_direction = data_direction; | |
1576 | ||
c66ac9db NB |
1577 | return task; |
1578 | } | |
1579 | ||
1580 | static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *); | |
1581 | ||
c66ac9db NB |
1582 | /* |
1583 | * Used by fabric modules containing a local struct se_cmd within their | |
1584 | * fabric dependent per I/O descriptor. | |
1585 | */ | |
1586 | void transport_init_se_cmd( | |
1587 | struct se_cmd *cmd, | |
1588 | struct target_core_fabric_ops *tfo, | |
1589 | struct se_session *se_sess, | |
1590 | u32 data_length, | |
1591 | int data_direction, | |
1592 | int task_attr, | |
1593 | unsigned char *sense_buffer) | |
1594 | { | |
5951146d AG |
1595 | INIT_LIST_HEAD(&cmd->se_lun_node); |
1596 | INIT_LIST_HEAD(&cmd->se_delayed_node); | |
1597 | INIT_LIST_HEAD(&cmd->se_ordered_node); | |
07bde79a | 1598 | INIT_LIST_HEAD(&cmd->se_qf_node); |
79a7fef2 | 1599 | INIT_LIST_HEAD(&cmd->se_queue_node); |
c66ac9db | 1600 | |
a1d8b49a AG |
1601 | INIT_LIST_HEAD(&cmd->t_task_list); |
1602 | init_completion(&cmd->transport_lun_fe_stop_comp); | |
1603 | init_completion(&cmd->transport_lun_stop_comp); | |
1604 | init_completion(&cmd->t_transport_stop_comp); | |
1605 | spin_lock_init(&cmd->t_state_lock); | |
1606 | atomic_set(&cmd->transport_dev_active, 1); | |
c66ac9db NB |
1607 | |
1608 | cmd->se_tfo = tfo; | |
1609 | cmd->se_sess = se_sess; | |
1610 | cmd->data_length = data_length; | |
1611 | cmd->data_direction = data_direction; | |
1612 | cmd->sam_task_attr = task_attr; | |
1613 | cmd->sense_buffer = sense_buffer; | |
1614 | } | |
1615 | EXPORT_SYMBOL(transport_init_se_cmd); | |
1616 | ||
1617 | static int transport_check_alloc_task_attr(struct se_cmd *cmd) | |
1618 | { | |
1619 | /* | |
1620 | * Check if SAM Task Attribute emulation is enabled for this | |
1621 | * struct se_device storage object | |
1622 | */ | |
5951146d | 1623 | if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) |
c66ac9db NB |
1624 | return 0; |
1625 | ||
e66ecd50 | 1626 | if (cmd->sam_task_attr == MSG_ACA_TAG) { |
6708bb27 | 1627 | pr_debug("SAM Task Attribute ACA" |
c66ac9db | 1628 | " emulation is not supported\n"); |
e3d6f909 | 1629 | return -EINVAL; |
c66ac9db NB |
1630 | } |
1631 | /* | |
1632 | * Used to determine when ORDERED commands should go from | |
1633 | * Dormant to Active status. | |
1634 | */ | |
5951146d | 1635 | cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id); |
c66ac9db | 1636 | smp_mb__after_atomic_inc(); |
6708bb27 | 1637 | pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", |
c66ac9db | 1638 | cmd->se_ordered_id, cmd->sam_task_attr, |
6708bb27 | 1639 | cmd->se_dev->transport->name); |
c66ac9db NB |
1640 | return 0; |
1641 | } | |
1642 | ||
1643 | void transport_free_se_cmd( | |
1644 | struct se_cmd *se_cmd) | |
1645 | { | |
1646 | if (se_cmd->se_tmr_req) | |
1647 | core_tmr_release_req(se_cmd->se_tmr_req); | |
1648 | /* | |
1649 | * Check and free any extended CDB buffer that was allocated | |
1650 | */ | |
a1d8b49a AG |
1651 | if (se_cmd->t_task_cdb != se_cmd->__t_task_cdb) |
1652 | kfree(se_cmd->t_task_cdb); | |
c66ac9db NB |
1653 | } |
1654 | EXPORT_SYMBOL(transport_free_se_cmd); | |
1655 | ||
1656 | static void transport_generic_wait_for_tasks(struct se_cmd *, int, int); | |
1657 | ||
1658 | /* transport_generic_allocate_tasks(): | |
1659 | * | |
1660 | * Called from fabric RX Thread. | |
1661 | */ | |
1662 | int transport_generic_allocate_tasks( | |
1663 | struct se_cmd *cmd, | |
1664 | unsigned char *cdb) | |
1665 | { | |
1666 | int ret; | |
1667 | ||
1668 | transport_generic_prepare_cdb(cdb); | |
1669 | ||
1670 | /* | |
1671 | * This is needed for early exceptions. | |
1672 | */ | |
1673 | cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; | |
1674 | ||
c66ac9db NB |
1675 | /* |
1676 | * Ensure that the received CDB is less than the max (252 + 8) bytes | |
1677 | * for VARIABLE_LENGTH_CMD | |
1678 | */ | |
1679 | if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { | |
6708bb27 | 1680 | pr_err("Received SCSI CDB with command_size: %d that" |
c66ac9db NB |
1681 | " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", |
1682 | scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); | |
e3d6f909 | 1683 | return -EINVAL; |
c66ac9db NB |
1684 | } |
1685 | /* | |
1686 | * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, | |
1687 | * allocate the additional extended CDB buffer now.. Otherwise | |
1688 | * setup the pointer from __t_task_cdb to t_task_cdb. | |
1689 | */ | |
a1d8b49a AG |
1690 | if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { |
1691 | cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), | |
c66ac9db | 1692 | GFP_KERNEL); |
6708bb27 AG |
1693 | if (!cmd->t_task_cdb) { |
1694 | pr_err("Unable to allocate cmd->t_task_cdb" | |
a1d8b49a | 1695 | " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", |
c66ac9db | 1696 | scsi_command_size(cdb), |
a1d8b49a | 1697 | (unsigned long)sizeof(cmd->__t_task_cdb)); |
e3d6f909 | 1698 | return -ENOMEM; |
c66ac9db NB |
1699 | } |
1700 | } else | |
a1d8b49a | 1701 | cmd->t_task_cdb = &cmd->__t_task_cdb[0]; |
c66ac9db | 1702 | /* |
a1d8b49a | 1703 | * Copy the original CDB into cmd->t_task_cdb. | 
c66ac9db | 1704 | */ |
a1d8b49a | 1705 | memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb)); |
c66ac9db NB |
1706 | /* |
1707 | * Setup the received CDB based on SCSI defined opcodes and | |
1708 | * perform unit attention, persistent reservations and ALUA | |
a1d8b49a | 1709 | * checks for virtual device backends. The cmd->t_task_cdb |
c66ac9db NB |
1710 | * pointer is expected to be setup before we reach this point. |
1711 | */ | |
1712 | ret = transport_generic_cmd_sequencer(cmd, cdb); | |
1713 | if (ret < 0) | |
1714 | return ret; | |
1715 | /* | |
1716 | * Check for SAM Task Attribute Emulation | |
1717 | */ | |
1718 | if (transport_check_alloc_task_attr(cmd) < 0) { | |
1719 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
1720 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | |
5951146d | 1721 | return -EINVAL; |
c66ac9db NB |
1722 | } |
1723 | spin_lock(&cmd->se_lun->lun_sep_lock); | |
1724 | if (cmd->se_lun->lun_sep) | |
1725 | cmd->se_lun->lun_sep->sep_stats.cmd_pdus++; | |
1726 | spin_unlock(&cmd->se_lun->lun_sep_lock); | |
1727 | return 0; | |
1728 | } | |
1729 | EXPORT_SYMBOL(transport_generic_allocate_tasks); | |
1730 | ||
dd8ae59d NB |
1731 | static void transport_generic_request_failure(struct se_cmd *, |
1732 | struct se_device *, int, int); | |
695434e1 NB |
1733 | /* |
1734 | * Used by fabric module frontends to queue tasks directly. | |
1735 | * May only be used from process context. | 
1736 | */ | |
1737 | int transport_handle_cdb_direct( | |
1738 | struct se_cmd *cmd) | |
1739 | { | |
dd8ae59d NB |
1740 | int ret; |
1741 | ||
695434e1 NB |
1742 | if (!cmd->se_lun) { |
1743 | dump_stack(); | |
6708bb27 | 1744 | pr_err("cmd->se_lun is NULL\n"); |
695434e1 NB |
1745 | return -EINVAL; |
1746 | } | |
1747 | if (in_interrupt()) { | |
1748 | dump_stack(); | |
6708bb27 | 1749 | pr_err("transport_generic_handle_cdb cannot be called" |
695434e1 NB |
1750 | " from interrupt context\n"); |
1751 | return -EINVAL; | |
1752 | } | |
dd8ae59d NB |
1753 | /* |
1754 | * Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following | |
1755 | * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue() | |
1756 | * in existing usage to ensure that outstanding descriptors are handled | |
1757 | * correctly during shutdown via transport_generic_wait_for_tasks() | |
1758 | * | |
1759 | * Also, we don't take cmd->t_state_lock here as we only expect | |
1760 | * this to be called for initial descriptor submission. | |
1761 | */ | |
1762 | cmd->t_state = TRANSPORT_NEW_CMD; | |
1763 | atomic_set(&cmd->t_transport_active, 1); | |
1764 | /* | |
1765 | * transport_generic_new_cmd() is already handling QUEUE_FULL, | |
1766 | * so follow TRANSPORT_NEW_CMD processing thread context usage | |
1767 | * and call transport_generic_request_failure() if necessary.. | |
1768 | */ | |
1769 | ret = transport_generic_new_cmd(cmd); | |
1770 | if (ret == -EAGAIN) | |
1771 | return 0; | |
1772 | else if (ret < 0) { | |
1773 | cmd->transport_error_status = ret; | |
1774 | transport_generic_request_failure(cmd, NULL, 0, | |
1775 | (cmd->data_direction != DMA_TO_DEVICE)); | |
1776 | } | |
1777 | return 0; | |
695434e1 NB |
1778 | } |
1779 | EXPORT_SYMBOL(transport_handle_cdb_direct); | |
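
A hedged sketch of the call order a fabric frontend is expected to follow from process context when using the direct submission path above; the fabric-side names (my_fabric_submit, my_tfo, my_sess, sense, cdb) are hypothetical, and LUN assignment is assumed to have been done by the fabric's own lookup before CDB processing.

	/* Sketch only - not a real fabric module. Shows the submission order:
	 * transport_init_se_cmd() -> (fabric LUN setup) ->
	 * transport_generic_allocate_tasks() -> transport_handle_cdb_direct(). */
	static void my_fabric_submit(struct se_cmd *cmd,
				     struct target_core_fabric_ops *my_tfo,
				     struct se_session *my_sess,
				     unsigned char *cdb, u32 length, int dir,
				     int task_attr, unsigned char *sense)
	{
		transport_init_se_cmd(cmd, my_tfo, my_sess, length, dir,
				      task_attr, sense);
		/* cmd->se_lun is assumed to be set by the fabric before this point */
		if (transport_generic_allocate_tasks(cmd, cdb) < 0)
			return;		/* CDB rejected; fabric queues CHECK CONDITION */

		transport_handle_cdb_direct(cmd);	/* process context only */
	}
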
1780 | ||
c66ac9db NB |
1781 | /* |
1782 | * Used by fabric module frontends defining a TFO->new_cmd_map() caller | |
1783 | * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to | |
1784 | * complete setup in TCM process context w/ TFO->new_cmd_map(). | |
1785 | */ | |
1786 | int transport_generic_handle_cdb_map( | |
1787 | struct se_cmd *cmd) | |
1788 | { | |
e3d6f909 | 1789 | if (!cmd->se_lun) { |
c66ac9db | 1790 | dump_stack(); |
6708bb27 | 1791 | pr_err("cmd->se_lun is NULL\n"); |
e3d6f909 | 1792 | return -EINVAL; |
c66ac9db NB |
1793 | } |
1794 | ||
1795 | transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP); | |
1796 | return 0; | |
1797 | } | |
1798 | EXPORT_SYMBOL(transport_generic_handle_cdb_map); | |
1799 | ||
1800 | /* transport_generic_handle_data(): | |
1801 | * | |
1802 | * | |
1803 | */ | |
1804 | int transport_generic_handle_data( | |
1805 | struct se_cmd *cmd) | |
1806 | { | |
1807 | /* | |
1808 | * For the software fabric case, we assume the nexus is being | 
1809 | * failed/shutdown when signals are pending from the kthread context | |
1810 | * caller, so we return a failure. For the HW target mode case running | |
1811 | * in interrupt code, the signal_pending() check is skipped. | |
1812 | */ | |
1813 | if (!in_interrupt() && signal_pending(current)) | |
e3d6f909 | 1814 | return -EPERM; |
c66ac9db NB |
1815 | /* |
1816 | * If the received CDB has already been ABORTED by the generic | 
1817 | * target engine, we now call transport_check_aborted_status() | 
1818 | * to queue any delayed TASK_ABORTED status for the received CDB to the | 
25985edc | 1819 | * fabric module as we are expecting no further incoming DATA OUT |
c66ac9db NB |
1820 | * sequences at this point. |
1821 | */ | |
1822 | if (transport_check_aborted_status(cmd, 1) != 0) | |
1823 | return 0; | |
1824 | ||
1825 | transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE); | |
1826 | return 0; | |
1827 | } | |
1828 | EXPORT_SYMBOL(transport_generic_handle_data); | |
1829 | ||
1830 | /* transport_generic_handle_tmr(): | |
1831 | * | |
1832 | * | |
1833 | */ | |
1834 | int transport_generic_handle_tmr( | |
1835 | struct se_cmd *cmd) | |
1836 | { | |
1837 | /* | |
1838 | * This is needed for early exceptions. | |
1839 | */ | |
1840 | cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; | |
c66ac9db NB |
1841 | |
1842 | transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR); | |
1843 | return 0; | |
1844 | } | |
1845 | EXPORT_SYMBOL(transport_generic_handle_tmr); | |
1846 | ||
f4366772 NB |
1847 | void transport_generic_free_cmd_intr( |
1848 | struct se_cmd *cmd) | |
1849 | { | |
1850 | transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR); | |
1851 | } | |
1852 | EXPORT_SYMBOL(transport_generic_free_cmd_intr); | |
1853 | ||
c66ac9db NB |
1854 | static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) |
1855 | { | |
1856 | struct se_task *task, *task_tmp; | |
1857 | unsigned long flags; | |
1858 | int ret = 0; | |
1859 | ||
6708bb27 | 1860 | pr_debug("ITT[0x%08x] - Stopping tasks\n", |
e3d6f909 | 1861 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db NB |
1862 | |
1863 | /* | |
1864 | * No tasks remain in the execution queue | |
1865 | */ | |
a1d8b49a | 1866 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 1867 | list_for_each_entry_safe(task, task_tmp, |
a1d8b49a | 1868 | &cmd->t_task_list, t_list) { |
6708bb27 | 1869 | pr_debug("task_no[%d] - Processing task %p\n", |
c66ac9db NB |
1870 | task->task_no, task); |
1871 | /* | |
1872 | * If the struct se_task has not been sent and is not active, | |
1873 | * remove the struct se_task from the execution queue. | |
1874 | */ | |
1875 | if (!atomic_read(&task->task_sent) && | |
1876 | !atomic_read(&task->task_active)) { | |
a1d8b49a | 1877 | spin_unlock_irqrestore(&cmd->t_state_lock, |
c66ac9db NB |
1878 | flags); |
1879 | transport_remove_task_from_execute_queue(task, | |
1880 | task->se_dev); | |
1881 | ||
6708bb27 | 1882 | pr_debug("task_no[%d] - Removed from execute queue\n", |
c66ac9db | 1883 | task->task_no); |
a1d8b49a | 1884 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db NB |
1885 | continue; |
1886 | } | |
1887 | ||
1888 | /* | |
1889 | * If the struct se_task is active, sleep until it is returned | |
1890 | * from the plugin. | |
1891 | */ | |
1892 | if (atomic_read(&task->task_active)) { | |
1893 | atomic_set(&task->task_stop, 1); | |
a1d8b49a | 1894 | spin_unlock_irqrestore(&cmd->t_state_lock, |
c66ac9db NB |
1895 | flags); |
1896 | ||
6708bb27 | 1897 | pr_debug("task_no[%d] - Waiting to complete\n", |
c66ac9db NB |
1898 | task->task_no); |
1899 | wait_for_completion(&task->task_stop_comp); | |
6708bb27 | 1900 | pr_debug("task_no[%d] - Stopped successfully\n", |
c66ac9db NB |
1901 | task->task_no); |
1902 | ||
a1d8b49a AG |
1903 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
1904 | atomic_dec(&cmd->t_task_cdbs_left); | |
c66ac9db NB |
1905 | |
1906 | atomic_set(&task->task_active, 0); | |
1907 | atomic_set(&task->task_stop, 0); | |
1908 | } else { | |
6708bb27 | 1909 | pr_debug("task_no[%d] - Did nothing\n", task->task_no); |
c66ac9db NB |
1910 | ret++; |
1911 | } | |
1912 | ||
1913 | __transport_stop_task_timer(task, &flags); | |
1914 | } | |
a1d8b49a | 1915 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
1916 | |
1917 | return ret; | |
1918 | } | |
1919 | ||
c66ac9db NB |
1920 | /* |
1921 | * Handle SAM-esque emulation for generic transport request failures. | |
1922 | */ | |
1923 | static void transport_generic_request_failure( | |
1924 | struct se_cmd *cmd, | |
1925 | struct se_device *dev, | |
1926 | int complete, | |
1927 | int sc) | |
1928 | { | |
07bde79a NB |
1929 | int ret = 0; |
1930 | ||
6708bb27 | 1931 | pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" |
e3d6f909 | 1932 | " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), |
a1d8b49a | 1933 | cmd->t_task_cdb[0]); |
6708bb27 | 1934 | pr_debug("-----[ i_state: %d t_state/def_t_state:" |
c66ac9db | 1935 | " %d/%d transport_error_status: %d\n", |
e3d6f909 | 1936 | cmd->se_tfo->get_cmd_state(cmd), |
c66ac9db NB |
1937 | cmd->t_state, cmd->deferred_t_state, |
1938 | cmd->transport_error_status); | |
6708bb27 | 1939 | pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d" |
c66ac9db NB |
1940 | " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" |
1941 | " t_transport_active: %d t_transport_stop: %d" | |
6708bb27 | 1942 | " t_transport_sent: %d\n", cmd->t_task_list_num, |
a1d8b49a AG |
1943 | atomic_read(&cmd->t_task_cdbs_left), |
1944 | atomic_read(&cmd->t_task_cdbs_sent), | |
1945 | atomic_read(&cmd->t_task_cdbs_ex_left), | |
1946 | atomic_read(&cmd->t_transport_active), | |
1947 | atomic_read(&cmd->t_transport_stop), | |
1948 | atomic_read(&cmd->t_transport_sent)); | |
c66ac9db NB |
1949 | |
1950 | transport_stop_all_task_timers(cmd); | |
1951 | ||
1952 | if (dev) | |
e3d6f909 | 1953 | atomic_inc(&dev->depth_left); |
c66ac9db NB |
1954 | /* |
1955 | * For SAM Task Attribute emulation for failed struct se_cmd | |
1956 | */ | |
1957 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) | |
1958 | transport_complete_task_attr(cmd); | |
1959 | ||
1960 | if (complete) { | |
1961 | transport_direct_request_timeout(cmd); | |
1962 | cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE; | |
1963 | } | |
1964 | ||
1965 | switch (cmd->transport_error_status) { | |
1966 | case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE: | |
1967 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | |
1968 | break; | |
1969 | case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS: | |
1970 | cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY; | |
1971 | break; | |
1972 | case PYX_TRANSPORT_INVALID_CDB_FIELD: | |
1973 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | |
1974 | break; | |
1975 | case PYX_TRANSPORT_INVALID_PARAMETER_LIST: | |
1976 | cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; | |
1977 | break; | |
1978 | case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES: | |
1979 | if (!sc) | |
1980 | transport_new_cmd_failure(cmd); | |
1981 | /* | |
1982 | * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES, | |
1983 | * we force this session to fall back to session | |
1984 | * recovery. | |
1985 | */ | |
e3d6f909 AG |
1986 | cmd->se_tfo->fall_back_to_erl0(cmd->se_sess); |
1987 | cmd->se_tfo->stop_session(cmd->se_sess, 0, 0); | |
c66ac9db NB |
1988 | |
1989 | goto check_stop; | |
1990 | case PYX_TRANSPORT_LU_COMM_FAILURE: | |
1991 | case PYX_TRANSPORT_ILLEGAL_REQUEST: | |
1992 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | |
1993 | break; | |
1994 | case PYX_TRANSPORT_UNKNOWN_MODE_PAGE: | |
1995 | cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE; | |
1996 | break; | |
1997 | case PYX_TRANSPORT_WRITE_PROTECTED: | |
1998 | cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; | |
1999 | break; | |
2000 | case PYX_TRANSPORT_RESERVATION_CONFLICT: | |
2001 | /* | |
2002 | * No SENSE Data payload for this case, set SCSI Status | |
2003 | * and queue the response to $FABRIC_MOD. | |
2004 | * | |
2005 | * Uses linux/include/scsi/scsi.h SAM status codes defs | |
2006 | */ | |
2007 | cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; | |
2008 | /* | |
2009 | * For UA Interlock Code 11b, a RESERVATION CONFLICT will | |
2010 | * establish a UNIT ATTENTION with PREVIOUS RESERVATION | |
2011 | * CONFLICT STATUS. | |
2012 | * | |
2013 | * See spc4r17, section 7.4.6 Control Mode Page, Table 349 | |
2014 | */ | |
e3d6f909 AG |
2015 | if (cmd->se_sess && |
2016 | cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) | |
2017 | core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, | |
c66ac9db NB |
2018 | cmd->orig_fe_lun, 0x2C, |
2019 | ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); | |
2020 | ||
07bde79a NB |
2021 | ret = cmd->se_tfo->queue_status(cmd); |
2022 | if (ret == -EAGAIN) | |
2023 | goto queue_full; | |
c66ac9db NB |
2024 | goto check_stop; |
2025 | case PYX_TRANSPORT_USE_SENSE_REASON: | |
2026 | /* | |
2027 | * struct se_cmd->scsi_sense_reason already set | |
2028 | */ | |
2029 | break; | |
2030 | default: | |
6708bb27 | 2031 | pr_err("Unknown transport error for CDB 0x%02x: %d\n", |
a1d8b49a | 2032 | cmd->t_task_cdb[0], |
c66ac9db NB |
2033 | cmd->transport_error_status); |
2034 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | |
2035 | break; | |
2036 | } | |
16ab8e60 NB |
2037 | /* |
2038 | * If a fabric does not define a cmd->se_tfo->new_cmd_map caller, | |
2039 | * make the call to transport_send_check_condition_and_sense() | |
2040 | * directly. Otherwise expect the fabric to make the call to | |
2041 | * transport_send_check_condition_and_sense() after handling | |
2042 | * possible unsolicited write data payloads. | 
2043 | */ | |
2044 | if (!sc && !cmd->se_tfo->new_cmd_map) | |
c66ac9db | 2045 | transport_new_cmd_failure(cmd); |
07bde79a NB |
2046 | else { |
2047 | ret = transport_send_check_condition_and_sense(cmd, | |
2048 | cmd->scsi_sense_reason, 0); | |
2049 | if (ret == -EAGAIN) | |
2050 | goto queue_full; | |
2051 | } | |
2052 | ||
c66ac9db NB |
2053 | check_stop: |
2054 | transport_lun_remove_cmd(cmd); | |
6708bb27 | 2055 | if (!transport_cmd_check_stop_to_fabric(cmd)) |
c66ac9db | 2056 | ; |
07bde79a NB |
2057 | return; |
2058 | ||
2059 | queue_full: | |
2060 | cmd->t_state = TRANSPORT_COMPLETE_OK; | |
2061 | transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf); | |
c66ac9db NB |
2062 | } |
2063 | ||
2064 | static void transport_direct_request_timeout(struct se_cmd *cmd) | |
2065 | { | |
2066 | unsigned long flags; | |
2067 | ||
a1d8b49a | 2068 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
6708bb27 | 2069 | if (!atomic_read(&cmd->t_transport_timeout)) { |
a1d8b49a | 2070 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2071 | return; |
2072 | } | |
a1d8b49a AG |
2073 | if (atomic_read(&cmd->t_task_cdbs_timeout_left)) { |
2074 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
c66ac9db NB |
2075 | return; |
2076 | } | |
2077 | ||
a1d8b49a AG |
2078 | atomic_sub(atomic_read(&cmd->t_transport_timeout), |
2079 | &cmd->t_se_count); | |
2080 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
c66ac9db NB |
2081 | } |
2082 | ||
2083 | static void transport_generic_request_timeout(struct se_cmd *cmd) | |
2084 | { | |
2085 | unsigned long flags; | |
2086 | ||
2087 | /* | |
a1d8b49a | 2088 | * Reset cmd->t_se_count to allow transport_generic_remove() |
c66ac9db NB |
2089 | * to allow last call to free memory resources. |
2090 | */ | |
a1d8b49a AG |
2091 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
2092 | if (atomic_read(&cmd->t_transport_timeout) > 1) { | |
2093 | int tmp = (atomic_read(&cmd->t_transport_timeout) - 1); | |
c66ac9db | 2094 | |
a1d8b49a | 2095 | atomic_sub(tmp, &cmd->t_se_count); |
c66ac9db | 2096 | } |
a1d8b49a | 2097 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 2098 | |
35462975 | 2099 | transport_generic_remove(cmd, 0); |
c66ac9db NB |
2100 | } |
2101 | ||
c66ac9db NB |
2102 | static inline u32 transport_lba_21(unsigned char *cdb) |
2103 | { | |
2104 | return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3]; | |
2105 | } | |
2106 | ||
2107 | static inline u32 transport_lba_32(unsigned char *cdb) | |
2108 | { | |
2109 | return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; | |
2110 | } | |
2111 | ||
2112 | static inline unsigned long long transport_lba_64(unsigned char *cdb) | |
2113 | { | |
2114 | unsigned int __v1, __v2; | |
2115 | ||
2116 | __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; | |
2117 | __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; | |
2118 | ||
2119 | return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; | |
2120 | } | |
2121 | ||
2122 | /* | |
2123 | * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs | |
2124 | */ | |
2125 | static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) | |
2126 | { | |
2127 | unsigned int __v1, __v2; | |
2128 | ||
2129 | __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15]; | |
2130 | __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19]; | |
2131 | ||
2132 | return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; | |
2133 | } | |
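
As a worked example (stand-alone, not kernel code), the 64-bit LBA of a READ_16/WRITE_16 CDB is assembled big-endian from bytes 2..9, exactly as transport_lba_64() does above.

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		unsigned char cdb[16] = { 0 };
		uint32_t hi, lo;
		uint64_t lba;

		cdb[5] = 0x01;		/* LBA 0x0000000100000002, big-endian in bytes 2..9 */
		cdb[9] = 0x02;

		hi = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
		lo = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		lba = ((uint64_t)hi << 32) | lo;

		printf("lba = 0x%016llx\n", (unsigned long long)lba);
		return 0;
	}
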
2134 | ||
2135 | static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd) | |
2136 | { | |
2137 | unsigned long flags; | |
2138 | ||
a1d8b49a | 2139 | spin_lock_irqsave(&se_cmd->t_state_lock, flags); |
c66ac9db | 2140 | se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; |
a1d8b49a | 2141 | spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); |
c66ac9db NB |
2142 | } |
2143 | ||
2144 | /* | |
2145 | * Called from interrupt context. | |
2146 | */ | |
2147 | static void transport_task_timeout_handler(unsigned long data) | |
2148 | { | |
2149 | struct se_task *task = (struct se_task *)data; | |
e3d6f909 | 2150 | struct se_cmd *cmd = task->task_se_cmd; |
c66ac9db NB |
2151 | unsigned long flags; |
2152 | ||
6708bb27 | 2153 | pr_debug("transport task timeout fired! task: %p cmd: %p\n", task, cmd); |
c66ac9db | 2154 | |
a1d8b49a | 2155 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 2156 | if (task->task_flags & TF_STOP) { |
a1d8b49a | 2157 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2158 | return; |
2159 | } | |
2160 | task->task_flags &= ~TF_RUNNING; | |
2161 | ||
2162 | /* | |
2163 | * Determine if transport_complete_task() has already been called. | |
2164 | */ | |
6708bb27 AG |
2165 | if (!atomic_read(&task->task_active)) { |
2166 | pr_debug("transport task: %p cmd: %p timeout task_active" | |
c66ac9db | 2167 | " == 0\n", task, cmd); |
a1d8b49a | 2168 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2169 | return; |
2170 | } | |
2171 | ||
a1d8b49a AG |
2172 | atomic_inc(&cmd->t_se_count); |
2173 | atomic_inc(&cmd->t_transport_timeout); | |
2174 | cmd->t_tasks_failed = 1; | |
c66ac9db NB |
2175 | |
2176 | atomic_set(&task->task_timeout, 1); | |
2177 | task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT; | |
2178 | task->task_scsi_status = 1; | |
2179 | ||
2180 | if (atomic_read(&task->task_stop)) { | |
6708bb27 | 2181 | pr_debug("transport task: %p cmd: %p timeout task_stop" |
c66ac9db | 2182 | " == 1\n", task, cmd); |
a1d8b49a | 2183 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2184 | complete(&task->task_stop_comp); |
2185 | return; | |
2186 | } | |
2187 | ||
6708bb27 AG |
2188 | if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) { |
2189 | pr_debug("transport task: %p cmd: %p timeout non zero" | |
c66ac9db | 2190 | " t_task_cdbs_left\n", task, cmd); |
a1d8b49a | 2191 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2192 | return; |
2193 | } | |
6708bb27 | 2194 | pr_debug("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n", |
c66ac9db NB |
2195 | task, cmd); |
2196 | ||
2197 | cmd->t_state = TRANSPORT_COMPLETE_FAILURE; | |
a1d8b49a | 2198 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2199 | |
2200 | transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE); | |
2201 | } | |
2202 | ||
2203 | /* | |
a1d8b49a | 2204 | * Called with cmd->t_state_lock held. |
c66ac9db NB |
2205 | */ |
2206 | static void transport_start_task_timer(struct se_task *task) | |
2207 | { | |
2208 | struct se_device *dev = task->se_dev; | |
2209 | int timeout; | |
2210 | ||
2211 | if (task->task_flags & TF_RUNNING) | |
2212 | return; | |
2213 | /* | |
2214 | * If the task_timeout is disabled, exit now. | |
2215 | */ | |
e3d6f909 | 2216 | timeout = dev->se_sub_dev->se_dev_attrib.task_timeout; |
6708bb27 | 2217 | if (!timeout) |
c66ac9db NB |
2218 | return; |
2219 | ||
2220 | init_timer(&task->task_timer); | |
2221 | task->task_timer.expires = (get_jiffies_64() + timeout * HZ); | |
2222 | task->task_timer.data = (unsigned long) task; | |
2223 | task->task_timer.function = transport_task_timeout_handler; | |
2224 | ||
2225 | task->task_flags |= TF_RUNNING; | |
2226 | add_timer(&task->task_timer); | |
2227 | #if 0 | |
6708bb27 | 2228 | pr_debug("Starting task timer for cmd: %p task: %p seconds:" |
c66ac9db NB |
2229 | " %d\n", task->task_se_cmd, task, timeout); |
2230 | #endif | |
2231 | } | |
2232 | ||
2233 | /* | |
a1d8b49a | 2234 | * Called with spin_lock_irq(&cmd->t_state_lock) held. |
c66ac9db NB |
2235 | */ |
2236 | void __transport_stop_task_timer(struct se_task *task, unsigned long *flags) | |
2237 | { | |
e3d6f909 | 2238 | struct se_cmd *cmd = task->task_se_cmd; |
c66ac9db | 2239 | |
6708bb27 | 2240 | if (!(task->task_flags & TF_RUNNING)) | 
c66ac9db NB |
2241 | return; |
2242 | ||
2243 | task->task_flags |= TF_STOP; | |
a1d8b49a | 2244 | spin_unlock_irqrestore(&cmd->t_state_lock, *flags); |
c66ac9db NB |
2245 | |
2246 | del_timer_sync(&task->task_timer); | |
2247 | ||
a1d8b49a | 2248 | spin_lock_irqsave(&cmd->t_state_lock, *flags); |
c66ac9db NB |
2249 | task->task_flags &= ~TF_RUNNING; |
2250 | task->task_flags &= ~TF_STOP; | |
2251 | } | |
2252 | ||
2253 | static void transport_stop_all_task_timers(struct se_cmd *cmd) | |
2254 | { | |
2255 | struct se_task *task = NULL, *task_tmp; | |
2256 | unsigned long flags; | |
2257 | ||
a1d8b49a | 2258 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 2259 | list_for_each_entry_safe(task, task_tmp, |
a1d8b49a | 2260 | &cmd->t_task_list, t_list) |
c66ac9db | 2261 | __transport_stop_task_timer(task, &flags); |
a1d8b49a | 2262 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2263 | } |
2264 | ||
2265 | static inline int transport_tcq_window_closed(struct se_device *dev) | |
2266 | { | |
2267 | if (dev->dev_tcq_window_closed++ < | |
2268 | PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) { | |
2269 | msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT); | |
2270 | } else | |
2271 | msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG); | |
2272 | ||
e3d6f909 | 2273 | wake_up_interruptible(&dev->dev_queue_obj.thread_wq); |
c66ac9db NB |
2274 | return 0; |
2275 | } | |
2276 | ||
2277 | /* | |
2278 | * Called from Fabric Module context from transport_execute_tasks() | |
2279 | * | |
2280 | * The return of this function determines if the tasks from struct se_cmd | 
2281 | * get added to the execution queue in transport_execute_tasks(), | |
2282 | * or are added to the delayed or ordered lists here. | |
2283 | */ | |
2284 | static inline int transport_execute_task_attr(struct se_cmd *cmd) | |
2285 | { | |
5951146d | 2286 | if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) |
c66ac9db NB |
2287 | return 1; |
2288 | /* | |
25985edc | 2289 | * Check for the existence of HEAD_OF_QUEUE, and if true return 1 |
c66ac9db NB |
2290 | * to allow the passed struct se_cmd list of tasks to be added to the front of the list. | 
2291 | */ | |
e66ecd50 | 2292 | if (cmd->sam_task_attr == MSG_HEAD_TAG) { |
5951146d | 2293 | atomic_inc(&cmd->se_dev->dev_hoq_count); |
c66ac9db | 2294 | smp_mb__after_atomic_inc(); |
6708bb27 | 2295 | pr_debug("Added HEAD_OF_QUEUE for CDB:" |
c66ac9db | 2296 | " 0x%02x, se_ordered_id: %u\n", |
6708bb27 | 2297 | cmd->t_task_cdb[0], |
c66ac9db NB |
2298 | cmd->se_ordered_id); |
2299 | return 1; | |
e66ecd50 | 2300 | } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { |
5951146d AG |
2301 | spin_lock(&cmd->se_dev->ordered_cmd_lock); |
2302 | list_add_tail(&cmd->se_ordered_node, | |
2303 | &cmd->se_dev->ordered_cmd_list); | |
2304 | spin_unlock(&cmd->se_dev->ordered_cmd_lock); | |
c66ac9db | 2305 | |
5951146d | 2306 | atomic_inc(&cmd->se_dev->dev_ordered_sync); |
c66ac9db NB |
2307 | smp_mb__after_atomic_inc(); |
2308 | ||
6708bb27 | 2309 | pr_debug("Added ORDERED for CDB: 0x%02x to ordered" |
c66ac9db | 2310 | " list, se_ordered_id: %u\n", |
a1d8b49a | 2311 | cmd->t_task_cdb[0], |
c66ac9db NB |
2312 | cmd->se_ordered_id); |
2313 | /* | |
2314 | * Add ORDERED command to tail of execution queue if | |
2315 | * no other older commands exist that need to be | |
2316 | * completed first. | |
2317 | */ | |
6708bb27 | 2318 | if (!atomic_read(&cmd->se_dev->simple_cmds)) |
c66ac9db NB |
2319 | return 1; |
2320 | } else { | |
2321 | /* | |
2322 | * For SIMPLE and UNTAGGED Task Attribute commands | |
2323 | */ | |
5951146d | 2324 | atomic_inc(&cmd->se_dev->simple_cmds); |
c66ac9db NB |
2325 | smp_mb__after_atomic_inc(); |
2326 | } | |
2327 | /* | |
2328 | * Otherwise, if one or more outstanding ORDERED task attributes exist, | 
2329 | * add the dormant task(s) built for the passed struct se_cmd to the | |
2330 | * execution queue and become in Active state for this struct se_device. | |
2331 | */ | |
5951146d | 2332 | if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) { |
c66ac9db NB |
2333 | /* |
2334 | * Otherwise, add cmd w/ tasks to delayed cmd queue that | |
25985edc | 2335 | * will be drained upon completion of HEAD_OF_QUEUE task. |
c66ac9db | 2336 | */ |
5951146d | 2337 | spin_lock(&cmd->se_dev->delayed_cmd_lock); |
c66ac9db | 2338 | cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR; |
5951146d AG |
2339 | list_add_tail(&cmd->se_delayed_node, |
2340 | &cmd->se_dev->delayed_cmd_list); | |
2341 | spin_unlock(&cmd->se_dev->delayed_cmd_lock); | |
c66ac9db | 2342 | |
6708bb27 | 2343 | pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to" |
c66ac9db | 2344 | " delayed CMD list, se_ordered_id: %u\n", |
a1d8b49a | 2345 | cmd->t_task_cdb[0], cmd->sam_task_attr, |
c66ac9db NB |
2346 | cmd->se_ordered_id); |
2347 | /* | |
2348 | * Return zero to let transport_execute_tasks() know | |
2349 | * not to add the delayed tasks to the execution list. | |
2350 | */ | |
2351 | return 0; | |
2352 | } | |
2353 | /* | |
2354 | * Otherwise, no ORDERED task attributes exist.. | |
2355 | */ | |
2356 | return 1; | |
2357 | } | |
2358 | ||
2359 | /* | |
2360 | * Called from fabric module context in transport_generic_new_cmd() and | |
2361 | * transport_generic_process_write() | |
2362 | */ | |
2363 | static int transport_execute_tasks(struct se_cmd *cmd) | |
2364 | { | |
2365 | int add_tasks; | |
2366 | ||
db1620a2 CH |
2367 | if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) { |
2368 | cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE; | |
2369 | transport_generic_request_failure(cmd, NULL, 0, 1); | |
2370 | return 0; | |
c66ac9db | 2371 | } |
db1620a2 | 2372 | |
c66ac9db NB |
2373 | /* |
2374 | * Call transport_cmd_check_stop() to see if a fabric exception | |
25985edc | 2375 | * has occurred that prevents execution. |
c66ac9db | 2376 | */ |
6708bb27 | 2377 | if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) { |
c66ac9db NB |
2378 | /* |
2379 | * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE | |
2380 | * attribute for the tasks of the received struct se_cmd CDB | |
2381 | */ | |
2382 | add_tasks = transport_execute_task_attr(cmd); | |
e3d6f909 | 2383 | if (!add_tasks) |
c66ac9db NB |
2384 | goto execute_tasks; |
2385 | /* | |
2386 | * This calls transport_add_tasks_from_cmd() to handle | |
2387 | * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation | |
2388 | * (if enabled) in __transport_add_task_to_execute_queue() and | |
2389 | * transport_add_task_check_sam_attr(). | |
2390 | */ | |
2391 | transport_add_tasks_from_cmd(cmd); | |
2392 | } | |
2393 | /* | |
2394 | * Kick the execution queue for the cmd associated struct se_device | |
2395 | * storage object. | |
2396 | */ | |
2397 | execute_tasks: | |
5951146d | 2398 | __transport_execute_tasks(cmd->se_dev); |
c66ac9db NB |
2399 | return 0; |
2400 | } | |
2401 | ||
2402 | /* | |
2403 | * Called to check the struct se_device tcq depth window, and once open pull a struct se_task | 
2404 | * from struct se_device->execute_task_list and dispatch it to the subsystem plugin. | 
2405 | * | |
2406 | * Called from transport_processing_thread() | |
2407 | */ | |
2408 | static int __transport_execute_tasks(struct se_device *dev) | |
2409 | { | |
2410 | int error; | |
2411 | struct se_cmd *cmd = NULL; | |
e3d6f909 | 2412 | struct se_task *task = NULL; |
c66ac9db NB |
2413 | unsigned long flags; |
2414 | ||
2415 | /* | |
2416 | * Check if there is enough room in the device and HBA queue to send | |
a1d8b49a | 2417 | * struct se_tasks to the selected transport. |
c66ac9db NB |
2418 | */ |
2419 | check_depth: | |
e3d6f909 | 2420 | if (!atomic_read(&dev->depth_left)) |
c66ac9db | 2421 | return transport_tcq_window_closed(dev); |
c66ac9db | 2422 | |
e3d6f909 | 2423 | dev->dev_tcq_window_closed = 0; |
c66ac9db | 2424 | |
e3d6f909 AG |
2425 | spin_lock_irq(&dev->execute_task_lock); |
2426 | if (list_empty(&dev->execute_task_list)) { | |
2427 | spin_unlock_irq(&dev->execute_task_lock); | |
c66ac9db NB |
2428 | return 0; |
2429 | } | |
e3d6f909 AG |
2430 | task = list_first_entry(&dev->execute_task_list, |
2431 | struct se_task, t_execute_list); | |
2432 | list_del(&task->t_execute_list); | |
2433 | atomic_set(&task->task_execute_queue, 0); | |
2434 | atomic_dec(&dev->execute_tasks); | |
2435 | spin_unlock_irq(&dev->execute_task_lock); | |
c66ac9db NB |
2436 | |
2437 | atomic_dec(&dev->depth_left); | |
c66ac9db | 2438 | |
e3d6f909 | 2439 | cmd = task->task_se_cmd; |
c66ac9db | 2440 | |
a1d8b49a | 2441 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db NB |
2442 | atomic_set(&task->task_active, 1); |
2443 | atomic_set(&task->task_sent, 1); | |
a1d8b49a | 2444 | atomic_inc(&cmd->t_task_cdbs_sent); |
c66ac9db | 2445 | |
a1d8b49a AG |
2446 | if (atomic_read(&cmd->t_task_cdbs_sent) == |
2447 | cmd->t_task_list_num) | |
c66ac9db NB |
2448 | atomic_set(&cmd->transport_sent, 1); |
2449 | ||
2450 | transport_start_task_timer(task); | |
a1d8b49a | 2451 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2452 | /* |
2453 | * The struct se_cmd->transport_emulate_cdb() function pointer is used | |
e3d6f909 | 2454 | * to grab REPORT_LUNS and other CDBs we want to handle before they hit the |
c66ac9db NB |
2455 | * struct se_subsystem_api->do_task() caller below. |
2456 | */ | |
2457 | if (cmd->transport_emulate_cdb) { | |
2458 | error = cmd->transport_emulate_cdb(cmd); | |
2459 | if (error != 0) { | |
2460 | cmd->transport_error_status = error; | |
2461 | atomic_set(&task->task_active, 0); | |
2462 | atomic_set(&cmd->transport_sent, 0); | |
2463 | transport_stop_tasks_for_cmd(cmd); | |
2464 | transport_generic_request_failure(cmd, dev, 0, 1); | |
2465 | goto check_depth; | |
2466 | } | |
2467 | /* | |
2468 | * Handle the successful completion for transport_emulate_cdb() | |
2469 | * for synchronous operation, i.e. when SCF_EMULATE_CDB_ASYNC is not set. | 
2470 | * Otherwise the caller is expected to complete the task with | |
2471 | * proper status. | |
2472 | */ | |
2473 | if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) { | |
2474 | cmd->scsi_status = SAM_STAT_GOOD; | |
2475 | task->task_scsi_status = GOOD; | |
2476 | transport_complete_task(task, 1); | |
2477 | } | |
2478 | } else { | |
2479 | /* | |
2480 | * Currently for all virtual TCM plugins including IBLOCK, FILEIO and | |
2481 | * RAMDISK we use the internal transport_emulate_control_cdb() logic | |
2482 | * with struct se_subsystem_api callers for the primary SPC-3 TYPE_DISK | |
2483 | * LUN emulation code. | |
2484 | * | |
2485 | * For TCM/pSCSI and all other SCF_SCSI_DATA_SG_IO_CDB I/O tasks we | |
2486 | * call ->do_task() directly and let the underlying TCM subsystem plugin | |
2487 | * code handle the CDB emulation. | |
2488 | */ | |
e3d6f909 AG |
2489 | if ((dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) && |
2490 | (!(task->task_se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))) | |
c66ac9db NB |
2491 | error = transport_emulate_control_cdb(task); |
2492 | else | |
e3d6f909 | 2493 | error = dev->transport->do_task(task); |
c66ac9db NB |
2494 | |
2495 | if (error != 0) { | |
2496 | cmd->transport_error_status = error; | |
2497 | atomic_set(&task->task_active, 0); | |
2498 | atomic_set(&cmd->transport_sent, 0); | |
2499 | transport_stop_tasks_for_cmd(cmd); | |
2500 | transport_generic_request_failure(cmd, dev, 0, 1); | |
2501 | } | |
2502 | } | |
2503 | ||
2504 | goto check_depth; | |
2505 | ||
2506 | return 0; | |
2507 | } | |
2508 | ||
2509 | void transport_new_cmd_failure(struct se_cmd *se_cmd) | |
2510 | { | |
2511 | unsigned long flags; | |
2512 | /* | |
2513 | * Any unsolicited data will get dumped for failed command inside of | |
2514 | * the fabric plugin | |
2515 | */ | |
a1d8b49a | 2516 | spin_lock_irqsave(&se_cmd->t_state_lock, flags); |
c66ac9db NB |
2517 | se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED; |
2518 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
a1d8b49a | 2519 | spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); |
c66ac9db NB |
2520 | } |
2521 | ||
2522 | static void transport_nop_wait_for_tasks(struct se_cmd *, int, int); | |
2523 | ||
2524 | static inline u32 transport_get_sectors_6( | |
2525 | unsigned char *cdb, | |
2526 | struct se_cmd *cmd, | |
2527 | int *ret) | |
2528 | { | |
5951146d | 2529 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
2530 | |
2531 | /* | |
2532 | * Assume TYPE_DISK for non struct se_device objects. | |
2533 | * Use 8-bit sector value. | |
2534 | */ | |
2535 | if (!dev) | |
2536 | goto type_disk; | |
2537 | ||
2538 | /* | |
2539 | * Use 24-bit allocation length for TYPE_TAPE. | |
2540 | */ | |
e3d6f909 | 2541 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) |
c66ac9db NB |
2542 | return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4]; |
2543 | ||
2544 | /* | |
2545 | * Everything else assume TYPE_DISK Sector CDB location. | |
2546 | * Use 8-bit sector value. | |
2547 | */ | |
2548 | type_disk: | |
2549 | return (u32)cdb[4]; | |
2550 | } | |
2551 | ||
2552 | static inline u32 transport_get_sectors_10( | |
2553 | unsigned char *cdb, | |
2554 | struct se_cmd *cmd, | |
2555 | int *ret) | |
2556 | { | |
5951146d | 2557 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
2558 | |
2559 | /* | |
2560 | * Assume TYPE_DISK for non struct se_device objects. | |
2561 | * Use 16-bit sector value. | |
2562 | */ | |
2563 | if (!dev) | |
2564 | goto type_disk; | |
2565 | ||
2566 | /* | |
2567 | * XXX_10 is not defined in SSC, throw an exception | |
2568 | */ | |
e3d6f909 AG |
2569 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) { |
2570 | *ret = -EINVAL; | |
c66ac9db NB |
2571 | return 0; |
2572 | } | |
2573 | ||
2574 | /* | |
2575 | * Everything else assume TYPE_DISK Sector CDB location. | |
2576 | * Use 16-bit sector value. | |
2577 | */ | |
2578 | type_disk: | |
2579 | return (u32)(cdb[7] << 8) + cdb[8]; | |
2580 | } | |
2581 | ||
2582 | static inline u32 transport_get_sectors_12( | |
2583 | unsigned char *cdb, | |
2584 | struct se_cmd *cmd, | |
2585 | int *ret) | |
2586 | { | |
5951146d | 2587 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
2588 | |
2589 | /* | |
2590 | * Assume TYPE_DISK for non struct se_device objects. | |
2591 | * Use 32-bit sector value. | |
2592 | */ | |
2593 | if (!dev) | |
2594 | goto type_disk; | |
2595 | ||
2596 | /* | |
2597 | * XXX_12 is not defined in SSC, throw an exception | |
2598 | */ | |
e3d6f909 AG |
2599 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) { |
2600 | *ret = -EINVAL; | |
c66ac9db NB |
2601 | return 0; |
2602 | } | |
2603 | ||
2604 | /* | |
2605 | * Everything else assume TYPE_DISK Sector CDB location. | |
2606 | * Use 32-bit sector value. | |
2607 | */ | |
2608 | type_disk: | |
2609 | return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9]; | |
2610 | } | |
2611 | ||
2612 | static inline u32 transport_get_sectors_16( | |
2613 | unsigned char *cdb, | |
2614 | struct se_cmd *cmd, | |
2615 | int *ret) | |
2616 | { | |
5951146d | 2617 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
2618 | |
2619 | /* | |
2620 | * Assume TYPE_DISK for non struct se_device objects. | |
2621 | * Use 32-bit sector value. | |
2622 | */ | |
2623 | if (!dev) | |
2624 | goto type_disk; | |
2625 | ||
2626 | /* | |
2627 | * Use 24-bit allocation length for TYPE_TAPE. | |
2628 | */ | |
e3d6f909 | 2629 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) |
c66ac9db NB |
2630 | return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14]; |
2631 | ||
2632 | type_disk: | |
2633 | return (u32)(cdb[10] << 24) + (cdb[11] << 16) + | |
2634 | (cdb[12] << 8) + cdb[13]; | |
2635 | } | |
2636 | ||
2637 | /* | |
2638 | * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants | |
2639 | */ | |
2640 | static inline u32 transport_get_sectors_32( | |
2641 | unsigned char *cdb, | |
2642 | struct se_cmd *cmd, | |
2643 | int *ret) | |
2644 | { | |
2645 | /* | |
2646 | * Assume TYPE_DISK for non struct se_device objects. | |
2647 | * Use 32-bit sector value. | |
2648 | */ | |
2649 | return (u32)(cdb[28] << 24) + (cdb[29] << 16) + | |
2650 | (cdb[30] << 8) + cdb[31]; | |
2651 | ||
2652 | } | |
2653 | ||
2654 | static inline u32 transport_get_size( | |
2655 | u32 sectors, | |
2656 | unsigned char *cdb, | |
2657 | struct se_cmd *cmd) | |
2658 | { | |
5951146d | 2659 | struct se_device *dev = cmd->se_dev; |
c66ac9db | 2660 | |
e3d6f909 | 2661 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) { |
c66ac9db | 2662 | if (cdb[1] & 1) { /* sectors */ |
e3d6f909 | 2663 | return dev->se_sub_dev->se_dev_attrib.block_size * sectors; |
c66ac9db NB |
2664 | } else /* bytes */ |
2665 | return sectors; | |
2666 | } | |
2667 | #if 0 | |
6708bb27 | 2668 | pr_debug("Returning block_size: %u, sectors: %u == %u for" |
e3d6f909 AG |
2669 | " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors, |
2670 | dev->se_sub_dev->se_dev_attrib.block_size * sectors, | |
2671 | dev->transport->name); | |
c66ac9db | 2672 | #endif |
e3d6f909 | 2673 | return dev->se_sub_dev->se_dev_attrib.block_size * sectors; |
c66ac9db NB |
2674 | } |
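
A stand-alone illustration (not kernel code) of the size calculation for the common TYPE_DISK case: the sector count taken from the CDB multiplied by the backend block size; the 512-byte block size is an assumption for the example.

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* READ_10 for 8 sectors: transfer length lives in cdb[7..8] */
		unsigned char cdb[10] = { 0x28, 0, 0, 0, 0, 0, 0, 0x00, 0x08, 0 };
		uint32_t sectors = (cdb[7] << 8) + cdb[8];	/* as in transport_get_sectors_10() */
		uint32_t block_size = 512;			/* assumed dev_attrib.block_size */

		printf("sectors=%u size=%u bytes\n", sectors, sectors * block_size);
		return 0;
	}
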
2675 | ||
c66ac9db NB |
2676 | static void transport_xor_callback(struct se_cmd *cmd) |
2677 | { | |
2678 | unsigned char *buf, *addr; | |
ec98f782 | 2679 | struct scatterlist *sg; |
c66ac9db NB |
2680 | unsigned int offset; |
2681 | int i; | |
ec98f782 | 2682 | int count; |
c66ac9db NB |
2683 | /* |
2684 | * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command | |
2685 | * | |
2686 | * 1) read the specified logical block(s); | |
2687 | * 2) transfer logical blocks from the data-out buffer; | |
2688 | * 3) XOR the logical blocks transferred from the data-out buffer with | |
2689 | * the logical blocks read, storing the resulting XOR data in a buffer; | |
2690 | * 4) if the DISABLE WRITE bit is set to zero, then write the logical | |
2691 | * blocks transferred from the data-out buffer; and | |
2692 | * 5) transfer the resulting XOR data to the data-in buffer. | |
2693 | */ | |
2694 | buf = kmalloc(cmd->data_length, GFP_KERNEL); | |
6708bb27 AG |
2695 | if (!buf) { |
2696 | pr_err("Unable to allocate xor_callback buf\n"); | |
c66ac9db NB |
2697 | return; |
2698 | } | |
2699 | /* | |
ec98f782 | 2700 | * Copy the scatterlist WRITE buffer located at cmd->t_data_sg |
c66ac9db NB |
2701 | * into the locally allocated *buf |
2702 | */ | |
ec98f782 AG |
2703 | sg_copy_to_buffer(cmd->t_data_sg, |
2704 | cmd->t_data_nents, | |
2705 | buf, | |
2706 | cmd->data_length); | |
2707 | ||
c66ac9db NB |
2708 | /* |
2709 | * Now perform the XOR against the BIDI read memory located at | |
a1d8b49a | 2710 | * cmd->t_mem_bidi_list |
c66ac9db NB |
2711 | */ |
2712 | ||
2713 | offset = 0; | |
ec98f782 AG |
2714 | for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) { |
2715 | addr = kmap_atomic(sg_page(sg), KM_USER0); | |
2716 | if (!addr) | |
c66ac9db NB |
2717 | goto out; |
2718 | ||
ec98f782 AG |
2719 | for (i = 0; i < sg->length; i++) |
2720 | *(addr + sg->offset + i) ^= *(buf + offset + i); | |
c66ac9db | 2721 | |
ec98f782 | 2722 | offset += sg->length; |
c66ac9db NB |
2723 | kunmap_atomic(addr, KM_USER0); |
2724 | } | |
ec98f782 | 2725 | |
c66ac9db NB |
2726 | out: |
2727 | kfree(buf); | |
2728 | } | |
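
A small stand-alone sketch (not kernel code) of the XOR step above: the data-out payload is XORed into the blocks read from the medium, and the result is what ends up in the data-in (BIDI) buffer.

	#include <stdio.h>

	int main(void)
	{
		unsigned char data_out[4] = { 0xff, 0x0f, 0xaa, 0x00 };	/* from initiator */
		unsigned char read_buf[4] = { 0xf0, 0x0f, 0x55, 0x00 };	/* read from media */
		int i;

		for (i = 0; i < 4; i++)
			read_buf[i] ^= data_out[i];	/* result returned in the data-in buffer */

		for (i = 0; i < 4; i++)
			printf("%02x ", read_buf[i]);	/* 0f 00 ff 00 */
		printf("\n");
		return 0;
	}
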
2729 | ||
2730 | /* | |
2731 | * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd | |
2732 | */ | |
2733 | static int transport_get_sense_data(struct se_cmd *cmd) | |
2734 | { | |
2735 | unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL; | |
2736 | struct se_device *dev; | |
2737 | struct se_task *task = NULL, *task_tmp; | |
2738 | unsigned long flags; | |
2739 | u32 offset = 0; | |
2740 | ||
e3d6f909 AG |
2741 | WARN_ON(!cmd->se_lun); |
2742 | ||
a1d8b49a | 2743 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 2744 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { |
a1d8b49a | 2745 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2746 | return 0; |
2747 | } | |
2748 | ||
2749 | list_for_each_entry_safe(task, task_tmp, | |
a1d8b49a | 2750 | &cmd->t_task_list, t_list) { |
c66ac9db NB |
2751 | |
2752 | if (!task->task_sense) | |
2753 | continue; | |
2754 | ||
2755 | dev = task->se_dev; | |
6708bb27 | 2756 | if (!dev) |
c66ac9db NB |
2757 | continue; |
2758 | ||
e3d6f909 | 2759 | if (!dev->transport->get_sense_buffer) { |
6708bb27 | 2760 | pr_err("dev->transport->get_sense_buffer" |
c66ac9db NB |
2761 | " is NULL\n"); |
2762 | continue; | |
2763 | } | |
2764 | ||
e3d6f909 | 2765 | sense_buffer = dev->transport->get_sense_buffer(task); |
6708bb27 AG |
2766 | if (!sense_buffer) { |
2767 | pr_err("ITT[0x%08x]_TASK[%d]: Unable to locate" | |
c66ac9db | 2768 | " sense buffer for task with sense\n", |
e3d6f909 | 2769 | cmd->se_tfo->get_task_tag(cmd), task->task_no); |
c66ac9db NB |
2770 | continue; |
2771 | } | |
a1d8b49a | 2772 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 2773 | |
e3d6f909 | 2774 | offset = cmd->se_tfo->set_fabric_sense_len(cmd, |
c66ac9db NB |
2775 | TRANSPORT_SENSE_BUFFER); |
2776 | ||
5951146d | 2777 | memcpy(&buffer[offset], sense_buffer, |
c66ac9db NB |
2778 | TRANSPORT_SENSE_BUFFER); |
2779 | cmd->scsi_status = task->task_scsi_status; | |
2780 | /* Automatically padded */ | |
2781 | cmd->scsi_sense_length = | |
2782 | (TRANSPORT_SENSE_BUFFER + offset); | |
2783 | ||
6708bb27 | 2784 | pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x" |
c66ac9db | 2785 | " and sense\n", |
e3d6f909 | 2786 | dev->se_hba->hba_id, dev->transport->name, |
c66ac9db NB |
2787 | cmd->scsi_status); |
2788 | return 0; | |
2789 | } | |
a1d8b49a | 2790 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2791 | |
2792 | return -1; | |
2793 | } | |
2794 | ||
c66ac9db NB |
2795 | static int |
2796 | transport_handle_reservation_conflict(struct se_cmd *cmd) | |
2797 | { | |
2798 | cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; | |
2799 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
2800 | cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT; | |
2801 | cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; | |
2802 | /* | |
2803 | * For UA Interlock Code 11b, a RESERVATION CONFLICT will | |
2804 | * establish a UNIT ATTENTION with PREVIOUS RESERVATION | |
2805 | * CONFLICT STATUS. | |
2806 | * | |
2807 | * See spc4r17, section 7.4.6 Control Mode Page, Table 349 | |
2808 | */ | |
e3d6f909 AG |
2809 | if (cmd->se_sess && |
2810 | cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) | |
2811 | core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, | |
c66ac9db NB |
2812 | cmd->orig_fe_lun, 0x2C, |
2813 | ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); | |
5951146d | 2814 | return -EINVAL; |
c66ac9db NB |
2815 | } |
2816 | ||
ec98f782 AG |
2817 | static inline long long transport_dev_end_lba(struct se_device *dev) |
2818 | { | |
2819 | return dev->transport->get_blocks(dev) + 1; | |
2820 | } | |
2821 | ||
2822 | static int transport_cmd_get_valid_sectors(struct se_cmd *cmd) | |
2823 | { | |
2824 | struct se_device *dev = cmd->se_dev; | |
2825 | u32 sectors; | |
2826 | ||
2827 | if (dev->transport->get_device_type(dev) != TYPE_DISK) | |
2828 | return 0; | |
2829 | ||
2830 | sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size); | |
2831 | ||
6708bb27 AG |
2832 | if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) { |
2833 | pr_err("LBA: %llu Sectors: %u exceeds" | |
ec98f782 AG |
2834 | " transport_dev_end_lba(): %llu\n", |
2835 | cmd->t_task_lba, sectors, | |
2836 | transport_dev_end_lba(dev)); | |
7abbe7f3 | 2837 | return -EINVAL; |
ec98f782 AG |
2838 | } |
2839 | ||
7abbe7f3 | 2840 | return 0; |
ec98f782 AG |
2841 | } |
2842 | ||
706d5860 NB |
2843 | static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev) |
2844 | { | |
2845 | /* | |
2846 | * Determine if the received WRITE_SAME is used for direct | 
2847 | * passthrough into Linux/SCSI with struct request via TCM/pSCSI | 
2848 | * or we are signaling the use of internal WRITE_SAME + UNMAP=1 | 
2849 | * emulation for Linux/BLOCK discard with TCM/IBLOCK code. | 
2850 | */ | |
2851 | int passthrough = (dev->transport->transport_type == | |
2852 | TRANSPORT_PLUGIN_PHBA_PDEV); | |
2853 | ||
2854 | if (!passthrough) { | |
2855 | if ((flags[0] & 0x04) || (flags[0] & 0x02)) { | |
2856 | pr_err("WRITE_SAME PBDATA and LBDATA" | |
2857 | " bits not supported for Block Discard" | |
2858 | " Emulation\n"); | |
2859 | return -ENOSYS; | |
2860 | } | |
2861 | /* | |
2862 | * Currently for the emulated case we only accept | |
2863 | * tpws with the UNMAP=1 bit set. | |
2864 | */ | |
2865 | if (!(flags[0] & 0x08)) { | |
2866 | pr_err("WRITE_SAME w/o UNMAP bit not" | |
2867 | " supported for Block Discard Emulation\n"); | |
2868 | return -ENOSYS; | |
2869 | } | |
2870 | } | |
2871 | ||
2872 | return 0; | |
2873 | } | |
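
A stand-alone sketch (not kernel code) of the flags-byte policy above for the emulated (non-pSCSI) case: PBDATA (0x04) and LBDATA (0x02) are rejected, and UNMAP (0x08) must be set.

	#include <stdio.h>

	static int check_write_same_flags(unsigned char flags)
	{
		if ((flags & 0x04) || (flags & 0x02))
			return -1;	/* PBDATA/LBDATA unsupported for discard emulation */
		if (!(flags & 0x08))
			return -1;	/* UNMAP=1 required for discard emulation */
		return 0;
	}

	int main(void)
	{
		printf("UNMAP only: %d\n", check_write_same_flags(0x08));	/*  0 accepted */
		printf("PBDATA set: %d\n", check_write_same_flags(0x0c));	/* -1 rejected */
		printf("no UNMAP:   %d\n", check_write_same_flags(0x00));	/* -1 rejected */
		return 0;
	}
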
2874 | ||
c66ac9db NB |
2875 | /* transport_generic_cmd_sequencer(): |
2876 | * | |
2877 | * Generic Command Sequencer that should work for most DAS transport | |
2878 | * drivers. | |
2879 | * | |
2880 | * Called from transport_generic_allocate_tasks() in the $FABRIC_MOD | |
2881 | * RX Thread. | |
2882 | * | |
2884 | * FIXME: Need to support other SCSI OPCODES here as well. | 
2884 | */ | |
2885 | static int transport_generic_cmd_sequencer( | |
2886 | struct se_cmd *cmd, | |
2887 | unsigned char *cdb) | |
2888 | { | |
5951146d | 2889 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
2890 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; |
2891 | int ret = 0, sector_ret = 0, passthrough; | |
2892 | u32 sectors = 0, size = 0, pr_reg_type = 0; | |
2893 | u16 service_action; | |
2894 | u8 alua_ascq = 0; | |
2895 | /* | |
2896 | * Check for an existing UNIT ATTENTION condition | |
2897 | */ | |
2898 | if (core_scsi3_ua_check(cmd, cdb) < 0) { | |
2899 | cmd->transport_wait_for_tasks = | |
2900 | &transport_nop_wait_for_tasks; | |
2901 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
2902 | cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION; | |
5951146d | 2903 | return -EINVAL; |
c66ac9db NB |
2904 | } |
2905 | /* | |
2906 | * Check status of Asymmetric Logical Unit Assignment port | |
2907 | */ | |
e3d6f909 | 2908 | ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq); |
c66ac9db NB |
2909 | if (ret != 0) { |
2910 | cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; | |
2911 | /* | |
25985edc | 2912 | * Set SCSI additional sense code (ASC) to 'LUN Not Accessible'; |
c66ac9db NB |
2913 | * The ALUA additional sense code qualifier (ASCQ) is determined |
2914 | * by the ALUA primary or secondary access state. | |
2915 | */ | |
2916 | if (ret > 0) { | |
2917 | #if 0 | |
6708bb27 | 2918 | pr_debug("[%s]: ALUA TG Port not available," |
c66ac9db | 2919 | " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n", |
e3d6f909 | 2920 | cmd->se_tfo->get_fabric_name(), alua_ascq); |
c66ac9db NB |
2921 | #endif |
2922 | transport_set_sense_codes(cmd, 0x04, alua_ascq); | |
2923 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
2924 | cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY; | |
5951146d | 2925 | return -EINVAL; |
c66ac9db NB |
2926 | } |
2927 | goto out_invalid_cdb_field; | |
2928 | } | |
2929 | /* | |
2930 | * Check status for SPC-3 Persistent Reservations | |
2931 | */ | |
e3d6f909 AG |
2932 | if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) { |
2933 | if (su_dev->t10_pr.pr_ops.t10_seq_non_holder( | |
c66ac9db NB |
2934 | cmd, cdb, pr_reg_type) != 0) |
2935 | return transport_handle_reservation_conflict(cmd); | |
2936 | /* | |
2937 | * This means the CDB is allowed for the SCSI Initiator port | |
2938 | * when said port is *NOT* holding the legacy SPC-2 or | |
2939 | * SPC-3 Persistent Reservation. | |
2940 | */ | |
2941 | } | |
2942 | ||
2943 | switch (cdb[0]) { | |
2944 | case READ_6: | |
2945 | sectors = transport_get_sectors_6(cdb, cmd, §or_ret); | |
2946 | if (sector_ret) | |
2947 | goto out_unsupported_cdb; | |
2948 | size = transport_get_size(sectors, cdb, cmd); | |
2949 | cmd->transport_split_cdb = &split_cdb_XX_6; | |
a1d8b49a | 2950 | cmd->t_task_lba = transport_lba_21(cdb); |
c66ac9db NB |
2951 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
2952 | break; | |
2953 | case READ_10: | |
2954 | sectors = transport_get_sectors_10(cdb, cmd, §or_ret); | |
2955 | if (sector_ret) | |
2956 | goto out_unsupported_cdb; | |
2957 | size = transport_get_size(sectors, cdb, cmd); | |
2958 | cmd->transport_split_cdb = &split_cdb_XX_10; | |
a1d8b49a | 2959 | cmd->t_task_lba = transport_lba_32(cdb); |
c66ac9db NB |
2960 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
2961 | break; | |
2962 | case READ_12: | |
2963 | sectors = transport_get_sectors_12(cdb, cmd, §or_ret); | |
2964 | if (sector_ret) | |
2965 | goto out_unsupported_cdb; | |
2966 | size = transport_get_size(sectors, cdb, cmd); | |
2967 | cmd->transport_split_cdb = &split_cdb_XX_12; | |
a1d8b49a | 2968 | cmd->t_task_lba = transport_lba_32(cdb); |
c66ac9db NB |
2969 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
2970 | break; | |
2971 | case READ_16: | |
2972 | sectors = transport_get_sectors_16(cdb, cmd, §or_ret); | |
2973 | if (sector_ret) | |
2974 | goto out_unsupported_cdb; | |
2975 | size = transport_get_size(sectors, cdb, cmd); | |
2976 | cmd->transport_split_cdb = &split_cdb_XX_16; | |
a1d8b49a | 2977 | cmd->t_task_lba = transport_lba_64(cdb); |
c66ac9db NB |
2978 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
2979 | break; | |
2980 | case WRITE_6: | |
2981 | sectors = transport_get_sectors_6(cdb, cmd, §or_ret); | |
2982 | if (sector_ret) | |
2983 | goto out_unsupported_cdb; | |
2984 | size = transport_get_size(sectors, cdb, cmd); | |
2985 | cmd->transport_split_cdb = &split_cdb_XX_6; | |
a1d8b49a | 2986 | cmd->t_task_lba = transport_lba_21(cdb); |
c66ac9db NB |
2987 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
2988 | break; | |
2989 | case WRITE_10: | |
2990 | sectors = transport_get_sectors_10(cdb, cmd, §or_ret); | |
2991 | if (sector_ret) | |
2992 | goto out_unsupported_cdb; | |
2993 | size = transport_get_size(sectors, cdb, cmd); | |
2994 | cmd->transport_split_cdb = &split_cdb_XX_10; | |
a1d8b49a AG |
2995 | cmd->t_task_lba = transport_lba_32(cdb); |
2996 | cmd->t_tasks_fua = (cdb[1] & 0x8); | |
c66ac9db NB |
2997 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
2998 | break; | |
2999 | case WRITE_12: | |
3000 | sectors = transport_get_sectors_12(cdb, cmd, §or_ret); | |
3001 | if (sector_ret) | |
3002 | goto out_unsupported_cdb; | |
3003 | size = transport_get_size(sectors, cdb, cmd); | |
3004 | cmd->transport_split_cdb = &split_cdb_XX_12; | |
a1d8b49a AG |
3005 | cmd->t_task_lba = transport_lba_32(cdb); |
3006 | cmd->t_tasks_fua = (cdb[1] & 0x8); | |
c66ac9db NB |
3007 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3008 | break; | |
3009 | case WRITE_16: | |
3010 | sectors = transport_get_sectors_16(cdb, cmd, §or_ret); | |
3011 | if (sector_ret) | |
3012 | goto out_unsupported_cdb; | |
3013 | size = transport_get_size(sectors, cdb, cmd); | |
3014 | cmd->transport_split_cdb = &split_cdb_XX_16; | |
a1d8b49a AG |
3015 | cmd->t_task_lba = transport_lba_64(cdb); |
3016 | cmd->t_tasks_fua = (cdb[1] & 0x8); | |
c66ac9db NB |
3017 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3018 | break; | |
3019 | case XDWRITEREAD_10: | |
3020 | if ((cmd->data_direction != DMA_TO_DEVICE) || | |
a1d8b49a | 3021 | !(cmd->t_tasks_bidi)) |
c66ac9db NB |
3022 | goto out_invalid_cdb_field; |
3023 | sectors = transport_get_sectors_10(cdb, cmd, §or_ret); | |
3024 | if (sector_ret) | |
3025 | goto out_unsupported_cdb; | |
3026 | size = transport_get_size(sectors, cdb, cmd); | |
3027 | cmd->transport_split_cdb = &split_cdb_XX_10; | |
a1d8b49a | 3028 | cmd->t_task_lba = transport_lba_32(cdb); |
c66ac9db | 3029 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
e3d6f909 | 3030 | passthrough = (dev->transport->transport_type == |
c66ac9db NB |
3031 | TRANSPORT_PLUGIN_PHBA_PDEV); |
3032 | /* | |
3033 | * Skip the remaining assignments for TCM/PSCSI passthrough | |
3034 | */ | |
3035 | if (passthrough) | |
3036 | break; | |
3037 | /* | |
3038 | * Setup BIDI XOR callback to be run during transport_generic_complete_ok() | |
3039 | */ | |
3040 | cmd->transport_complete_callback = &transport_xor_callback; | |
a1d8b49a | 3041 | cmd->t_tasks_fua = (cdb[1] & 0x8); |
c66ac9db NB |
3042 | break; |
3043 | case VARIABLE_LENGTH_CMD: | |
3044 | service_action = get_unaligned_be16(&cdb[8]); | |
3045 | /* | |
3046 | * Determine if this is a TCM/pSCSI device and we should disable | |
3047 | * internal emulation for this CDB. | |
3048 | */ | |
e3d6f909 | 3049 | passthrough = (dev->transport->transport_type == |
c66ac9db NB |
3050 | TRANSPORT_PLUGIN_PHBA_PDEV); |
3051 | ||
3052 | switch (service_action) { | |
3053 | case XDWRITEREAD_32: | |
3054 | sectors = transport_get_sectors_32(cdb, cmd, §or_ret); | |
3055 | if (sector_ret) | |
3056 | goto out_unsupported_cdb; | |
3057 | size = transport_get_size(sectors, cdb, cmd); | |
3058 | /* | |
3059 | * Use WRITE_32 and READ_32 opcodes for the emulated | |
3060 | * XDWRITE_READ_32 logic. | |
3061 | */ | |
3062 | cmd->transport_split_cdb = &split_cdb_XX_32; | |
a1d8b49a | 3063 | cmd->t_task_lba = transport_lba_64_ext(cdb); |
c66ac9db NB |
3064 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3065 | ||
3066 | /* | |
3067 | * Skip the remaining assignments for TCM/PSCSI passthrough | |
3068 | */ | |
3069 | if (passthrough) | |
3070 | break; | |
3071 | ||
3072 | /* | |
3073 | * Setup BIDI XOR callback to be run during | |
3074 | * transport_generic_complete_ok() | |
3075 | */ | |
3076 | cmd->transport_complete_callback = &transport_xor_callback; | |
a1d8b49a | 3077 | cmd->t_tasks_fua = (cdb[10] & 0x8); |
c66ac9db NB |
3078 | break; |
3079 | case WRITE_SAME_32: | |
3080 | sectors = transport_get_sectors_32(cdb, cmd, §or_ret); | |
3081 | if (sector_ret) | |
3082 | goto out_unsupported_cdb; | |
dd3a5ad8 | 3083 | |
6708bb27 | 3084 | if (sectors) |
12850626 | 3085 | size = transport_get_size(1, cdb, cmd); |
6708bb27 AG |
3086 | else { |
3087 | pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not" | |
3088 | " supported\n"); | |
3089 | goto out_invalid_cdb_field; | |
3090 | } | |
dd3a5ad8 | 3091 | |
a1d8b49a | 3092 | cmd->t_task_lba = get_unaligned_be64(&cdb[12]); |
c66ac9db NB |
3093 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3094 | ||
706d5860 | 3095 | if (target_check_write_same_discard(&cdb[10], dev) < 0) |
c66ac9db | 3096 | goto out_invalid_cdb_field; |
706d5860 | 3097 | |
c66ac9db NB |
3098 | break; |
3099 | default: | |
6708bb27 | 3100 | pr_err("VARIABLE_LENGTH_CMD service action" |
c66ac9db NB |
3101 | " 0x%04x not supported\n", service_action); |
3102 | goto out_unsupported_cdb; | |
3103 | } | |
3104 | break; | |
e434f1f1 | 3105 | case MAINTENANCE_IN: |
e3d6f909 | 3106 | if (dev->transport->get_device_type(dev) != TYPE_ROM) { |
c66ac9db NB |
3107 | /* MAINTENANCE_IN from SCC-2 */ |
3108 | /* | |
3109 | * Check for emulated MI_REPORT_TARGET_PGS. | |
3110 | */ | |
3111 | if (cdb[1] == MI_REPORT_TARGET_PGS) { | |
3112 | cmd->transport_emulate_cdb = | |
e3d6f909 | 3113 | (su_dev->t10_alua.alua_type == |
c66ac9db | 3114 | SPC3_ALUA_EMULATED) ? |
e3d6f909 | 3115 | core_emulate_report_target_port_groups : |
c66ac9db NB |
3116 | NULL; |
3117 | } | |
3118 | size = (cdb[6] << 24) | (cdb[7] << 16) | | |
3119 | (cdb[8] << 8) | cdb[9]; | |
3120 | } else { | |
3121 | /* GPCMD_SEND_KEY from multi media commands */ | |
3122 | size = (cdb[8] << 8) + cdb[9]; | |
3123 | } | |
05d1c7c0 | 3124 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3125 | break; |
3126 | case MODE_SELECT: | |
3127 | size = cdb[4]; | |
3128 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | |
3129 | break; | |
3130 | case MODE_SELECT_10: | |
3131 | size = (cdb[7] << 8) + cdb[8]; | |
3132 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | |
3133 | break; | |
3134 | case MODE_SENSE: | |
3135 | size = cdb[4]; | |
05d1c7c0 | 3136 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3137 | break; |
3138 | case MODE_SENSE_10: | |
3139 | case GPCMD_READ_BUFFER_CAPACITY: | |
3140 | case GPCMD_SEND_OPC: | |
3141 | case LOG_SELECT: | |
3142 | case LOG_SENSE: | |
3143 | size = (cdb[7] << 8) + cdb[8]; | |
05d1c7c0 | 3144 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3145 | break; |
3146 | case READ_BLOCK_LIMITS: | |
3147 | size = READ_BLOCK_LEN; | |
05d1c7c0 | 3148 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3149 | break; |
3150 | case GPCMD_GET_CONFIGURATION: | |
3151 | case GPCMD_READ_FORMAT_CAPACITIES: | |
3152 | case GPCMD_READ_DISC_INFO: | |
3153 | case GPCMD_READ_TRACK_RZONE_INFO: | |
3154 | size = (cdb[7] << 8) + cdb[8]; | |
3155 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | |
3156 | break; | |
3157 | case PERSISTENT_RESERVE_IN: | |
3158 | case PERSISTENT_RESERVE_OUT: | |
3159 | cmd->transport_emulate_cdb = | |
e3d6f909 | 3160 | (su_dev->t10_pr.res_type == |
c66ac9db | 3161 | SPC3_PERSISTENT_RESERVATIONS) ? |
e3d6f909 | 3162 | core_scsi3_emulate_pr : NULL; |
c66ac9db | 3163 | size = (cdb[7] << 8) + cdb[8]; |
05d1c7c0 | 3164 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3165 | break; |
3166 | case GPCMD_MECHANISM_STATUS: | |
3167 | case GPCMD_READ_DVD_STRUCTURE: | |
3168 | size = (cdb[8] << 8) + cdb[9]; | |
3169 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | |
3170 | break; | |
3171 | case READ_POSITION: | |
3172 | size = READ_POSITION_LEN; | |
05d1c7c0 | 3173 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db | 3174 | break; |
e434f1f1 | 3175 | case MAINTENANCE_OUT: |
e3d6f909 | 3176 | if (dev->transport->get_device_type(dev) != TYPE_ROM) { |
c66ac9db NB |
3177 | /* MAINTENANCE_OUT from SCC-2 |
3178 | * | |
3179 | * Check for emulated MO_SET_TARGET_PGS. | |
3180 | */ | |
3181 | if (cdb[1] == MO_SET_TARGET_PGS) { | |
3182 | cmd->transport_emulate_cdb = | |
e3d6f909 | 3183 | (su_dev->t10_alua.alua_type == |
c66ac9db | 3184 | SPC3_ALUA_EMULATED) ? |
e3d6f909 | 3185 | core_emulate_set_target_port_groups : |
c66ac9db NB |
3186 | NULL; |
3187 | } | |
3188 | ||
3189 | size = (cdb[6] << 24) | (cdb[7] << 16) | | |
3190 | (cdb[8] << 8) | cdb[9]; | |
3191 | } else { | |
3192 | /* GPCMD_REPORT_KEY from multi media commands */ | |
3193 | size = (cdb[8] << 8) + cdb[9]; | |
3194 | } | |
05d1c7c0 | 3195 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3196 | break; |
3197 | case INQUIRY: | |
3198 | size = (cdb[3] << 8) + cdb[4]; | |
3199 | /* | |
3200 | * Do implicit HEAD_OF_QUEUE processing for INQUIRY. | |
3201 | * See spc4r17 section 5.3 | |
3202 | */ | |
5951146d | 3203 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) |
e66ecd50 | 3204 | cmd->sam_task_attr = MSG_HEAD_TAG; |
05d1c7c0 | 3205 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3206 | break; |
3207 | case READ_BUFFER: | |
3208 | size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; | |
05d1c7c0 | 3209 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3210 | break; |
3211 | case READ_CAPACITY: | |
3212 | size = READ_CAP_LEN; | |
05d1c7c0 | 3213 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3214 | break; |
3215 | case READ_MEDIA_SERIAL_NUMBER: | |
3216 | case SECURITY_PROTOCOL_IN: | |
3217 | case SECURITY_PROTOCOL_OUT: | |
3218 | size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; | |
05d1c7c0 | 3219 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3220 | break; |
3221 | case SERVICE_ACTION_IN: | |
3222 | case ACCESS_CONTROL_IN: | |
3223 | case ACCESS_CONTROL_OUT: | |
3224 | case EXTENDED_COPY: | |
3225 | case READ_ATTRIBUTE: | |
3226 | case RECEIVE_COPY_RESULTS: | |
3227 | case WRITE_ATTRIBUTE: | |
3228 | size = (cdb[10] << 24) | (cdb[11] << 16) | | |
3229 | (cdb[12] << 8) | cdb[13]; | |
05d1c7c0 | 3230 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3231 | break; |
3232 | case RECEIVE_DIAGNOSTIC: | |
3233 | case SEND_DIAGNOSTIC: | |
3234 | size = (cdb[3] << 8) | cdb[4]; | |
05d1c7c0 | 3235 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3236 | break; |
3237 | /* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */ | |
3238 | #if 0 | |
3239 | case GPCMD_READ_CD: | |
3240 | sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; | |
3241 | size = (2336 * sectors); | |
05d1c7c0 | 3242 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3243 | break; |
3244 | #endif | |
3245 | case READ_TOC: | |
3246 | size = cdb[8]; | |
05d1c7c0 | 3247 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3248 | break; |
3249 | case REQUEST_SENSE: | |
3250 | size = cdb[4]; | |
05d1c7c0 | 3251 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3252 | break; |
3253 | case READ_ELEMENT_STATUS: | |
3254 | size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9]; | |
05d1c7c0 | 3255 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3256 | break; |
3257 | case WRITE_BUFFER: | |
3258 | size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; | |
05d1c7c0 | 3259 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3260 | break; |
3261 | case RESERVE: | |
3262 | case RESERVE_10: | |
3263 | /* | |
3264 | * The SPC-2 RESERVE does not contain a size in the SCSI CDB. | |
3265 | * Assume the passthrough or $FABRIC_MOD will tell us about it. | |
3266 | */ | |
3267 | if (cdb[0] == RESERVE_10) | |
3268 | size = (cdb[7] << 8) | cdb[8]; | |
3269 | else | |
3270 | size = cmd->data_length; | |
3271 | ||
3272 | /* | |
3273 | * Setup the legacy emulated handler for SPC-2 and | |
3274 | * >= SPC-3 compatible reservation handling (CRH=1) | |
3275 | * Otherwise, we assume the underlying SCSI logic | |
3276 | * is running in SPC_PASSTHROUGH, and wants reservation | |
3277 | * emulation disabled. | |
3278 | */ | |
3279 | cmd->transport_emulate_cdb = | |
e3d6f909 | 3280 | (su_dev->t10_pr.res_type != |
c66ac9db | 3281 | SPC_PASSTHROUGH) ? |
e3d6f909 | 3282 | core_scsi2_emulate_crh : NULL; |
c66ac9db NB |
3283 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; |
3284 | break; | |
3285 | case RELEASE: | |
3286 | case RELEASE_10: | |
3287 | /* | |
3288 | * The SPC-2 RELEASE does not contain a size in the SCSI CDB. | |
3289 | * Assume the passthrough or $FABRIC_MOD will tell us about it. | |
3290 | */ | |
3291 | if (cdb[0] == RELEASE_10) | |
3292 | size = (cdb[7] << 8) | cdb[8]; | |
3293 | else | |
3294 | size = cmd->data_length; | |
3295 | ||
3296 | cmd->transport_emulate_cdb = | |
e3d6f909 | 3297 | (su_dev->t10_pr.res_type != |
c66ac9db | 3298 | SPC_PASSTHROUGH) ? |
e3d6f909 | 3299 | core_scsi2_emulate_crh : NULL; |
c66ac9db NB |
3300 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; |
3301 | break; | |
3302 | case SYNCHRONIZE_CACHE: | |
3303 | case 0x91: /* SYNCHRONIZE_CACHE_16: */ | |
3304 | /* | |
3305 | * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE | |
3306 | */ | |
3307 | if (cdb[0] == SYNCHRONIZE_CACHE) { | |
3308 | sectors = transport_get_sectors_10(cdb, cmd, §or_ret); | |
a1d8b49a | 3309 | cmd->t_task_lba = transport_lba_32(cdb); |
c66ac9db NB |
3310 | } else { |
3311 | sectors = transport_get_sectors_16(cdb, cmd, §or_ret); | |
a1d8b49a | 3312 | cmd->t_task_lba = transport_lba_64(cdb); |
c66ac9db NB |
3313 | } |
3314 | if (sector_ret) | |
3315 | goto out_unsupported_cdb; | |
3316 | ||
3317 | size = transport_get_size(sectors, cdb, cmd); | |
3318 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; | |
3319 | ||
3320 | /* | |
3321 | * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb() | |
3322 | */ | |
e3d6f909 | 3323 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) |
c66ac9db NB |
3324 | break; |
3325 | /* | |
3326 | * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation | |
3327 | * for SYNCHRONIZE_CACHE* Immed=1 case in __transport_execute_tasks() | |
3328 | */ | |
3329 | cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC; | |
3330 | /* | |
3331 | * Check to ensure that LBA + Range does not exceed past end of | |
7abbe7f3 | 3332 | * device for IBLOCK and FILEIO ->do_sync_cache() backend calls |
c66ac9db | 3333 | */ |
7abbe7f3 NB |
3334 | if ((cmd->t_task_lba != 0) || (sectors != 0)) { |
3335 | if (transport_cmd_get_valid_sectors(cmd) < 0) | |
3336 | goto out_invalid_cdb_field; | |
3337 | } | |
c66ac9db NB |
3338 | break; |
3339 | case UNMAP: | |
3340 | size = get_unaligned_be16(&cdb[7]); | |
05d1c7c0 | 3341 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3342 | break; |
3343 | case WRITE_SAME_16: | |
3344 | sectors = transport_get_sectors_16(cdb, cmd, §or_ret); | |
3345 | if (sector_ret) | |
3346 | goto out_unsupported_cdb; | |
dd3a5ad8 | 3347 | |
6708bb27 | 3348 | if (sectors) |
12850626 | 3349 | size = transport_get_size(1, cdb, cmd); |
6708bb27 AG |
3350 | else { |
3351 | pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); | |
3352 | goto out_invalid_cdb_field; | |
3353 | } | |
dd3a5ad8 | 3354 | |
5db0753b | 3355 | cmd->t_task_lba = get_unaligned_be64(&cdb[2]); |
706d5860 NB |
3356 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3357 | ||
3358 | if (target_check_write_same_discard(&cdb[1], dev) < 0) | |
3359 | goto out_invalid_cdb_field; | |
3360 | break; | |
3361 | case WRITE_SAME: | |
3362 | sectors = transport_get_sectors_10(cdb, cmd, §or_ret); | |
3363 | if (sector_ret) | |
3364 | goto out_unsupported_cdb; | |
3365 | ||
3366 | if (sectors) | |
12850626 | 3367 | size = transport_get_size(1, cdb, cmd); |
706d5860 NB |
3368 | else { |
3369 | pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); | |
3370 | goto out_invalid_cdb_field; | |
c66ac9db | 3371 | } |
706d5860 NB |
3372 | |
3373 | cmd->t_task_lba = get_unaligned_be32(&cdb[2]); | |
c66ac9db | 3374 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
706d5860 NB |
3375 | /* |
3376 | * Follow sbcr26 with WRITE_SAME (10) and check for the existence | |
3377 | * of byte 1 bit 3 UNMAP instead of original reserved field | |
3378 | */ | |
3379 | if (target_check_write_same_discard(&cdb[1], dev) < 0) | |
3380 | goto out_invalid_cdb_field; | |
c66ac9db NB |
3381 | break; |
3382 | case ALLOW_MEDIUM_REMOVAL: | |
3383 | case GPCMD_CLOSE_TRACK: | |
3384 | case ERASE: | |
3385 | case INITIALIZE_ELEMENT_STATUS: | |
3386 | case GPCMD_LOAD_UNLOAD: | |
3387 | case REZERO_UNIT: | |
3388 | case SEEK_10: | |
3389 | case GPCMD_SET_SPEED: | |
3390 | case SPACE: | |
3391 | case START_STOP: | |
3392 | case TEST_UNIT_READY: | |
3393 | case VERIFY: | |
3394 | case WRITE_FILEMARKS: | |
3395 | case MOVE_MEDIUM: | |
3396 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; | |
3397 | break; | |
3398 | case REPORT_LUNS: | |
3399 | cmd->transport_emulate_cdb = | |
e3d6f909 | 3400 | transport_core_report_lun_response; |
c66ac9db NB |
3401 | size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; |
3402 | /* | |
3403 | * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS | |
3404 | * See spc4r17 section 5.3 | |
3405 | */ | |
5951146d | 3406 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) |
e66ecd50 | 3407 | cmd->sam_task_attr = MSG_HEAD_TAG; |
05d1c7c0 | 3408 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3409 | break; |
3410 | default: | |
6708bb27 | 3411 | pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode" |
c66ac9db | 3412 | " 0x%02x, sending CHECK_CONDITION.\n", |
e3d6f909 | 3413 | cmd->se_tfo->get_fabric_name(), cdb[0]); |
c66ac9db NB |
3414 | cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; |
3415 | goto out_unsupported_cdb; | |
3416 | } | |
3417 | ||
3418 | if (size != cmd->data_length) { | |
6708bb27 | 3419 | pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" |
c66ac9db | 3420 | " %u does not match SCSI CDB Length: %u for SAM Opcode:" |
e3d6f909 | 3421 | " 0x%02x\n", cmd->se_tfo->get_fabric_name(), |
c66ac9db NB |
3422 | cmd->data_length, size, cdb[0]); |
3423 | ||
3424 | cmd->cmd_spdtl = size; | |
3425 | ||
3426 | if (cmd->data_direction == DMA_TO_DEVICE) { | |
6708bb27 | 3427 | pr_err("Rejecting underflow/overflow" |
c66ac9db NB |
3428 | " WRITE data\n"); |
3429 | goto out_invalid_cdb_field; | |
3430 | } | |
3431 | /* | |
3432 | * Reject READ_* or WRITE_* with overflow/underflow for | |
3433 | * type SCF_SCSI_DATA_SG_IO_CDB. | |
3434 | */ | |
6708bb27 AG |
3435 | if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) { |
3436 | pr_err("Failing OVERFLOW/UNDERFLOW for LBA op" | |
c66ac9db | 3437 | " CDB on non 512-byte sector setup subsystem" |
e3d6f909 | 3438 | " plugin: %s\n", dev->transport->name); |
c66ac9db NB |
3439 | /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ |
3440 | goto out_invalid_cdb_field; | |
3441 | } | |
3442 | ||
3443 | if (size > cmd->data_length) { | |
3444 | cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; | |
3445 | cmd->residual_count = (size - cmd->data_length); | |
3446 | } else { | |
3447 | cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; | |
3448 | cmd->residual_count = (cmd->data_length - size); | |
3449 | } | |
3450 | cmd->data_length = size; | |
3451 | } | |
3452 | ||
d0229ae3 AG |
3453 | /* Let's limit control cdbs to a page, for simplicity's sake. */ |
3454 | if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) && | |
3455 | size > PAGE_SIZE) | |
3456 | goto out_invalid_cdb_field; | |
3457 | ||
c66ac9db NB |
3458 | transport_set_supported_SAM_opcode(cmd); |
3459 | return ret; | |
3460 | ||
3461 | out_unsupported_cdb: | |
3462 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
3463 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | |
5951146d | 3464 | return -EINVAL; |
c66ac9db NB |
3465 | out_invalid_cdb_field: |
3466 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
3467 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | |
5951146d | 3468 | return -EINVAL; |
c66ac9db NB |
3469 | } |
3470 | ||
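/*
 * Editor's note: a worked example with assumed values, not part of the
 * original file, for the overflow/underflow handling near the end of
 * transport_generic_cmd_sequencer() above.  If the CDB-derived size is 2048
 * bytes while the fabric reported cmd->data_length = 4096, SCF_UNDERFLOW_BIT
 * is set, residual_count = 4096 - 2048 = 2048 and data_length is trimmed to
 * 2048; the opposite case (size > data_length) sets SCF_OVERFLOW_BIT with
 * residual_count = size - data_length.
 */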
c66ac9db NB |
3471 | /* |
3472 | * Called from transport_generic_complete_ok() and | |
3473 | * transport_generic_request_failure() to determine which dormant/delayed | |
3474 | * and ordered cmds need to have their tasks added to the execution queue. | |
3475 | */ | |
3476 | static void transport_complete_task_attr(struct se_cmd *cmd) | |
3477 | { | |
5951146d | 3478 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
3479 | struct se_cmd *cmd_p, *cmd_tmp; |
3480 | int new_active_tasks = 0; | |
3481 | ||
e66ecd50 | 3482 | if (cmd->sam_task_attr == MSG_SIMPLE_TAG) { |
c66ac9db NB |
3483 | atomic_dec(&dev->simple_cmds); |
3484 | smp_mb__after_atomic_dec(); | |
3485 | dev->dev_cur_ordered_id++; | |
6708bb27 | 3486 | pr_debug("Incremented dev->dev_cur_ordered_id: %u for" |
c66ac9db NB |
3487 | " SIMPLE: %u\n", dev->dev_cur_ordered_id, |
3488 | cmd->se_ordered_id); | |
e66ecd50 | 3489 | } else if (cmd->sam_task_attr == MSG_HEAD_TAG) { |
c66ac9db NB |
3490 | atomic_dec(&dev->dev_hoq_count); |
3491 | smp_mb__after_atomic_dec(); | |
3492 | dev->dev_cur_ordered_id++; | |
6708bb27 | 3493 | pr_debug("Incremented dev_cur_ordered_id: %u for" |
c66ac9db NB |
3494 | " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, |
3495 | cmd->se_ordered_id); | |
e66ecd50 | 3496 | } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { |
c66ac9db | 3497 | spin_lock(&dev->ordered_cmd_lock); |
5951146d | 3498 | list_del(&cmd->se_ordered_node); |
c66ac9db NB |
3499 | atomic_dec(&dev->dev_ordered_sync); |
3500 | smp_mb__after_atomic_dec(); | |
3501 | spin_unlock(&dev->ordered_cmd_lock); | |
3502 | ||
3503 | dev->dev_cur_ordered_id++; | |
6708bb27 | 3504 | pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" |
c66ac9db NB |
3505 | " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); |
3506 | } | |
3507 | /* | |
3508 | * Process all commands up to the last received | |
3509 | * ORDERED task attribute which requires another blocking | |
3510 | * boundary | |
3511 | */ | |
3512 | spin_lock(&dev->delayed_cmd_lock); | |
3513 | list_for_each_entry_safe(cmd_p, cmd_tmp, | |
5951146d | 3514 | &dev->delayed_cmd_list, se_delayed_node) { |
c66ac9db | 3515 | |
5951146d | 3516 | list_del(&cmd_p->se_delayed_node); |
c66ac9db NB |
3517 | spin_unlock(&dev->delayed_cmd_lock); |
3518 | ||
6708bb27 | 3519 | pr_debug("Calling add_tasks() for" |
c66ac9db NB |
3520 | " cmd_p: 0x%02x Task Attr: 0x%02x" |
3521 | " Dormant -> Active, se_ordered_id: %u\n", | |
6708bb27 | 3522 | cmd_p->t_task_cdb[0], |
c66ac9db NB |
3523 | cmd_p->sam_task_attr, cmd_p->se_ordered_id); |
3524 | ||
3525 | transport_add_tasks_from_cmd(cmd_p); | |
3526 | new_active_tasks++; | |
3527 | ||
3528 | spin_lock(&dev->delayed_cmd_lock); | |
e66ecd50 | 3529 | if (cmd_p->sam_task_attr == MSG_ORDERED_TAG) |
c66ac9db NB |
3530 | break; |
3531 | } | |
3532 | spin_unlock(&dev->delayed_cmd_lock); | |
3533 | /* | |
3534 | * If new tasks have become active, wake up the transport thread | |
3535 | * to do the processing of the Active tasks. | |
3536 | */ | |
3537 | if (new_active_tasks != 0) | |
e3d6f909 | 3538 | wake_up_interruptible(&dev->dev_queue_obj.thread_wq); |
c66ac9db NB |
3539 | } |
3540 | ||
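/*
 * Editor's note: an illustrative sequence, not part of the original file,
 * for transport_complete_task_attr() above.  Assuming commands A (SIMPLE),
 * B (ORDERED) and C (SIMPLE) arrive in that order and C was therefore held
 * on dev->delayed_cmd_list behind the ORDERED barrier, completion of B
 * decrements dev_ordered_sync, walks the delayed list, re-adds C's tasks via
 * transport_add_tasks_from_cmd() and wakes the processing thread.
 */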
07bde79a NB |
3541 | static int transport_complete_qf(struct se_cmd *cmd) |
3542 | { | |
3543 | int ret = 0; | |
3544 | ||
3545 | if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) | |
3546 | return cmd->se_tfo->queue_status(cmd); | |
3547 | ||
3548 | switch (cmd->data_direction) { | |
3549 | case DMA_FROM_DEVICE: | |
3550 | ret = cmd->se_tfo->queue_data_in(cmd); | |
3551 | break; | |
3552 | case DMA_TO_DEVICE: | |
ec98f782 | 3553 | if (cmd->t_bidi_data_sg) { |
07bde79a NB |
3554 | ret = cmd->se_tfo->queue_data_in(cmd); |
3555 | if (ret < 0) | |
3556 | return ret; | |
3557 | } | |
3558 | /* Fall through for DMA_TO_DEVICE */ | |
3559 | case DMA_NONE: | |
3560 | ret = cmd->se_tfo->queue_status(cmd); | |
3561 | break; | |
3562 | default: | |
3563 | break; | |
3564 | } | |
3565 | ||
3566 | return ret; | |
3567 | } | |
3568 | ||
3569 | static void transport_handle_queue_full( | |
3570 | struct se_cmd *cmd, | |
3571 | struct se_device *dev, | |
3572 | int (*qf_callback)(struct se_cmd *)) | |
3573 | { | |
3574 | spin_lock_irq(&dev->qf_cmd_lock); | |
3575 | cmd->se_cmd_flags |= SCF_EMULATE_QUEUE_FULL; | |
3576 | cmd->transport_qf_callback = qf_callback; | |
3577 | list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); | |
3578 | atomic_inc(&dev->dev_qf_count); | |
3579 | smp_mb__after_atomic_inc(); | |
3580 | spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); | |
3581 | ||
3582 | schedule_work(&cmd->se_dev->qf_work_queue); | |
3583 | } | |
3584 | ||
c66ac9db NB |
3585 | static void transport_generic_complete_ok(struct se_cmd *cmd) |
3586 | { | |
07bde79a | 3587 | int reason = 0, ret; |
c66ac9db NB |
3588 | /* |
3589 | * Check if we need to move delayed/dormant tasks from cmds on the | |
3590 | * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task | |
3591 | * Attribute. | |
3592 | */ | |
5951146d | 3593 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) |
c66ac9db | 3594 | transport_complete_task_attr(cmd); |
07bde79a NB |
3595 | /* |
3596 | * Check to schedule QUEUE_FULL work, or execute an existing | |
3597 | * cmd->transport_qf_callback() | |
3598 | */ | |
3599 | if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) | |
3600 | schedule_work(&cmd->se_dev->qf_work_queue); | |
3601 | ||
3602 | if (cmd->transport_qf_callback) { | |
3603 | ret = cmd->transport_qf_callback(cmd); | |
3604 | if (ret < 0) | |
3605 | goto queue_full; | |
3606 | ||
3607 | cmd->transport_qf_callback = NULL; | |
3608 | goto done; | |
3609 | } | |
c66ac9db NB |
3610 | /* |
3611 | * Check if we need to retrieve a sense buffer from | |
3612 | * the struct se_cmd in question. | |
3613 | */ | |
3614 | if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { | |
3615 | if (transport_get_sense_data(cmd) < 0) | |
3616 | reason = TCM_NON_EXISTENT_LUN; | |
3617 | ||
3618 | /* | |
3619 | * Only set when an struct se_task->task_scsi_status returned | |
3620 | * a non GOOD status. | |
3621 | */ | |
3622 | if (cmd->scsi_status) { | |
07bde79a | 3623 | ret = transport_send_check_condition_and_sense( |
c66ac9db | 3624 | cmd, reason, 1); |
07bde79a NB |
3625 | if (ret == -EAGAIN) |
3626 | goto queue_full; | |
3627 | ||
c66ac9db NB |
3628 | transport_lun_remove_cmd(cmd); |
3629 | transport_cmd_check_stop_to_fabric(cmd); | |
3630 | return; | |
3631 | } | |
3632 | } | |
3633 | /* | |
25985edc | 3634 | * Check for a callback, used amongst other things by
c66ac9db NB |
3635 | * XDWRITE_READ_10 emulation. |
3636 | */ | |
3637 | if (cmd->transport_complete_callback) | |
3638 | cmd->transport_complete_callback(cmd); | |
3639 | ||
3640 | switch (cmd->data_direction) { | |
3641 | case DMA_FROM_DEVICE: | |
3642 | spin_lock(&cmd->se_lun->lun_sep_lock); | |
e3d6f909 AG |
3643 | if (cmd->se_lun->lun_sep) { |
3644 | cmd->se_lun->lun_sep->sep_stats.tx_data_octets += | |
c66ac9db NB |
3645 | cmd->data_length; |
3646 | } | |
3647 | spin_unlock(&cmd->se_lun->lun_sep_lock); | |
c66ac9db | 3648 | |
07bde79a NB |
3649 | ret = cmd->se_tfo->queue_data_in(cmd); |
3650 | if (ret == -EAGAIN) | |
3651 | goto queue_full; | |
c66ac9db NB |
3652 | break; |
3653 | case DMA_TO_DEVICE: | |
3654 | spin_lock(&cmd->se_lun->lun_sep_lock); | |
e3d6f909 AG |
3655 | if (cmd->se_lun->lun_sep) { |
3656 | cmd->se_lun->lun_sep->sep_stats.rx_data_octets += | |
c66ac9db NB |
3657 | cmd->data_length; |
3658 | } | |
3659 | spin_unlock(&cmd->se_lun->lun_sep_lock); | |
3660 | /* | |
3661 | * Check if we need to send READ payload for BIDI-COMMAND | |
3662 | */ | |
ec98f782 | 3663 | if (cmd->t_bidi_data_sg) { |
c66ac9db | 3664 | spin_lock(&cmd->se_lun->lun_sep_lock); |
e3d6f909 AG |
3665 | if (cmd->se_lun->lun_sep) { |
3666 | cmd->se_lun->lun_sep->sep_stats.tx_data_octets += | |
c66ac9db NB |
3667 | cmd->data_length; |
3668 | } | |
3669 | spin_unlock(&cmd->se_lun->lun_sep_lock); | |
07bde79a NB |
3670 | ret = cmd->se_tfo->queue_data_in(cmd); |
3671 | if (ret == -EAGAIN) | |
3672 | goto queue_full; | |
c66ac9db NB |
3673 | break; |
3674 | } | |
3675 | /* Fall through for DMA_TO_DEVICE */ | |
3676 | case DMA_NONE: | |
07bde79a NB |
3677 | ret = cmd->se_tfo->queue_status(cmd); |
3678 | if (ret == -EAGAIN) | |
3679 | goto queue_full; | |
c66ac9db NB |
3680 | break; |
3681 | default: | |
3682 | break; | |
3683 | } | |
3684 | ||
07bde79a | 3685 | done: |
c66ac9db NB |
3686 | transport_lun_remove_cmd(cmd); |
3687 | transport_cmd_check_stop_to_fabric(cmd); | |
07bde79a NB |
3688 | return; |
3689 | ||
3690 | queue_full: | |
6708bb27 | 3691 | pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," |
07bde79a NB |
3692 | " data_direction: %d\n", cmd, cmd->data_direction); |
3693 | transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf); | |
c66ac9db NB |
3694 | } |
3695 | ||
3696 | static void transport_free_dev_tasks(struct se_cmd *cmd) | |
3697 | { | |
3698 | struct se_task *task, *task_tmp; | |
3699 | unsigned long flags; | |
3700 | ||
a1d8b49a | 3701 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 3702 | list_for_each_entry_safe(task, task_tmp, |
a1d8b49a | 3703 | &cmd->t_task_list, t_list) { |
c66ac9db NB |
3704 | if (atomic_read(&task->task_active)) |
3705 | continue; | |
3706 | ||
3707 | kfree(task->task_sg_bidi); | |
3708 | kfree(task->task_sg); | |
3709 | ||
3710 | list_del(&task->t_list); | |
3711 | ||
a1d8b49a | 3712 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 3713 | if (task->se_dev) |
e3d6f909 | 3714 | task->se_dev->transport->free_task(task); |
c66ac9db | 3715 | else |
6708bb27 | 3716 | pr_err("task[%u] - task->se_dev is NULL\n", |
c66ac9db | 3717 | task->task_no); |
a1d8b49a | 3718 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 3719 | } |
a1d8b49a | 3720 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
3721 | } |
3722 | ||
6708bb27 | 3723 | static inline void transport_free_sgl(struct scatterlist *sgl, int nents) |
c66ac9db | 3724 | { |
ec98f782 | 3725 | struct scatterlist *sg; |
ec98f782 | 3726 | int count; |
c66ac9db | 3727 | |
6708bb27 AG |
3728 | for_each_sg(sgl, sg, nents, count) |
3729 | __free_page(sg_page(sg)); | |
c66ac9db | 3730 | |
6708bb27 AG |
3731 | kfree(sgl); |
3732 | } | |
c66ac9db | 3733 | |
6708bb27 AG |
3734 | static inline void transport_free_pages(struct se_cmd *cmd) |
3735 | { | |
3736 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) | |
3737 | return; | |
3738 | ||
3739 | transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents); | |
ec98f782 AG |
3740 | cmd->t_data_sg = NULL; |
3741 | cmd->t_data_nents = 0; | |
c66ac9db | 3742 | |
6708bb27 | 3743 | transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); |
ec98f782 AG |
3744 | cmd->t_bidi_data_sg = NULL; |
3745 | cmd->t_bidi_data_nents = 0; | |
c66ac9db NB |
3746 | } |
3747 | ||
3748 | static inline void transport_release_tasks(struct se_cmd *cmd) | |
3749 | { | |
3750 | transport_free_dev_tasks(cmd); | |
3751 | } | |
3752 | ||
3753 | static inline int transport_dec_and_check(struct se_cmd *cmd) | |
3754 | { | |
3755 | unsigned long flags; | |
3756 | ||
a1d8b49a AG |
3757 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
3758 | if (atomic_read(&cmd->t_fe_count)) { | |
6708bb27 | 3759 | if (!atomic_dec_and_test(&cmd->t_fe_count)) { |
a1d8b49a | 3760 | spin_unlock_irqrestore(&cmd->t_state_lock, |
c66ac9db NB |
3761 | flags); |
3762 | return 1; | |
3763 | } | |
3764 | } | |
3765 | ||
a1d8b49a | 3766 | if (atomic_read(&cmd->t_se_count)) { |
6708bb27 | 3767 | if (!atomic_dec_and_test(&cmd->t_se_count)) { |
a1d8b49a | 3768 | spin_unlock_irqrestore(&cmd->t_state_lock, |
c66ac9db NB |
3769 | flags); |
3770 | return 1; | |
3771 | } | |
3772 | } | |
a1d8b49a | 3773 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
3774 | |
3775 | return 0; | |
3776 | } | |
3777 | ||
3778 | static void transport_release_fe_cmd(struct se_cmd *cmd) | |
3779 | { | |
3780 | unsigned long flags; | |
3781 | ||
3782 | if (transport_dec_and_check(cmd)) | |
3783 | return; | |
3784 | ||
a1d8b49a | 3785 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
6708bb27 | 3786 | if (!atomic_read(&cmd->transport_dev_active)) { |
a1d8b49a | 3787 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
3788 | goto free_pages; |
3789 | } | |
a1d8b49a | 3790 | atomic_set(&cmd->transport_dev_active, 0); |
c66ac9db | 3791 | transport_all_task_dev_remove_state(cmd); |
a1d8b49a | 3792 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
3793 | |
3794 | transport_release_tasks(cmd); | |
3795 | free_pages: | |
3796 | transport_free_pages(cmd); | |
31afc39c | 3797 | transport_release_cmd(cmd); |
c66ac9db NB |
3798 | } |
3799 | ||
35462975 CH |
3800 | static int |
3801 | transport_generic_remove(struct se_cmd *cmd, int session_reinstatement) | |
c66ac9db NB |
3802 | { |
3803 | unsigned long flags; | |
3804 | ||
c66ac9db NB |
3805 | if (transport_dec_and_check(cmd)) { |
3806 | if (session_reinstatement) { | |
a1d8b49a | 3807 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 3808 | transport_all_task_dev_remove_state(cmd); |
a1d8b49a | 3809 | spin_unlock_irqrestore(&cmd->t_state_lock, |
c66ac9db NB |
3810 | flags); |
3811 | } | |
3812 | return 1; | |
3813 | } | |
3814 | ||
a1d8b49a | 3815 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
6708bb27 | 3816 | if (!atomic_read(&cmd->transport_dev_active)) { |
a1d8b49a | 3817 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
3818 | goto free_pages; |
3819 | } | |
a1d8b49a | 3820 | atomic_set(&cmd->transport_dev_active, 0); |
c66ac9db | 3821 | transport_all_task_dev_remove_state(cmd); |
a1d8b49a | 3822 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
3823 | |
3824 | transport_release_tasks(cmd); | |
5951146d | 3825 | |
c66ac9db NB |
3826 | free_pages: |
3827 | transport_free_pages(cmd); | |
35462975 | 3828 | transport_release_cmd(cmd); |
c66ac9db NB |
3829 | return 0; |
3830 | } | |
3831 | ||
3832 | /* | |
ec98f782 AG |
3833 | * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of |
3834 | * allocating in the core. | |
c66ac9db NB |
3835 | * @cmd: Associated se_cmd descriptor |
3836 | * @sgl: SGL style memory for TCM WRITE / READ | |
3837 | * @sgl_count: Number of SGL elements | |
3838 | * @sgl_bidi: SGL style memory for TCM BIDI READ | |
3839 | * @sgl_bidi_count: Number of BIDI READ SGL elements | |
3840 | * | |
3841 | * Return: nonzero if cmd was rejected for -ENOMEM or improper usage | |
3842 | * of parameters. | |
3843 | */ | |
3844 | int transport_generic_map_mem_to_cmd( | |
3845 | struct se_cmd *cmd, | |
5951146d AG |
3846 | struct scatterlist *sgl, |
3847 | u32 sgl_count, | |
3848 | struct scatterlist *sgl_bidi, | |
3849 | u32 sgl_bidi_count) | |
c66ac9db | 3850 | { |
5951146d | 3851 | if (!sgl || !sgl_count) |
c66ac9db | 3852 | return 0; |
c66ac9db | 3853 | |
c66ac9db NB |
3854 | if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || |
3855 | (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) { | |
c66ac9db | 3856 | |
ec98f782 AG |
3857 | cmd->t_data_sg = sgl; |
3858 | cmd->t_data_nents = sgl_count; | |
c66ac9db | 3859 | |
ec98f782 AG |
3860 | if (sgl_bidi && sgl_bidi_count) { |
3861 | cmd->t_bidi_data_sg = sgl_bidi; | |
3862 | cmd->t_bidi_data_nents = sgl_bidi_count; | |
c66ac9db NB |
3863 | } |
3864 | cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; | |
c66ac9db NB |
3865 | } |
3866 | ||
3867 | return 0; | |
3868 | } | |
3869 | EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); | |
3870 | ||
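/*
 * Editor's note: a minimal usage sketch, not part of the original file, for
 * the export above.  The fabric function and variable names are hypothetical;
 * it only illustrates handing fabric-allocated SGLs to the core so that
 * transport_generic_get_mem() is skipped later.
 */
#if 0
static int example_fabric_submit(struct se_cmd *se_cmd,
				 struct scatterlist *sgl, u32 sgl_count)
{
	/* No BIDI READ SGL in this example, hence NULL / 0 */
	return transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
						NULL, 0);
}
#endif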
c66ac9db NB |
3871 | static int transport_new_cmd_obj(struct se_cmd *cmd) |
3872 | { | |
5951146d | 3873 | struct se_device *dev = cmd->se_dev; |
01cde4d5 | 3874 | int set_counts = 1, rc, task_cdbs; |
c66ac9db | 3875 | |
ec98f782 AG |
3876 | /* |
3877 | * Setup any BIDI READ tasks and memory from | |
3878 | * cmd->t_mem_bidi_list so the READ struct se_tasks | |
3879 | * are queued first for the non pSCSI passthrough case. | |
3880 | */ | |
3881 | if (cmd->t_bidi_data_sg && | |
3882 | (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) { | |
3883 | rc = transport_allocate_tasks(cmd, | |
3884 | cmd->t_task_lba, | |
3885 | DMA_FROM_DEVICE, | |
3886 | cmd->t_bidi_data_sg, | |
3887 | cmd->t_bidi_data_nents); | |
6708bb27 | 3888 | if (rc <= 0) { |
c66ac9db NB |
3889 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
3890 | cmd->scsi_sense_reason = | |
ec98f782 | 3891 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
01cde4d5 | 3892 | return -EINVAL; |
c66ac9db | 3893 | } |
ec98f782 AG |
3894 | atomic_inc(&cmd->t_fe_count); |
3895 | atomic_inc(&cmd->t_se_count); | |
3896 | set_counts = 0; | |
3897 | } | |
3898 | /* | |
3899 | * Setup the tasks and memory from cmd->t_data_sg. | |
3900 | * Note for BIDI transfers this will contain the WRITE payload | |
3901 | */ | |
3902 | task_cdbs = transport_allocate_tasks(cmd, | |
3903 | cmd->t_task_lba, | |
3904 | cmd->data_direction, | |
3905 | cmd->t_data_sg, | |
3906 | cmd->t_data_nents); | |
6708bb27 | 3907 | if (task_cdbs <= 0) { |
ec98f782 AG |
3908 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
3909 | cmd->scsi_sense_reason = | |
3910 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | |
01cde4d5 | 3911 | return -EINVAL; |
ec98f782 | 3912 | } |
c66ac9db | 3913 | |
ec98f782 AG |
3914 | if (set_counts) { |
3915 | atomic_inc(&cmd->t_fe_count); | |
3916 | atomic_inc(&cmd->t_se_count); | |
c66ac9db NB |
3917 | } |
3918 | ||
ec98f782 AG |
3919 | cmd->t_task_list_num = task_cdbs; |
3920 | ||
a1d8b49a AG |
3921 | atomic_set(&cmd->t_task_cdbs_left, task_cdbs); |
3922 | atomic_set(&cmd->t_task_cdbs_ex_left, task_cdbs); | |
3923 | atomic_set(&cmd->t_task_cdbs_timeout_left, task_cdbs); | |
c66ac9db NB |
3924 | return 0; |
3925 | } | |
3926 | ||
05d1c7c0 AG |
3927 | void *transport_kmap_first_data_page(struct se_cmd *cmd) |
3928 | { | |
ec98f782 | 3929 | struct scatterlist *sg = cmd->t_data_sg; |
05d1c7c0 | 3930 | |
ec98f782 | 3931 | BUG_ON(!sg); |
05d1c7c0 | 3932 | /* |
ec98f782 AG |
3933 | * We need to take into account a possible offset here for fabrics like |
3934 | * tcm_loop who may be using a contig buffer from the SCSI midlayer for | |
3935 | * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() | |
05d1c7c0 | 3936 | */ |
ec98f782 | 3937 | return kmap(sg_page(sg)) + sg->offset; |
05d1c7c0 AG |
3938 | } |
3939 | EXPORT_SYMBOL(transport_kmap_first_data_page); | |
3940 | ||
3941 | void transport_kunmap_first_data_page(struct se_cmd *cmd) | |
3942 | { | |
ec98f782 | 3943 | kunmap(sg_page(cmd->t_data_sg)); |
05d1c7c0 AG |
3944 | } |
3945 | EXPORT_SYMBOL(transport_kunmap_first_data_page); | |
3946 | ||
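/*
 * Editor's note: a minimal sketch, not part of the original file, of the
 * kmap/kunmap pairing expected by the two exports above; the caller name is
 * hypothetical and assumes a payload that fits within the first data page.
 */
#if 0
static void example_peek_first_page(struct se_cmd *cmd)
{
	unsigned char *buf = transport_kmap_first_data_page(cmd);

	/* ... inspect or fill buf[] within the first page only ... */

	transport_kunmap_first_data_page(cmd);
}
#endif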
c66ac9db | 3947 | static int |
05d1c7c0 | 3948 | transport_generic_get_mem(struct se_cmd *cmd) |
c66ac9db | 3949 | { |
ec98f782 AG |
3950 | u32 length = cmd->data_length; |
3951 | unsigned int nents; | |
3952 | struct page *page; | |
3953 | int i = 0; | |
c66ac9db | 3954 | |
ec98f782 AG |
3955 | nents = DIV_ROUND_UP(length, PAGE_SIZE); |
3956 | cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL); | |
3957 | if (!cmd->t_data_sg) | |
3958 | return -ENOMEM; | |
c66ac9db | 3959 | |
ec98f782 AG |
3960 | cmd->t_data_nents = nents; |
3961 | sg_init_table(cmd->t_data_sg, nents); | |
c66ac9db | 3962 | |
ec98f782 AG |
3963 | while (length) { |
3964 | u32 page_len = min_t(u32, length, PAGE_SIZE); | |
3965 | page = alloc_page(GFP_KERNEL | __GFP_ZERO); | |
3966 | if (!page) | |
3967 | goto out; | |
c66ac9db | 3968 | |
ec98f782 AG |
3969 | sg_set_page(&cmd->t_data_sg[i], page, page_len, 0); |
3970 | length -= page_len; | |
3971 | i++; | |
c66ac9db | 3972 | } |
c66ac9db | 3973 | return 0; |
c66ac9db | 3974 | |
ec98f782 AG |
3975 | out: |
3976 | while (i >= 0) { | |
3977 | __free_page(sg_page(&cmd->t_data_sg[i])); | |
3978 | i--; | |
c66ac9db | 3979 | } |
ec98f782 AG |
3980 | kfree(cmd->t_data_sg); |
3981 | cmd->t_data_sg = NULL; | |
3982 | return -ENOMEM; | |
c66ac9db NB |
3983 | } |
3984 | ||
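/*
 * Editor's note: a worked example with assumed values, not part of the
 * original file, for transport_generic_get_mem() above: with PAGE_SIZE = 4096
 * and cmd->data_length = 10240, nents = DIV_ROUND_UP(10240, 4096) = 3, so
 * three zeroed pages are allocated and the final SG entry covers only the
 * remaining 2048 bytes.
 */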
a1d8b49a AG |
3985 | /* Reduce sectors if they are too long for the device */ |
3986 | static inline sector_t transport_limit_task_sectors( | |
c66ac9db NB |
3987 | struct se_device *dev, |
3988 | unsigned long long lba, | |
a1d8b49a | 3989 | sector_t sectors) |
c66ac9db | 3990 | { |
a1d8b49a | 3991 | sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors); |
c66ac9db | 3992 | |
a1d8b49a AG |
3993 | if (dev->transport->get_device_type(dev) == TYPE_DISK) |
3994 | if ((lba + sectors) > transport_dev_end_lba(dev)) | |
3995 | sectors = ((transport_dev_end_lba(dev) - lba) + 1); | |
c66ac9db | 3996 | |
a1d8b49a | 3997 | return sectors; |
c66ac9db NB |
3998 | } |
3999 | ||
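/*
 * Editor's note: a worked example with assumed values, not part of the
 * original file, for transport_limit_task_sectors() above: with
 * se_dev_attrib.max_sectors = 1024 a 4096-sector request is first clamped to
 * 1024; for a TYPE_DISK device with transport_dev_end_lba(dev) = 10000 and
 * lba = 9500, the range check then trims the result to
 * (10000 - 9500) + 1 = 501 sectors.
 */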
c66ac9db NB |
4000 | |
4001 | /* | |
4002 | * This function can be used by HW target mode drivers to create a linked | |
4003 | * scatterlist from all contiguously allocated struct se_task->task_sg[]. | |
4004 | * This is intended to be called during the completion path by TCM Core | |
4005 | * when struct target_core_fabric_ops->task_sg_chaining is enabled. | |
4006 | */ | |
4007 | void transport_do_task_sg_chain(struct se_cmd *cmd) | |
4008 | { | |
ec98f782 AG |
4009 | struct scatterlist *sg_first = NULL; |
4010 | struct scatterlist *sg_prev = NULL; | |
4011 | int sg_prev_nents = 0; | |
4012 | struct scatterlist *sg; | |
c66ac9db | 4013 | struct se_task *task; |
ec98f782 | 4014 | u32 chained_nents = 0; |
c66ac9db NB |
4015 | int i; |
4016 | ||
ec98f782 AG |
4017 | BUG_ON(!cmd->se_tfo->task_sg_chaining); |
4018 | ||
c66ac9db NB |
4019 | /* |
4020 | * Walk the struct se_task list and setup scatterlist chains | |
a1d8b49a | 4021 | * for each contiguously allocated struct se_task->task_sg[]. |
c66ac9db | 4022 | */ |
a1d8b49a | 4023 | list_for_each_entry(task, &cmd->t_task_list, t_list) { |
ec98f782 | 4024 | if (!task->task_sg) |
c66ac9db NB |
4025 | continue; |
4026 | ||
ec98f782 AG |
4027 | if (!sg_first) { |
4028 | sg_first = task->task_sg; | |
6708bb27 | 4029 | chained_nents = task->task_sg_nents; |
97868c89 | 4030 | } else { |
ec98f782 | 4031 | sg_chain(sg_prev, sg_prev_nents, task->task_sg); |
6708bb27 | 4032 | chained_nents += task->task_sg_nents; |
97868c89 | 4033 | } |
c3c74c7a NB |
4034 | /* |
4035 | * For the padded tasks, use the extra SGL vector allocated | |
4036 | * in transport_allocate_data_tasks() for the sg_prev_nents | |
4037 | * offset into sg_chain() above. The last task of a | |
4038 | * multi-task list, or a single task, will not have | |
4039 | * task->task_padded_sg set. | |
4040 | */ | |
4041 | if (task->task_padded_sg) | |
4042 | sg_prev_nents = (task->task_sg_nents + 1); | |
4043 | else | |
4044 | sg_prev_nents = task->task_sg_nents; | |
ec98f782 AG |
4045 | |
4046 | sg_prev = task->task_sg; | |
c66ac9db NB |
4047 | } |
4048 | /* | |
4049 | * Setup the starting pointer and total t_tasks_sg_linked_no including | |
4050 | * padding SGs for linking and to mark the end. | |
4051 | */ | |
a1d8b49a | 4052 | cmd->t_tasks_sg_chained = sg_first; |
ec98f782 | 4053 | cmd->t_tasks_sg_chained_no = chained_nents; |
c66ac9db | 4054 | |
6708bb27 | 4055 | pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and" |
a1d8b49a AG |
4056 | " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained, |
4057 | cmd->t_tasks_sg_chained_no); | |
c66ac9db | 4058 | |
a1d8b49a AG |
4059 | for_each_sg(cmd->t_tasks_sg_chained, sg, |
4060 | cmd->t_tasks_sg_chained_no, i) { | |
c66ac9db | 4061 | |
6708bb27 | 4062 | pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n", |
5951146d | 4063 | i, sg, sg_page(sg), sg->length, sg->offset); |
c66ac9db | 4064 | if (sg_is_chain(sg)) |
6708bb27 | 4065 | pr_debug("SG: %p sg_is_chain=1\n", sg); |
c66ac9db | 4066 | if (sg_is_last(sg)) |
6708bb27 | 4067 | pr_debug("SG: %p sg_is_last=1\n", sg); |
c66ac9db | 4068 | } |
c66ac9db NB |
4069 | } |
4070 | EXPORT_SYMBOL(transport_do_task_sg_chain); | |
4071 | ||
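/*
 * Editor's note: a minimal usage sketch, not part of the original file, of
 * the export above from a hypothetical HW target mode fabric driver with
 * ->task_sg_chaining enabled; the function name is made up for illustration.
 */
#if 0
static void example_hw_fabric_queue_data_in(struct se_cmd *cmd)
{
	struct scatterlist *sg;
	int i;

	/* Link every task->task_sg[] into a single chained SGL */
	transport_do_task_sg_chain(cmd);

	for_each_sg(cmd->t_tasks_sg_chained, sg,
		    cmd->t_tasks_sg_chained_no, i) {
		/* ... program one DMA descriptor per SG entry ... */
	}
}
#endif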
a1d8b49a AG |
4072 | /* |
4073 | * Break up cmd into chunks transport can handle | |
4074 | */ | |
ec98f782 | 4075 | static int transport_allocate_data_tasks( |
c66ac9db NB |
4076 | struct se_cmd *cmd, |
4077 | unsigned long long lba, | |
c66ac9db | 4078 | enum dma_data_direction data_direction, |
ec98f782 AG |
4079 | struct scatterlist *sgl, |
4080 | unsigned int sgl_nents) | |
c66ac9db NB |
4081 | { |
4082 | unsigned char *cdb = NULL; | |
4083 | struct se_task *task; | |
5951146d | 4084 | struct se_device *dev = cmd->se_dev; |
ec98f782 | 4085 | unsigned long flags; |
1d20bb61 | 4086 | int task_count, i, ret; |
277c5f27 | 4087 | sector_t sectors, dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors; |
ec98f782 AG |
4088 | u32 sector_size = dev->se_sub_dev->se_dev_attrib.block_size; |
4089 | struct scatterlist *sg; | |
4090 | struct scatterlist *cmd_sg; | |
a1d8b49a | 4091 | |
ec98f782 AG |
4092 | WARN_ON(cmd->data_length % sector_size); |
4093 | sectors = DIV_ROUND_UP(cmd->data_length, sector_size); | |
277c5f27 NB |
4094 | task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors); |
4095 | ||
ec98f782 AG |
4096 | cmd_sg = sgl; |
4097 | for (i = 0; i < task_count; i++) { | |
c3c74c7a | 4098 | unsigned int task_size, task_sg_nents_padded; |
ec98f782 | 4099 | int count; |
a1d8b49a | 4100 | |
c66ac9db | 4101 | task = transport_generic_get_task(cmd, data_direction); |
a1d8b49a | 4102 | if (!task) |
ec98f782 | 4103 | return -ENOMEM; |
c66ac9db | 4104 | |
c66ac9db | 4105 | task->task_lba = lba; |
ec98f782 AG |
4106 | task->task_sectors = min(sectors, dev_max_sectors); |
4107 | task->task_size = task->task_sectors * sector_size; | |
c66ac9db | 4108 | |
e3d6f909 | 4109 | cdb = dev->transport->get_cdb(task); |
a1d8b49a AG |
4110 | BUG_ON(!cdb); |
4111 | ||
4112 | memcpy(cdb, cmd->t_task_cdb, | |
4113 | scsi_command_size(cmd->t_task_cdb)); | |
4114 | ||
4115 | /* Update new cdb with updated lba/sectors */ | |
3a867205 | 4116 | cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb); |
525a48a2 NB |
4117 | /* |
4118 | * This now assumes that passed sg_ents are in PAGE_SIZE chunks | |
4119 | * in order to calculate the number of per-task SGL entries | |
4120 | */ | |
4121 | task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE); | |
c66ac9db | 4122 | /* |
ec98f782 AG |
4123 | * Check if the fabric module driver is requesting that all |
4124 | * struct se_task->task_sg[] be chained together. If so, | |
4125 | * then allocate an extra padding SG entry for linking and | |
c3c74c7a NB |
4126 | * marking the end of the chained SGL for every task except |
4127 | * the last one for (task_count > 1) operation, or skipping | |
4128 | * the extra padding for the (task_count == 1) case. | |
c66ac9db | 4129 | */ |
c3c74c7a NB |
4130 | if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) { |
4131 | task_sg_nents_padded = (task->task_sg_nents + 1); | |
ec98f782 | 4132 | task->task_padded_sg = 1; |
c3c74c7a NB |
4133 | } else |
4134 | task_sg_nents_padded = task->task_sg_nents; | |
c66ac9db | 4135 | |
1d20bb61 | 4136 | task->task_sg = kmalloc(sizeof(struct scatterlist) * |
c3c74c7a | 4137 | task_sg_nents_padded, GFP_KERNEL); |
ec98f782 AG |
4138 | if (!task->task_sg) { |
4139 | cmd->se_dev->transport->free_task(task); | |
4140 | return -ENOMEM; | |
4141 | } | |
4142 | ||
c3c74c7a | 4143 | sg_init_table(task->task_sg, task_sg_nents_padded); |
c66ac9db | 4144 | |
ec98f782 AG |
4145 | task_size = task->task_size; |
4146 | ||
4147 | /* Build new sgl, only up to task_size */ | |
6708bb27 | 4148 | for_each_sg(task->task_sg, sg, task->task_sg_nents, count) { |
ec98f782 AG |
4149 | if (cmd_sg->length > task_size) |
4150 | break; | |
4151 | ||
4152 | *sg = *cmd_sg; | |
4153 | task_size -= cmd_sg->length; | |
4154 | cmd_sg = sg_next(cmd_sg); | |
c66ac9db | 4155 | } |
c66ac9db | 4156 | |
ec98f782 AG |
4157 | lba += task->task_sectors; |
4158 | sectors -= task->task_sectors; | |
c66ac9db | 4159 | |
ec98f782 AG |
4160 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
4161 | list_add_tail(&task->t_list, &cmd->t_task_list); | |
4162 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
c66ac9db | 4163 | } |
1d20bb61 NB |
4164 | /* |
4165 | * Now perform the memory map of task->task_sg[] into backend | |
4166 | * subsystem memory.. | |
4167 | */ | |
4168 | list_for_each_entry(task, &cmd->t_task_list, t_list) { | |
4169 | if (atomic_read(&task->task_sent)) | |
4170 | continue; | |
4171 | if (!dev->transport->map_data_SG) | |
4172 | continue; | |
4173 | ||
4174 | ret = dev->transport->map_data_SG(task); | |
4175 | if (ret < 0) | |
4176 | return 0; | |
4177 | } | |
c66ac9db | 4178 | |
ec98f782 | 4179 | return task_count; |
c66ac9db NB |
4180 | } |
4181 | ||
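/*
 * Editor's note: a worked example with assumed values, not part of the
 * original file, for transport_allocate_data_tasks() above.  A 1 MiB WRITE
 * with block_size = 512 spans sectors = DIV_ROUND_UP(1048576, 512) = 2048;
 * with se_dev_attrib.max_sectors = 1024 this gives task_count = 2, and each
 * struct se_task covers 1024 sectors (512 KiB) with its own slice of the
 * command's SGL and a CDB rewritten for its lba/sectors.
 */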
4182 | static int | |
ec98f782 | 4183 | transport_allocate_control_task(struct se_cmd *cmd) |
c66ac9db | 4184 | { |
5951146d | 4185 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
4186 | unsigned char *cdb; |
4187 | struct se_task *task; | |
ec98f782 | 4188 | unsigned long flags; |
6708bb27 | 4189 | int ret = 0; |
c66ac9db NB |
4190 | |
4191 | task = transport_generic_get_task(cmd, cmd->data_direction); | |
4192 | if (!task) | |
ec98f782 | 4193 | return -ENOMEM; |
c66ac9db | 4194 | |
e3d6f909 | 4195 | cdb = dev->transport->get_cdb(task); |
a1d8b49a AG |
4196 | BUG_ON(!cdb); |
4197 | memcpy(cdb, cmd->t_task_cdb, | |
4198 | scsi_command_size(cmd->t_task_cdb)); | |
c66ac9db | 4199 | |
ec98f782 AG |
4200 | task->task_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents, |
4201 | GFP_KERNEL); | |
4202 | if (!task->task_sg) { | |
4203 | cmd->se_dev->transport->free_task(task); | |
4204 | return -ENOMEM; | |
4205 | } | |
4206 | ||
4207 | memcpy(task->task_sg, cmd->t_data_sg, | |
4208 | sizeof(struct scatterlist) * cmd->t_data_nents); | |
c66ac9db | 4209 | task->task_size = cmd->data_length; |
6708bb27 | 4210 | task->task_sg_nents = cmd->t_data_nents; |
c66ac9db | 4211 | |
ec98f782 AG |
4212 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
4213 | list_add_tail(&task->t_list, &cmd->t_task_list); | |
4214 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
c66ac9db NB |
4215 | |
4216 | if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) { | |
1d20bb61 NB |
4217 | if (dev->transport->map_control_SG) |
4218 | ret = dev->transport->map_control_SG(task); | |
c66ac9db NB |
4219 | } else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { |
4220 | if (dev->transport->cdb_none) | |
6708bb27 | 4221 | ret = dev->transport->cdb_none(task); |
c66ac9db | 4222 | } else { |
6708bb27 | 4223 | pr_err("target: Unknown control cmd type!\n"); |
c66ac9db | 4224 | BUG(); |
ec98f782 | 4225 | } |
6708bb27 AG |
4226 | |
4227 | /* Success! Return number of tasks allocated */ | |
4228 | if (ret == 0) | |
4229 | return 1; | |
4230 | return ret; | |
ec98f782 AG |
4231 | } |
4232 | ||
4233 | static u32 transport_allocate_tasks( | |
4234 | struct se_cmd *cmd, | |
4235 | unsigned long long lba, | |
4236 | enum dma_data_direction data_direction, | |
4237 | struct scatterlist *sgl, | |
4238 | unsigned int sgl_nents) | |
4239 | { | |
01cde4d5 NB |
4240 | if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { |
4241 | if (transport_cmd_get_valid_sectors(cmd) < 0) | |
4242 | return -EINVAL; | |
4243 | ||
ec98f782 AG |
4244 | return transport_allocate_data_tasks(cmd, lba, data_direction, |
4245 | sgl, sgl_nents); | |
01cde4d5 | 4246 | } else |
6708bb27 AG |
4247 | return transport_allocate_control_task(cmd); |
4248 | ||
c66ac9db NB |
4249 | } |
4250 | ||
ec98f782 | 4251 | |
c66ac9db NB |
4252 | /* transport_generic_new_cmd(): Called from transport_processing_thread() |
4253 | * | |
4254 | * Allocate storage transport resources from a set of values predefined | |
4255 | * by transport_generic_cmd_sequencer() from the iSCSI Target RX process. | |
4256 | * Any nonzero return here is treated as an "out of resource" op. | |
4257 | */ | |
4258 | /* | |
4259 | * Generate struct se_task(s) and/or their payloads for this CDB. | |
4260 | */ | |
a1d8b49a | 4261 | int transport_generic_new_cmd(struct se_cmd *cmd) |
c66ac9db | 4262 | { |
c66ac9db NB |
4263 | int ret = 0; |
4264 | ||
4265 | /* | |
4266 | * Determine if the TCM fabric module has already allocated physical | |
4267 | * memory, and is directly calling transport_generic_map_mem_to_cmd() | |
ec98f782 | 4268 | * beforehand. |
c66ac9db | 4269 | */ |
ec98f782 AG |
4270 | if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && |
4271 | cmd->data_length) { | |
05d1c7c0 | 4272 | ret = transport_generic_get_mem(cmd); |
c66ac9db NB |
4273 | if (ret < 0) |
4274 | return ret; | |
4275 | } | |
1d20bb61 NB |
4276 | /* |
4277 | * Call transport_new_cmd_obj() to invoke transport_allocate_tasks() for | |
4278 | * control or data CDB types, and perform the map to backend subsystem | |
4279 | * code from SGL memory allocated here by transport_generic_get_mem(), or | |
4280 | * via pre-existing SGL memory setup explicitly by fabric module code with |
4281 | * transport_generic_map_mem_to_cmd(). | |
4282 | */ | |
c66ac9db NB |
4283 | ret = transport_new_cmd_obj(cmd); |
4284 | if (ret < 0) | |
4285 | return ret; | |
c66ac9db | 4286 | /* |
a1d8b49a | 4287 | * For WRITEs, let the fabric know its buffer is ready. |
c66ac9db NB |
4288 | * This WRITE struct se_cmd (and all of its associated struct se_task's) |
4289 | * will be added to the struct se_device execution queue after its WRITE | |
4290 | * data has arrived. (ie: It gets handled by the transport processing | |
4291 | * thread a second time) | |
4292 | */ | |
4293 | if (cmd->data_direction == DMA_TO_DEVICE) { | |
4294 | transport_add_tasks_to_state_queue(cmd); | |
4295 | return transport_generic_write_pending(cmd); | |
4296 | } | |
4297 | /* | |
4298 | * Everything else but a WRITE, add the struct se_cmd's struct se_task's | |
4299 | * to the execution queue. | |
4300 | */ | |
4301 | transport_execute_tasks(cmd); | |
4302 | return 0; | |
4303 | } | |
a1d8b49a | 4304 | EXPORT_SYMBOL(transport_generic_new_cmd); |
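/*
 * Usage sketch (illustrative only, not lifted from a particular fabric
 * module):
 *
 *	ret = transport_generic_new_cmd(se_cmd);
 *	if (ret < 0)
 *		;	// treat as an out of resource / setup failure
 *
 * For READs and control CDBs the command has been queued for execution by
 * the time this returns.  For WRITEs the call returns once ->write_pending()
 * has asked the fabric for the payload; after that payload arrives the
 * fabric calls transport_generic_process_write(se_cmd), typically via the
 * TRANSPORT_PROCESS_WRITE case in transport_processing_thread().
 */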
c66ac9db NB |
4305 | |
4306 | /* transport_generic_process_write(): | |
4307 | * | |
4308 | * | |
4309 | */ | |
4310 | void transport_generic_process_write(struct se_cmd *cmd) | |
4311 | { | |
c66ac9db NB |
4312 | transport_execute_tasks(cmd); |
4313 | } | |
4314 | EXPORT_SYMBOL(transport_generic_process_write); | |
4315 | ||
07bde79a NB |
4316 | static int transport_write_pending_qf(struct se_cmd *cmd) |
4317 | { | |
4318 | return cmd->se_tfo->write_pending(cmd); | |
4319 | } | |
4320 | ||
c66ac9db NB |
4321 | /* transport_generic_write_pending(): |
4322 | * | |
4323 | * | |
4324 | */ | |
4325 | static int transport_generic_write_pending(struct se_cmd *cmd) | |
4326 | { | |
4327 | unsigned long flags; | |
4328 | int ret; | |
4329 | ||
a1d8b49a | 4330 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 4331 | cmd->t_state = TRANSPORT_WRITE_PENDING; |
a1d8b49a | 4332 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
07bde79a NB |
4333 | |
4334 | if (cmd->transport_qf_callback) { | |
4335 | ret = cmd->transport_qf_callback(cmd); | |
4336 | if (ret == -EAGAIN) | |
4337 | goto queue_full; | |
4338 | else if (ret < 0) | |
4339 | return ret; | |
4340 | ||
4341 | cmd->transport_qf_callback = NULL; | |
4342 | return 0; | |
4343 | } | |
05d1c7c0 | 4344 | |
c66ac9db NB |
4345 | /* |
4346 | * Clear the se_cmd for WRITE_PENDING status in order to set | |
a1d8b49a | 4347 | * cmd->t_transport_active=0 so that transport_generic_handle_data |
c66ac9db | 4348 | * can be called from HW target mode interrupt code. This is safe |
e3d6f909 | 4349 | * to be called with transport_off=1 before the cmd->se_tfo->write_pending |
c66ac9db NB |
4350 | * because the se_cmd->se_lun pointer is not being cleared. |
4351 | */ | |
4352 | transport_cmd_check_stop(cmd, 1, 0); | |
4353 | ||
4354 | /* | |
4355 | * Call the fabric write_pending function here to let the | |
4356 | * frontend know that WRITE buffers are ready. | |
4357 | */ | |
e3d6f909 | 4358 | ret = cmd->se_tfo->write_pending(cmd); |
07bde79a NB |
4359 | if (ret == -EAGAIN) |
4360 | goto queue_full; | |
4361 | else if (ret < 0) | |
c66ac9db NB |
4362 | return ret; |
4363 | ||
4364 | return PYX_TRANSPORT_WRITE_PENDING; | |
07bde79a NB |
4365 | |
4366 | queue_full: | |
6708bb27 | 4367 | pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd); |
07bde79a NB |
4368 | cmd->t_state = TRANSPORT_COMPLETE_QF_WP; |
4369 | transport_handle_queue_full(cmd, cmd->se_dev, | |
4370 | transport_write_pending_qf); | |
4371 | return ret; | |
c66ac9db NB |
4372 | } |
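/*
 * Queue full handling: when the fabric's ->write_pending() returns -EAGAIN,
 * the command is parked in TRANSPORT_COMPLETE_QF_WP and handed to
 * transport_handle_queue_full() with transport_write_pending_qf() as the
 * retry callback, so the ->write_pending() call can be reissued later (see
 * also the TRANSPORT_COMPLETE_QF_WP case in transport_processing_thread()).
 */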
4373 | ||
35462975 | 4374 | void transport_release_cmd(struct se_cmd *cmd) |
c66ac9db | 4375 | { |
e3d6f909 | 4376 | BUG_ON(!cmd->se_tfo); |
c66ac9db NB |
4377 | |
4378 | transport_free_se_cmd(cmd); | |
35462975 | 4379 | cmd->se_tfo->release_cmd(cmd); |
c66ac9db | 4380 | } |
35462975 | 4381 | EXPORT_SYMBOL(transport_release_cmd); |
c66ac9db NB |
4382 | |
4383 | /* transport_generic_free_cmd(): | |
4384 | * | |
4385 | * Called from processing frontend to release storage engine resources | |
4386 | */ | |
4387 | void transport_generic_free_cmd( | |
4388 | struct se_cmd *cmd, | |
4389 | int wait_for_tasks, | |
c66ac9db NB |
4390 | int session_reinstatement) |
4391 | { | |
5951146d | 4392 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) |
35462975 | 4393 | transport_release_cmd(cmd); |
c66ac9db NB |
4394 | else { |
4395 | core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd); | |
4396 | ||
e3d6f909 | 4397 | if (cmd->se_lun) { |
c66ac9db | 4398 | #if 0 |
6708bb27 | 4399 | pr_debug("cmd: %p ITT: 0x%08x contains" |
e3d6f909 AG |
4400 | " cmd->se_lun\n", cmd, |
4401 | cmd->se_tfo->get_task_tag(cmd)); | |
c66ac9db NB |
4402 | #endif |
4403 | transport_lun_remove_cmd(cmd); | |
4404 | } | |
4405 | ||
4406 | if (wait_for_tasks && cmd->transport_wait_for_tasks) | |
4407 | cmd->transport_wait_for_tasks(cmd, 0, 0); | |
4408 | ||
f4366772 NB |
4409 | transport_free_dev_tasks(cmd); |
4410 | ||
35462975 | 4411 | transport_generic_remove(cmd, session_reinstatement); |
c66ac9db NB |
4412 | } |
4413 | } | |
4414 | EXPORT_SYMBOL(transport_generic_free_cmd); | |
4415 | ||
4416 | static void transport_nop_wait_for_tasks( | |
4417 | struct se_cmd *cmd, | |
4418 | int remove_cmd, | |
4419 | int session_reinstatement) | |
4420 | { | |
4421 | return; | |
4422 | } | |
4423 | ||
4424 | /* transport_lun_wait_for_tasks(): | |
4425 | * | |
4426 | * Called from ConfigFS context to stop the passed struct se_cmd to allow | |
4427 | * a struct se_lun to be successfully shut down. |
4428 | */ | |
4429 | static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) | |
4430 | { | |
4431 | unsigned long flags; | |
4432 | int ret; | |
4433 | /* | |
4434 | * If the frontend has already requested this struct se_cmd to | |
4435 | * be stopped, we can safely ignore this struct se_cmd. | |
4436 | */ | |
a1d8b49a AG |
4437 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
4438 | if (atomic_read(&cmd->t_transport_stop)) { | |
4439 | atomic_set(&cmd->transport_lun_stop, 0); | |
6708bb27 | 4440 | pr_debug("ConfigFS ITT[0x%08x] - t_transport_stop ==" |
e3d6f909 | 4441 | " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd)); |
a1d8b49a | 4442 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 4443 | transport_cmd_check_stop(cmd, 1, 0); |
e3d6f909 | 4444 | return -EPERM; |
c66ac9db | 4445 | } |
a1d8b49a AG |
4446 | atomic_set(&cmd->transport_lun_fe_stop, 1); |
4447 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
c66ac9db | 4448 | |
5951146d | 4449 | wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq); |
c66ac9db NB |
4450 | |
4451 | ret = transport_stop_tasks_for_cmd(cmd); | |
4452 | ||
6708bb27 AG |
4453 | pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:" |
4454 | " %d\n", cmd, cmd->t_task_list_num, ret); | |
c66ac9db | 4455 | if (!ret) { |
6708bb27 | 4456 | pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n", |
e3d6f909 | 4457 | cmd->se_tfo->get_task_tag(cmd)); |
a1d8b49a | 4458 | wait_for_completion(&cmd->transport_lun_stop_comp); |
6708bb27 | 4459 | pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n", |
e3d6f909 | 4460 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db | 4461 | } |
5951146d | 4462 | transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); |
c66ac9db NB |
4463 | |
4464 | return 0; | |
4465 | } | |
4466 | ||
c66ac9db NB |
4467 | static void __transport_clear_lun_from_sessions(struct se_lun *lun) |
4468 | { | |
4469 | struct se_cmd *cmd = NULL; | |
4470 | unsigned long lun_flags, cmd_flags; | |
4471 | /* | |
4472 | * Do exception processing and return CHECK_CONDITION status to the | |
4473 | * Initiator Port. | |
4474 | */ | |
4475 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); | |
5951146d AG |
4476 | while (!list_empty(&lun->lun_cmd_list)) { |
4477 | cmd = list_first_entry(&lun->lun_cmd_list, | |
4478 | struct se_cmd, se_lun_node); | |
4479 | list_del(&cmd->se_lun_node); | |
4480 | ||
a1d8b49a | 4481 | atomic_set(&cmd->transport_lun_active, 0); |
c66ac9db NB |
4482 | /* |
4483 | * This will notify iscsi_target_transport.c: | |
4484 | * transport_cmd_check_stop() that a LUN shutdown is in | |
4485 | * progress for the iscsi_cmd_t. | |
4486 | */ | |
a1d8b49a | 4487 | spin_lock(&cmd->t_state_lock); |
6708bb27 | 4488 | pr_debug("SE_LUN[%d] - Setting cmd->transport" |
c66ac9db | 4489 | "_lun_stop for ITT: 0x%08x\n", |
e3d6f909 AG |
4490 | cmd->se_lun->unpacked_lun, |
4491 | cmd->se_tfo->get_task_tag(cmd)); | |
a1d8b49a AG |
4492 | atomic_set(&cmd->transport_lun_stop, 1); |
4493 | spin_unlock(&cmd->t_state_lock); | |
c66ac9db NB |
4494 | |
4495 | spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); | |
4496 | ||
6708bb27 AG |
4497 | if (!cmd->se_lun) { |
4498 | pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n", | |
e3d6f909 AG |
4499 | cmd->se_tfo->get_task_tag(cmd), |
4500 | cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); | |
c66ac9db NB |
4501 | BUG(); |
4502 | } | |
4503 | /* | |
4504 | * If the Storage engine still owns the iscsi_cmd_t, determine | |
4505 | * and/or stop its context. | |
4506 | */ | |
6708bb27 | 4507 | pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport" |
e3d6f909 AG |
4508 | "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun, |
4509 | cmd->se_tfo->get_task_tag(cmd)); | |
c66ac9db | 4510 | |
e3d6f909 | 4511 | if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) { |
c66ac9db NB |
4512 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); |
4513 | continue; | |
4514 | } | |
4515 | ||
6708bb27 | 4516 | pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun" |
c66ac9db | 4517 | "_wait_for_tasks(): SUCCESS\n", |
e3d6f909 AG |
4518 | cmd->se_lun->unpacked_lun, |
4519 | cmd->se_tfo->get_task_tag(cmd)); | |
c66ac9db | 4520 | |
a1d8b49a | 4521 | spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); |
6708bb27 | 4522 | if (!atomic_read(&cmd->transport_dev_active)) { |
a1d8b49a | 4523 | spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); |
c66ac9db NB |
4524 | goto check_cond; |
4525 | } | |
a1d8b49a | 4526 | atomic_set(&cmd->transport_dev_active, 0); |
c66ac9db | 4527 | transport_all_task_dev_remove_state(cmd); |
a1d8b49a | 4528 | spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); |
c66ac9db NB |
4529 | |
4530 | transport_free_dev_tasks(cmd); | |
4531 | /* | |
4532 | * The Storage engine stopped this struct se_cmd before it was | |
4533 | * sent to the fabric frontend for delivery back to the |
4534 | * Initiator Node. Return this SCSI CDB back with a |
4535 | * CHECK_CONDITION status. |
4536 | */ | |
4537 | check_cond: | |
4538 | transport_send_check_condition_and_sense(cmd, | |
4539 | TCM_NON_EXISTENT_LUN, 0); | |
4540 | /* | |
4541 | * If the fabric frontend is waiting for this iscsi_cmd_t to | |
4542 | * be released, notify the waiting thread now that LU has | |
4543 | * finished accessing it. | |
4544 | */ | |
a1d8b49a AG |
4545 | spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); |
4546 | if (atomic_read(&cmd->transport_lun_fe_stop)) { | |
6708bb27 | 4547 | pr_debug("SE_LUN[%d] - Detected FE stop for" |
c66ac9db NB |
4548 | " struct se_cmd: %p ITT: 0x%08x\n", |
4549 | lun->unpacked_lun, | |
e3d6f909 | 4550 | cmd, cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db | 4551 | |
a1d8b49a | 4552 | spin_unlock_irqrestore(&cmd->t_state_lock, |
c66ac9db NB |
4553 | cmd_flags); |
4554 | transport_cmd_check_stop(cmd, 1, 0); | |
a1d8b49a | 4555 | complete(&cmd->transport_lun_fe_stop_comp); |
c66ac9db NB |
4556 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); |
4557 | continue; | |
4558 | } | |
6708bb27 | 4559 | pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n", |
e3d6f909 | 4560 | lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db | 4561 | |
a1d8b49a | 4562 | spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); |
c66ac9db NB |
4563 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); |
4564 | } | |
4565 | spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); | |
4566 | } | |
4567 | ||
4568 | static int transport_clear_lun_thread(void *p) | |
4569 | { | |
4570 | struct se_lun *lun = (struct se_lun *)p; | |
4571 | ||
4572 | __transport_clear_lun_from_sessions(lun); | |
4573 | complete(&lun->lun_shutdown_comp); | |
4574 | ||
4575 | return 0; | |
4576 | } | |
4577 | ||
4578 | int transport_clear_lun_from_sessions(struct se_lun *lun) | |
4579 | { | |
4580 | struct task_struct *kt; | |
4581 | ||
5951146d | 4582 | kt = kthread_run(transport_clear_lun_thread, lun, |
c66ac9db NB |
4583 | "tcm_cl_%u", lun->unpacked_lun); |
4584 | if (IS_ERR(kt)) { | |
6708bb27 | 4585 | pr_err("Unable to start clear_lun thread\n"); |
e3d6f909 | 4586 | return PTR_ERR(kt); |
c66ac9db NB |
4587 | } |
4588 | wait_for_completion(&lun->lun_shutdown_comp); | |
4589 | ||
4590 | return 0; | |
4591 | } | |
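/*
 * Typical use (sketch): the ConfigFS LUN teardown path calls
 * transport_clear_lun_from_sessions(lun) before releasing the LUN.  The
 * "tcm_cl_%u" kthread spawned above walks lun->lun_cmd_list, quiesces or
 * fails each outstanding command with TCM_NON_EXISTENT_LUN, and only then
 * completes lun_shutdown_comp, so the caller blocks until no command still
 * references the LUN.
 */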
4592 | ||
4593 | /* transport_generic_wait_for_tasks(): | |
4594 | * | |
4595 | * Called from frontend or passthrough context to wait for storage engine | |
4596 | * to pause and/or release frontend generated struct se_cmd. | |
4597 | */ | |
4598 | static void transport_generic_wait_for_tasks( | |
4599 | struct se_cmd *cmd, | |
4600 | int remove_cmd, | |
4601 | int session_reinstatement) | |
4602 | { | |
4603 | unsigned long flags; | |
4604 | ||
4605 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) | |
4606 | return; | |
4607 | ||
a1d8b49a | 4608 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db NB |
4609 | /* |
4610 | * If we are already stopped due to an external event (ie: LUN shutdown) | |
4611 | * sleep until the connection can have the passed struct se_cmd back. | |
a1d8b49a | 4612 | * The cmd->transport_lun_fe_stop_comp will be completed by |
c66ac9db NB |
4613 | * transport_clear_lun_from_sessions() once the ConfigFS context caller |
4614 | * has completed its operation on the struct se_cmd. | |
4615 | */ | |
a1d8b49a | 4616 | if (atomic_read(&cmd->transport_lun_stop)) { |
c66ac9db | 4617 | |
6708bb27 | 4618 | pr_debug("wait_for_tasks: Stopping" |
e3d6f909 | 4619 | " wait_for_completion(&cmd->transport_lun_fe" |
c66ac9db | 4620 | "_stop_comp); for ITT: 0x%08x\n", |
e3d6f909 | 4621 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db NB |
4622 | /* |
4623 | * There is a special case for WRITES where a FE exception + | |
4624 | * LUN shutdown means ConfigFS context is still sleeping on | |
4625 | * transport_lun_stop_comp in transport_lun_wait_for_tasks(). | |
4626 | * We go ahead and up transport_lun_stop_comp just to be sure | |
4627 | * here. | |
4628 | */ | |
a1d8b49a AG |
4629 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
4630 | complete(&cmd->transport_lun_stop_comp); | |
4631 | wait_for_completion(&cmd->transport_lun_fe_stop_comp); | |
4632 | spin_lock_irqsave(&cmd->t_state_lock, flags); | |
c66ac9db NB |
4633 | |
4634 | transport_all_task_dev_remove_state(cmd); | |
4635 | /* | |
4636 | * At this point, the frontend who was the originator of this | |
4637 | * struct se_cmd, now owns the structure and can be released through | |
4638 | * normal means below. | |
4639 | */ | |
6708bb27 | 4640 | pr_debug("wait_for_tasks: Stopped" |
e3d6f909 | 4641 | " wait_for_completion(&cmd->transport_lun_fe_" |
c66ac9db | 4642 | "stop_comp); for ITT: 0x%08x\n", |
e3d6f909 | 4643 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db | 4644 | |
a1d8b49a | 4645 | atomic_set(&cmd->transport_lun_stop, 0); |
c66ac9db | 4646 | } |
a1d8b49a AG |
4647 | if (!atomic_read(&cmd->t_transport_active) || |
4648 | atomic_read(&cmd->t_transport_aborted)) | |
c66ac9db NB |
4649 | goto remove; |
4650 | ||
a1d8b49a | 4651 | atomic_set(&cmd->t_transport_stop, 1); |
c66ac9db | 4652 | |
6708bb27 | 4653 | pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x" |
c66ac9db | 4654 | " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop" |
e3d6f909 AG |
4655 | " = TRUE\n", cmd, cmd->se_tfo->get_task_tag(cmd), |
4656 | cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, | |
c66ac9db NB |
4657 | cmd->deferred_t_state); |
4658 | ||
a1d8b49a | 4659 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 4660 | |
5951146d | 4661 | wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq); |
c66ac9db | 4662 | |
a1d8b49a | 4663 | wait_for_completion(&cmd->t_transport_stop_comp); |
c66ac9db | 4664 | |
a1d8b49a AG |
4665 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
4666 | atomic_set(&cmd->t_transport_active, 0); | |
4667 | atomic_set(&cmd->t_transport_stop, 0); | |
c66ac9db | 4668 | |
6708bb27 | 4669 | pr_debug("wait_for_tasks: Stopped wait_for_completion(" |
a1d8b49a | 4670 | "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n", |
e3d6f909 | 4671 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db | 4672 | remove: |
a1d8b49a | 4673 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
4674 | if (!remove_cmd) |
4675 | return; | |
4676 | ||
35462975 | 4677 | transport_generic_free_cmd(cmd, 0, session_reinstatement); |
c66ac9db NB |
4678 | } |
4679 | ||
4680 | static int transport_get_sense_codes( | |
4681 | struct se_cmd *cmd, | |
4682 | u8 *asc, | |
4683 | u8 *ascq) | |
4684 | { | |
4685 | *asc = cmd->scsi_asc; | |
4686 | *ascq = cmd->scsi_ascq; | |
4687 | ||
4688 | return 0; | |
4689 | } | |
4690 | ||
4691 | static int transport_set_sense_codes( | |
4692 | struct se_cmd *cmd, | |
4693 | u8 asc, | |
4694 | u8 ascq) | |
4695 | { | |
4696 | cmd->scsi_asc = asc; | |
4697 | cmd->scsi_ascq = ascq; | |
4698 | ||
4699 | return 0; | |
4700 | } | |
4701 | ||
4702 | int transport_send_check_condition_and_sense( | |
4703 | struct se_cmd *cmd, | |
4704 | u8 reason, | |
4705 | int from_transport) | |
4706 | { | |
4707 | unsigned char *buffer = cmd->sense_buffer; | |
4708 | unsigned long flags; | |
4709 | int offset; | |
4710 | u8 asc = 0, ascq = 0; | |
4711 | ||
a1d8b49a | 4712 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 4713 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { |
a1d8b49a | 4714 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
4715 | return 0; |
4716 | } | |
4717 | cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; | |
a1d8b49a | 4718 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
4719 | |
4720 | if (!reason && from_transport) | |
4721 | goto after_reason; | |
4722 | ||
4723 | if (!from_transport) | |
4724 | cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; | |
4725 | /* | |
4726 | * Data Segment and SenseLength of the fabric response PDU. | |
4727 | * | |
4728 | * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE | |
4729 | * from include/scsi/scsi_cmnd.h | |
4730 | */ | |
e3d6f909 | 4731 | offset = cmd->se_tfo->set_fabric_sense_len(cmd, |
c66ac9db NB |
4732 | TRANSPORT_SENSE_BUFFER); |
4733 | /* | |
4734 | * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses | |
4735 | * SENSE KEY values from include/scsi/scsi.h | |
4736 | */ | |
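/*
 * Every case below fills a fixed format sense buffer at 'offset': the
 * response code byte is set to 0x70 (current error), the sense key is
 * written to buffer[offset+SPC_SENSE_KEY_OFFSET], and the additional sense
 * code / qualifier pair to buffer[offset+SPC_ASC_KEY_OFFSET] and
 * buffer[offset+SPC_ASCQ_KEY_OFFSET].  For example, TCM_WRITE_PROTECTED is
 * reported as DATA_PROTECT with ASC 0x27 (WRITE PROTECTED).
 */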
4737 | switch (reason) { | |
4738 | case TCM_NON_EXISTENT_LUN: | |
eb39d340 NB |
4739 | /* CURRENT ERROR */ |
4740 | buffer[offset] = 0x70; | |
4741 | /* ILLEGAL REQUEST */ | |
4742 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | |
4743 | /* LOGICAL UNIT NOT SUPPORTED */ | |
4744 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25; | |
4745 | break; | |
c66ac9db NB |
4746 | case TCM_UNSUPPORTED_SCSI_OPCODE: |
4747 | case TCM_SECTOR_COUNT_TOO_MANY: | |
4748 | /* CURRENT ERROR */ | |
4749 | buffer[offset] = 0x70; | |
4750 | /* ILLEGAL REQUEST */ | |
4751 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | |
4752 | /* INVALID COMMAND OPERATION CODE */ | |
4753 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20; | |
4754 | break; | |
4755 | case TCM_UNKNOWN_MODE_PAGE: | |
4756 | /* CURRENT ERROR */ | |
4757 | buffer[offset] = 0x70; | |
4758 | /* ILLEGAL REQUEST */ | |
4759 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | |
4760 | /* INVALID FIELD IN CDB */ | |
4761 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24; | |
4762 | break; | |
4763 | case TCM_CHECK_CONDITION_ABORT_CMD: | |
4764 | /* CURRENT ERROR */ | |
4765 | buffer[offset] = 0x70; | |
4766 | /* ABORTED COMMAND */ | |
4767 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
4768 | /* BUS DEVICE RESET FUNCTION OCCURRED */ | |
4769 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29; | |
4770 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03; | |
4771 | break; | |
4772 | case TCM_INCORRECT_AMOUNT_OF_DATA: | |
4773 | /* CURRENT ERROR */ | |
4774 | buffer[offset] = 0x70; | |
4775 | /* ABORTED COMMAND */ | |
4776 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
4777 | /* WRITE ERROR */ | |
4778 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c; | |
4779 | /* NOT ENOUGH UNSOLICITED DATA */ | |
4780 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d; | |
4781 | break; | |
4782 | case TCM_INVALID_CDB_FIELD: | |
4783 | /* CURRENT ERROR */ | |
4784 | buffer[offset] = 0x70; | |
4785 | /* ABORTED COMMAND */ | |
4786 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
4787 | /* INVALID FIELD IN CDB */ | |
4788 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24; | |
4789 | break; | |
4790 | case TCM_INVALID_PARAMETER_LIST: | |
4791 | /* CURRENT ERROR */ | |
4792 | buffer[offset] = 0x70; | |
4793 | /* ABORTED COMMAND */ | |
4794 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
4795 | /* INVALID FIELD IN PARAMETER LIST */ | |
4796 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26; | |
4797 | break; | |
4798 | case TCM_UNEXPECTED_UNSOLICITED_DATA: | |
4799 | /* CURRENT ERROR */ | |
4800 | buffer[offset] = 0x70; | |
4801 | /* ABORTED COMMAND */ | |
4802 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
4803 | /* WRITE ERROR */ | |
4804 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c; | |
4805 | /* UNEXPECTED_UNSOLICITED_DATA */ | |
4806 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c; | |
4807 | break; | |
4808 | case TCM_SERVICE_CRC_ERROR: | |
4809 | /* CURRENT ERROR */ | |
4810 | buffer[offset] = 0x70; | |
4811 | /* ABORTED COMMAND */ | |
4812 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
4813 | /* PROTOCOL SERVICE CRC ERROR */ | |
4814 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47; | |
4815 | /* N/A */ | |
4816 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05; | |
4817 | break; | |
4818 | case TCM_SNACK_REJECTED: | |
4819 | /* CURRENT ERROR */ | |
4820 | buffer[offset] = 0x70; | |
4821 | /* ABORTED COMMAND */ | |
4822 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
4823 | /* READ ERROR */ | |
4824 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11; | |
4825 | /* FAILED RETRANSMISSION REQUEST */ | |
4826 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13; | |
4827 | break; | |
4828 | case TCM_WRITE_PROTECTED: | |
4829 | /* CURRENT ERROR */ | |
4830 | buffer[offset] = 0x70; | |
4831 | /* DATA PROTECT */ | |
4832 | buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT; | |
4833 | /* WRITE PROTECTED */ | |
4834 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27; | |
4835 | break; | |
4836 | case TCM_CHECK_CONDITION_UNIT_ATTENTION: | |
4837 | /* CURRENT ERROR */ | |
4838 | buffer[offset] = 0x70; | |
4839 | /* UNIT ATTENTION */ | |
4840 | buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION; | |
4841 | core_scsi3_ua_for_check_condition(cmd, &asc, &ascq); | |
4842 | buffer[offset+SPC_ASC_KEY_OFFSET] = asc; | |
4843 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq; | |
4844 | break; | |
4845 | case TCM_CHECK_CONDITION_NOT_READY: | |
4846 | /* CURRENT ERROR */ | |
4847 | buffer[offset] = 0x70; | |
4848 | /* Not Ready */ | |
4849 | buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY; | |
4850 | transport_get_sense_codes(cmd, &asc, &ascq); | |
4851 | buffer[offset+SPC_ASC_KEY_OFFSET] = asc; | |
4852 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq; | |
4853 | break; | |
4854 | case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: | |
4855 | default: | |
4856 | /* CURRENT ERROR */ | |
4857 | buffer[offset] = 0x70; | |
4858 | /* ILLEGAL REQUEST */ | |
4859 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | |
4860 | /* LOGICAL UNIT COMMUNICATION FAILURE */ | |
4861 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80; | |
4862 | break; | |
4863 | } | |
4864 | /* | |
4865 | * This code uses linux/include/scsi/scsi.h SAM status codes! | |
4866 | */ | |
4867 | cmd->scsi_status = SAM_STAT_CHECK_CONDITION; | |
4868 | /* | |
4869 | * Automatically padded, this value is encoded in the fabric's | |
4870 | * data_length response PDU containing the SCSI defined sense data. | |
4871 | */ | |
4872 | cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset; | |
4873 | ||
4874 | after_reason: | |
07bde79a | 4875 | return cmd->se_tfo->queue_status(cmd); |
c66ac9db NB |
4876 | } |
4877 | EXPORT_SYMBOL(transport_send_check_condition_and_sense); | |
4878 | ||
4879 | int transport_check_aborted_status(struct se_cmd *cmd, int send_status) | |
4880 | { | |
4881 | int ret = 0; | |
4882 | ||
a1d8b49a | 4883 | if (atomic_read(&cmd->t_transport_aborted) != 0) { |
6708bb27 | 4884 | if (!send_status || |
c66ac9db NB |
4885 | (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) |
4886 | return 1; | |
4887 | #if 0 | |
6708bb27 | 4888 | pr_debug("Sending delayed SAM_STAT_TASK_ABORTED" |
c66ac9db | 4889 | " status for CDB: 0x%02x ITT: 0x%08x\n", |
a1d8b49a | 4890 | cmd->t_task_cdb[0], |
e3d6f909 | 4891 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db NB |
4892 | #endif |
4893 | cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; | |
e3d6f909 | 4894 | cmd->se_tfo->queue_status(cmd); |
c66ac9db NB |
4895 | ret = 1; |
4896 | } | |
4897 | return ret; | |
4898 | } | |
4899 | EXPORT_SYMBOL(transport_check_aborted_status); | |
4900 | ||
4901 | void transport_send_task_abort(struct se_cmd *cmd) | |
4902 | { | |
c252f003 NB |
4903 | unsigned long flags; |
4904 | ||
4905 | spin_lock_irqsave(&cmd->t_state_lock, flags); | |
4906 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { | |
4907 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
4908 | return; | |
4909 | } | |
4910 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
4911 | ||
c66ac9db NB |
4912 | /* |
4913 | * If there are still expected incoming fabric WRITEs, we wait | |
4914 | * until they have completed before sending a TASK_ABORTED |
4915 | * response. This response with TASK_ABORTED status will be | |
4916 | * queued back to fabric module by transport_check_aborted_status(). | |
4917 | */ | |
4918 | if (cmd->data_direction == DMA_TO_DEVICE) { | |
e3d6f909 | 4919 | if (cmd->se_tfo->write_pending_status(cmd) != 0) { |
a1d8b49a | 4920 | atomic_inc(&cmd->t_transport_aborted); |
c66ac9db NB |
4921 | smp_mb__after_atomic_inc(); |
4922 | cmd->scsi_status = SAM_STAT_TASK_ABORTED; | |
4923 | transport_new_cmd_failure(cmd); | |
4924 | return; | |
4925 | } | |
4926 | } | |
4927 | cmd->scsi_status = SAM_STAT_TASK_ABORTED; | |
4928 | #if 0 | |
6708bb27 | 4929 | pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x," |
a1d8b49a | 4930 | " ITT: 0x%08x\n", cmd->t_task_cdb[0], |
e3d6f909 | 4931 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db | 4932 | #endif |
e3d6f909 | 4933 | cmd->se_tfo->queue_status(cmd); |
c66ac9db NB |
4934 | } |
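/*
 * Note on the WRITE case above: while the fabric still expects solicited
 * data (->write_pending_status() != 0), the abort is only recorded by
 * incrementing t_transport_aborted; the SAM_STAT_TASK_ABORTED status is
 * delivered later by transport_check_aborted_status() once the WRITE data
 * has drained, rather than being queued immediately here.
 */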
4935 | ||
4936 | /* transport_generic_do_tmr(): | |
4937 | * | |
4938 | * | |
4939 | */ | |
4940 | int transport_generic_do_tmr(struct se_cmd *cmd) | |
4941 | { | |
5951146d | 4942 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
4943 | struct se_tmr_req *tmr = cmd->se_tmr_req; |
4944 | int ret; | |
4945 | ||
4946 | switch (tmr->function) { | |
5c6cd613 | 4947 | case TMR_ABORT_TASK: |
c66ac9db NB |
4948 | tmr->response = TMR_FUNCTION_REJECTED; |
4949 | break; | |
5c6cd613 NB |
4950 | case TMR_ABORT_TASK_SET: |
4951 | case TMR_CLEAR_ACA: | |
4952 | case TMR_CLEAR_TASK_SET: | |
c66ac9db NB |
4953 | tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; |
4954 | break; | |
5c6cd613 | 4955 | case TMR_LUN_RESET: |
c66ac9db NB |
4956 | ret = core_tmr_lun_reset(dev, tmr, NULL, NULL); |
4957 | tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE : | |
4958 | TMR_FUNCTION_REJECTED; | |
4959 | break; | |
5c6cd613 | 4960 | case TMR_TARGET_WARM_RESET: |
c66ac9db NB |
4961 | tmr->response = TMR_FUNCTION_REJECTED; |
4962 | break; | |
5c6cd613 | 4963 | case TMR_TARGET_COLD_RESET: |
c66ac9db NB |
4964 | tmr->response = TMR_FUNCTION_REJECTED; |
4965 | break; | |
c66ac9db | 4966 | default: |
6708bb27 | 4967 | pr_err("Unknown TMR function: 0x%02x.\n", |
c66ac9db NB |
4968 | tmr->function); |
4969 | tmr->response = TMR_FUNCTION_REJECTED; | |
4970 | break; | |
4971 | } | |
4972 | ||
4973 | cmd->t_state = TRANSPORT_ISTATE_PROCESSING; | |
e3d6f909 | 4974 | cmd->se_tfo->queue_tm_rsp(cmd); |
c66ac9db NB |
4975 | |
4976 | transport_cmd_check_stop(cmd, 2, 0); | |
4977 | return 0; | |
4978 | } | |
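/*
 * Response mapping used above: only TMR_LUN_RESET is backed by an
 * implementation (core_tmr_lun_reset()); TMR_ABORT_TASK and the warm/cold
 * target resets are answered with TMR_FUNCTION_REJECTED, while
 * TMR_ABORT_TASK_SET, TMR_CLEAR_ACA and TMR_CLEAR_TASK_SET report
 * TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED.  The result is always returned to
 * the fabric through ->queue_tm_rsp().
 */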
4979 | ||
4980 | /* | |
4981 | * Called with spin_lock_irq(&dev->execute_task_lock); held | |
4982 | * | |
4983 | */ | |
4984 | static struct se_task * | |
4985 | transport_get_task_from_state_list(struct se_device *dev) | |
4986 | { | |
4987 | struct se_task *task; | |
4988 | ||
4989 | if (list_empty(&dev->state_task_list)) | |
4990 | return NULL; | |
4991 | ||
4992 | list_for_each_entry(task, &dev->state_task_list, t_state_list) | |
4993 | break; | |
4994 | ||
4995 | list_del(&task->t_state_list); | |
4996 | atomic_set(&task->task_state_active, 0); | |
4997 | ||
4998 | return task; | |
4999 | } | |
5000 | ||
5001 | static void transport_processing_shutdown(struct se_device *dev) | |
5002 | { | |
5003 | struct se_cmd *cmd; | |
c66ac9db | 5004 | struct se_task *task; |
c66ac9db NB |
5005 | unsigned long flags; |
5006 | /* | |
5007 | * Empty the struct se_device's struct se_task state list. | |
5008 | */ | |
5009 | spin_lock_irqsave(&dev->execute_task_lock, flags); | |
5010 | while ((task = transport_get_task_from_state_list(dev))) { | |
e3d6f909 | 5011 | if (!task->task_se_cmd) { |
6708bb27 | 5012 | pr_err("task->task_se_cmd is NULL!\n"); |
c66ac9db NB |
5013 | continue; |
5014 | } | |
e3d6f909 | 5015 | cmd = task->task_se_cmd; |
c66ac9db | 5016 | |
c66ac9db NB |
5017 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); |
5018 | ||
a1d8b49a | 5019 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 5020 | |
6708bb27 AG |
5021 | pr_debug("PT: cmd: %p task: %p ITT: 0x%08x," |
5022 | " i_state: %d, t_state/def_t_state:" | |
c66ac9db | 5023 | " %d/%d cdb: 0x%02x\n", cmd, task, |
6708bb27 AG |
5024 | cmd->se_tfo->get_task_tag(cmd), |
5025 | cmd->se_tfo->get_cmd_state(cmd), | |
c66ac9db | 5026 | cmd->t_state, cmd->deferred_t_state, |
a1d8b49a | 5027 | cmd->t_task_cdb[0]); |
6708bb27 | 5028 | pr_debug("PT: ITT[0x%08x] - t_tasks: %d t_task_cdbs_left:" |
c66ac9db NB |
5029 | " %d t_task_cdbs_sent: %d -- t_transport_active: %d" |
5030 | " t_transport_stop: %d t_transport_sent: %d\n", | |
e3d6f909 | 5031 | cmd->se_tfo->get_task_tag(cmd), |
6708bb27 | 5032 | cmd->t_task_list_num, |
a1d8b49a AG |
5033 | atomic_read(&cmd->t_task_cdbs_left), |
5034 | atomic_read(&cmd->t_task_cdbs_sent), | |
5035 | atomic_read(&cmd->t_transport_active), | |
5036 | atomic_read(&cmd->t_transport_stop), | |
5037 | atomic_read(&cmd->t_transport_sent)); | |
c66ac9db NB |
5038 | |
5039 | if (atomic_read(&task->task_active)) { | |
5040 | atomic_set(&task->task_stop, 1); | |
5041 | spin_unlock_irqrestore( | |
a1d8b49a | 5042 | &cmd->t_state_lock, flags); |
c66ac9db | 5043 | |
6708bb27 | 5044 | pr_debug("Waiting for task: %p to shutdown for dev:" |
c66ac9db NB |
5045 | " %p\n", task, dev); |
5046 | wait_for_completion(&task->task_stop_comp); | |
6708bb27 | 5047 | pr_debug("Completed task: %p shutdown for dev: %p\n", |
c66ac9db NB |
5048 | task, dev); |
5049 | ||
a1d8b49a AG |
5050 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
5051 | atomic_dec(&cmd->t_task_cdbs_left); | |
c66ac9db NB |
5052 | |
5053 | atomic_set(&task->task_active, 0); | |
5054 | atomic_set(&task->task_stop, 0); | |
52208ae3 NB |
5055 | } else { |
5056 | if (atomic_read(&task->task_execute_queue) != 0) | |
5057 | transport_remove_task_from_execute_queue(task, dev); | |
c66ac9db NB |
5058 | } |
5059 | __transport_stop_task_timer(task, &flags); | |
5060 | ||
6708bb27 | 5061 | if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) { |
c66ac9db | 5062 | spin_unlock_irqrestore( |
a1d8b49a | 5063 | &cmd->t_state_lock, flags); |
c66ac9db | 5064 | |
6708bb27 | 5065 | pr_debug("Skipping task: %p, dev: %p for" |
c66ac9db | 5066 | " t_task_cdbs_ex_left: %d\n", task, dev, |
a1d8b49a | 5067 | atomic_read(&cmd->t_task_cdbs_ex_left)); |
c66ac9db NB |
5068 | |
5069 | spin_lock_irqsave(&dev->execute_task_lock, flags); | |
5070 | continue; | |
5071 | } | |
5072 | ||
a1d8b49a | 5073 | if (atomic_read(&cmd->t_transport_active)) { |
6708bb27 | 5074 | pr_debug("got t_transport_active = 1 for task: %p, dev:" |
c66ac9db NB |
5075 | " %p\n", task, dev); |
5076 | ||
a1d8b49a | 5077 | if (atomic_read(&cmd->t_fe_count)) { |
c66ac9db | 5078 | spin_unlock_irqrestore( |
a1d8b49a | 5079 | &cmd->t_state_lock, flags); |
c66ac9db NB |
5080 | transport_send_check_condition_and_sense( |
5081 | cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, | |
5082 | 0); | |
5083 | transport_remove_cmd_from_queue(cmd, | |
5951146d | 5084 | &cmd->se_dev->dev_queue_obj); |
c66ac9db NB |
5085 | |
5086 | transport_lun_remove_cmd(cmd); | |
5087 | transport_cmd_check_stop(cmd, 1, 0); | |
5088 | } else { | |
5089 | spin_unlock_irqrestore( | |
a1d8b49a | 5090 | &cmd->t_state_lock, flags); |
c66ac9db NB |
5091 | |
5092 | transport_remove_cmd_from_queue(cmd, | |
5951146d | 5093 | &cmd->se_dev->dev_queue_obj); |
c66ac9db NB |
5094 | |
5095 | transport_lun_remove_cmd(cmd); | |
5096 | ||
5097 | if (transport_cmd_check_stop(cmd, 1, 0)) | |
35462975 | 5098 | transport_generic_remove(cmd, 0); |
c66ac9db NB |
5099 | } |
5100 | ||
5101 | spin_lock_irqsave(&dev->execute_task_lock, flags); | |
5102 | continue; | |
5103 | } | |
6708bb27 | 5104 | pr_debug("Got t_transport_active = 0 for task: %p, dev: %p\n", |
c66ac9db NB |
5105 | task, dev); |
5106 | ||
a1d8b49a | 5107 | if (atomic_read(&cmd->t_fe_count)) { |
c66ac9db | 5108 | spin_unlock_irqrestore( |
a1d8b49a | 5109 | &cmd->t_state_lock, flags); |
c66ac9db NB |
5110 | transport_send_check_condition_and_sense(cmd, |
5111 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); | |
5112 | transport_remove_cmd_from_queue(cmd, | |
5951146d | 5113 | &cmd->se_dev->dev_queue_obj); |
c66ac9db NB |
5114 | |
5115 | transport_lun_remove_cmd(cmd); | |
5116 | transport_cmd_check_stop(cmd, 1, 0); | |
5117 | } else { | |
5118 | spin_unlock_irqrestore( | |
a1d8b49a | 5119 | &cmd->t_state_lock, flags); |
c66ac9db NB |
5120 | |
5121 | transport_remove_cmd_from_queue(cmd, | |
5951146d | 5122 | &cmd->se_dev->dev_queue_obj); |
c66ac9db NB |
5123 | transport_lun_remove_cmd(cmd); |
5124 | ||
5125 | if (transport_cmd_check_stop(cmd, 1, 0)) | |
35462975 | 5126 | transport_generic_remove(cmd, 0); |
c66ac9db NB |
5127 | } |
5128 | ||
5129 | spin_lock_irqsave(&dev->execute_task_lock, flags); | |
5130 | } | |
5131 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | |
5132 | /* | |
5133 | * Empty the struct se_device's struct se_cmd list. | |
5134 | */ | |
5951146d | 5135 | while ((cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj))) { |
c66ac9db | 5136 | |
6708bb27 | 5137 | pr_debug("From Device Queue: cmd: %p t_state: %d\n", |
5951146d | 5138 | cmd, cmd->t_state); |
c66ac9db | 5139 | |
a1d8b49a | 5140 | if (atomic_read(&cmd->t_fe_count)) { |
c66ac9db NB |
5141 | transport_send_check_condition_and_sense(cmd, |
5142 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); | |
5143 | ||
5144 | transport_lun_remove_cmd(cmd); | |
5145 | transport_cmd_check_stop(cmd, 1, 0); | |
5146 | } else { | |
5147 | transport_lun_remove_cmd(cmd); | |
5148 | if (transport_cmd_check_stop(cmd, 1, 0)) | |
35462975 | 5149 | transport_generic_remove(cmd, 0); |
c66ac9db | 5150 | } |
c66ac9db | 5151 | } |
c66ac9db NB |
5152 | } |
5153 | ||
5154 | /* transport_processing_thread(): | |
5155 | * | |
5156 | * | |
5157 | */ | |
5158 | static int transport_processing_thread(void *param) | |
5159 | { | |
5951146d | 5160 | int ret; |
c66ac9db NB |
5161 | struct se_cmd *cmd; |
5162 | struct se_device *dev = (struct se_device *) param; | |
c66ac9db NB |
5163 | |
5164 | set_user_nice(current, -20); | |
5165 | ||
5166 | while (!kthread_should_stop()) { | |
e3d6f909 AG |
5167 | ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq, |
5168 | atomic_read(&dev->dev_queue_obj.queue_cnt) || | |
c66ac9db NB |
5169 | kthread_should_stop()); |
5170 | if (ret < 0) | |
5171 | goto out; | |
5172 | ||
5173 | spin_lock_irq(&dev->dev_status_lock); | |
5174 | if (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) { | |
5175 | spin_unlock_irq(&dev->dev_status_lock); | |
5176 | transport_processing_shutdown(dev); | |
5177 | continue; | |
5178 | } | |
5179 | spin_unlock_irq(&dev->dev_status_lock); | |
5180 | ||
5181 | get_cmd: | |
5182 | __transport_execute_tasks(dev); | |
5183 | ||
5951146d AG |
5184 | cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj); |
5185 | if (!cmd) | |
c66ac9db NB |
5186 | continue; |
5187 | ||
5951146d | 5188 | switch (cmd->t_state) { |
680b73c5 CH |
5189 | case TRANSPORT_NEW_CMD: |
5190 | BUG(); | |
5191 | break; | |
c66ac9db | 5192 | case TRANSPORT_NEW_CMD_MAP: |
6708bb27 AG |
5193 | if (!cmd->se_tfo->new_cmd_map) { |
5194 | pr_err("cmd->se_tfo->new_cmd_map is" | |
c66ac9db NB |
5195 | " NULL for TRANSPORT_NEW_CMD_MAP\n"); |
5196 | BUG(); | |
5197 | } | |
e3d6f909 | 5198 | ret = cmd->se_tfo->new_cmd_map(cmd); |
c66ac9db NB |
5199 | if (ret < 0) { |
5200 | cmd->transport_error_status = ret; | |
5201 | transport_generic_request_failure(cmd, NULL, | |
5202 | 0, (cmd->data_direction != | |
5203 | DMA_TO_DEVICE)); | |
5204 | break; | |
5205 | } | |
c66ac9db | 5206 | ret = transport_generic_new_cmd(cmd); |
07bde79a NB |
5207 | if (ret == -EAGAIN) |
5208 | break; | |
5209 | else if (ret < 0) { | |
c66ac9db NB |
5210 | cmd->transport_error_status = ret; |
5211 | transport_generic_request_failure(cmd, NULL, | |
5212 | 0, (cmd->data_direction != | |
5213 | DMA_TO_DEVICE)); | |
5214 | } | |
5215 | break; | |
5216 | case TRANSPORT_PROCESS_WRITE: | |
5217 | transport_generic_process_write(cmd); | |
5218 | break; | |
5219 | case TRANSPORT_COMPLETE_OK: | |
5220 | transport_stop_all_task_timers(cmd); | |
5221 | transport_generic_complete_ok(cmd); | |
5222 | break; | |
5223 | case TRANSPORT_REMOVE: | |
35462975 | 5224 | transport_generic_remove(cmd, 0); |
c66ac9db | 5225 | break; |
f4366772 | 5226 | case TRANSPORT_FREE_CMD_INTR: |
35462975 | 5227 | transport_generic_free_cmd(cmd, 0, 0); |
f4366772 | 5228 | break; |
c66ac9db NB |
5229 | case TRANSPORT_PROCESS_TMR: |
5230 | transport_generic_do_tmr(cmd); | |
5231 | break; | |
5232 | case TRANSPORT_COMPLETE_FAILURE: | |
5233 | transport_generic_request_failure(cmd, NULL, 1, 1); | |
5234 | break; | |
5235 | case TRANSPORT_COMPLETE_TIMEOUT: | |
5236 | transport_stop_all_task_timers(cmd); | |
5237 | transport_generic_request_timeout(cmd); | |
5238 | break; | |
07bde79a NB |
5239 | case TRANSPORT_COMPLETE_QF_WP: |
5240 | transport_generic_write_pending(cmd); | |
5241 | break; | |
c66ac9db | 5242 | default: |
6708bb27 | 5243 | pr_err("Unknown t_state: %d deferred_t_state:" |
c66ac9db | 5244 | " %d for ITT: 0x%08x i_state: %d on SE LUN:" |
5951146d | 5245 | " %u\n", cmd->t_state, cmd->deferred_t_state, |
e3d6f909 AG |
5246 | cmd->se_tfo->get_task_tag(cmd), |
5247 | cmd->se_tfo->get_cmd_state(cmd), | |
5248 | cmd->se_lun->unpacked_lun); | |
c66ac9db NB |
5249 | BUG(); |
5250 | } | |
5251 | ||
5252 | goto get_cmd; | |
5253 | } | |
5254 | ||
5255 | out: | |
5256 | transport_release_all_cmds(dev); | |
5257 | dev->process_thread = NULL; | |
5258 | return 0; | |
5259 | } |