/*******************************************************************************
 * Filename:  target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/version.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tmr.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_configfs.h>

#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_scdb.h"
#include "target_core_ua.h"

/* #define DEBUG_CDB_HANDLER */
#ifdef DEBUG_CDB_HANDLER
#define DEBUG_CDB_H(x...) printk(KERN_INFO x)
#else
#define DEBUG_CDB_H(x...)
#endif

/* #define DEBUG_CMD_MAP */
#ifdef DEBUG_CMD_MAP
#define DEBUG_CMD_M(x...) printk(KERN_INFO x)
#else
#define DEBUG_CMD_M(x...)
#endif

/* #define DEBUG_MEM_ALLOC */
#ifdef DEBUG_MEM_ALLOC
#define DEBUG_MEM(x...) printk(KERN_INFO x)
#else
#define DEBUG_MEM(x...)
#endif

/* #define DEBUG_MEM2_ALLOC */
#ifdef DEBUG_MEM2_ALLOC
#define DEBUG_MEM2(x...) printk(KERN_INFO x)
#else
#define DEBUG_MEM2(x...)
#endif

/* #define DEBUG_SG_CALC */
#ifdef DEBUG_SG_CALC
#define DEBUG_SC(x...) printk(KERN_INFO x)
#else
#define DEBUG_SC(x...)
#endif

/* #define DEBUG_SE_OBJ */
#ifdef DEBUG_SE_OBJ
#define DEBUG_SO(x...) printk(KERN_INFO x)
#else
#define DEBUG_SO(x...)
#endif

/* #define DEBUG_CMD_VOL */
#ifdef DEBUG_CMD_VOL
#define DEBUG_VOL(x...) printk(KERN_INFO x)
#else
#define DEBUG_VOL(x...)
#endif

/* #define DEBUG_CMD_STOP */
#ifdef DEBUG_CMD_STOP
#define DEBUG_CS(x...) printk(KERN_INFO x)
#else
#define DEBUG_CS(x...)
#endif

/* #define DEBUG_PASSTHROUGH */
#ifdef DEBUG_PASSTHROUGH
#define DEBUG_PT(x...) printk(KERN_INFO x)
#else
#define DEBUG_PT(x...)
#endif

/* #define DEBUG_TASK_STOP */
#ifdef DEBUG_TASK_STOP
#define DEBUG_TS(x...) printk(KERN_INFO x)
#else
#define DEBUG_TS(x...)
#endif

/* #define DEBUG_TRANSPORT_STOP */
#ifdef DEBUG_TRANSPORT_STOP
#define DEBUG_TRANSPORT_S(x...) printk(KERN_INFO x)
#else
#define DEBUG_TRANSPORT_S(x...)
#endif

/* #define DEBUG_TASK_FAILURE */
#ifdef DEBUG_TASK_FAILURE
#define DEBUG_TF(x...) printk(KERN_INFO x)
#else
#define DEBUG_TF(x...)
#endif

/* #define DEBUG_DEV_OFFLINE */
#ifdef DEBUG_DEV_OFFLINE
#define DEBUG_DO(x...) printk(KERN_INFO x)
#else
#define DEBUG_DO(x...)
#endif

/* #define DEBUG_TASK_STATE */
#ifdef DEBUG_TASK_STATE
#define DEBUG_TSTATE(x...) printk(KERN_INFO x)
#else
#define DEBUG_TSTATE(x...)
#endif

/* #define DEBUG_STATUS_THR */
#ifdef DEBUG_STATUS_THR
#define DEBUG_ST(x...) printk(KERN_INFO x)
#else
#define DEBUG_ST(x...)
#endif

/* #define DEBUG_TASK_TIMEOUT */
#ifdef DEBUG_TASK_TIMEOUT
#define DEBUG_TT(x...) printk(KERN_INFO x)
#else
#define DEBUG_TT(x...)
#endif

/* #define DEBUG_GENERIC_REQUEST_FAILURE */
#ifdef DEBUG_GENERIC_REQUEST_FAILURE
#define DEBUG_GRF(x...) printk(KERN_INFO x)
#else
#define DEBUG_GRF(x...)
#endif

/* #define DEBUG_SAM_TASK_ATTRS */
#ifdef DEBUG_SAM_TASK_ATTRS
#define DEBUG_STA(x...) printk(KERN_INFO x)
#else
#define DEBUG_STA(x...)
#endif
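
/*
 * Illustrative note (not part of the original source): each commented-out
 * define above is a compile-time debug switch.  Uncommenting one, e.g.
 * DEBUG_CMD_STOP, turns its macro into a printk() wrapper so a call site
 * such as:
 *
 *	DEBUG_CS("stop requested for ITT: 0x%08x\n",
 *			cmd->se_tfo->get_task_tag(cmd));
 *
 * logs at KERN_INFO; with the switch left commented out the macro expands
 * to nothing and the call compiles away entirely.
 */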
186 | ||
e3d6f909 | 187 | static int sub_api_initialized; |
c66ac9db NB |
188 | |
189 | static struct kmem_cache *se_cmd_cache; | |
190 | static struct kmem_cache *se_sess_cache; | |
191 | struct kmem_cache *se_tmr_req_cache; | |
192 | struct kmem_cache *se_ua_cache; | |
193 | struct kmem_cache *se_mem_cache; | |
194 | struct kmem_cache *t10_pr_reg_cache; | |
195 | struct kmem_cache *t10_alua_lu_gp_cache; | |
196 | struct kmem_cache *t10_alua_lu_gp_mem_cache; | |
197 | struct kmem_cache *t10_alua_tg_pt_gp_cache; | |
198 | struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; | |
199 | ||
200 | /* Used for transport_dev_get_map_*() */ | |
201 | typedef int (*map_func_t)(struct se_task *, u32); | |
202 | ||
static int transport_generic_write_pending(struct se_cmd *);
static int transport_processing_thread(void *param);
static int __transport_execute_tasks(struct se_device *dev);
static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_direct_request_timeout(struct se_cmd *cmd);
static void transport_free_dev_tasks(struct se_cmd *cmd);
static u32 transport_allocate_tasks(struct se_cmd *cmd,
		unsigned long long starting_lba, u32 sectors,
		enum dma_data_direction data_direction,
		struct list_head *mem_list, int set_counts);
static int transport_generic_get_mem(struct se_cmd *cmd, u32 length);
static int transport_generic_remove(struct se_cmd *cmd,
		int release_to_pool, int session_reinstatement);
static int transport_cmd_get_valid_sectors(struct se_cmd *cmd);
static int transport_map_sg_to_mem(struct se_cmd *cmd,
		struct list_head *se_mem_list, struct scatterlist *sgl);
static void transport_memcpy_se_mem_read_contig(unsigned char *dst,
		struct list_head *se_mem_list, u32 len);
static void transport_release_fe_cmd(struct se_cmd *cmd);
static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
		struct se_queue_obj *qobj);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
static void transport_stop_all_task_timers(struct se_cmd *cmd);

int init_se_kmem_caches(void)
{
	se_cmd_cache = kmem_cache_create("se_cmd_cache",
			sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
	if (!(se_cmd_cache)) {
		printk(KERN_ERR "kmem_cache_create for struct se_cmd failed\n");
		goto out;
	}
	se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
			sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
			0, NULL);
	if (!(se_tmr_req_cache)) {
		printk(KERN_ERR "kmem_cache_create() for struct se_tmr_req"
				" failed\n");
		goto out;
	}
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!(se_sess_cache)) {
		printk(KERN_ERR "kmem_cache_create() for struct se_session"
				" failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!(se_ua_cache)) {
		printk(KERN_ERR "kmem_cache_create() for struct se_ua failed\n");
		goto out;
	}
	se_mem_cache = kmem_cache_create("se_mem_cache",
			sizeof(struct se_mem), __alignof__(struct se_mem), 0, NULL);
	if (!(se_mem_cache)) {
		printk(KERN_ERR "kmem_cache_create() for struct se_mem failed\n");
		goto out;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!(t10_pr_reg_cache)) {
		printk(KERN_ERR "kmem_cache_create() for struct t10_pr_registration"
				" failed\n");
		goto out;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
			0, NULL);
	if (!(t10_alua_lu_gp_cache)) {
		printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_cache"
				" failed\n");
		goto out;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!(t10_alua_lu_gp_mem_cache)) {
		printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_mem_"
				"cache failed\n");
		goto out;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!(t10_alua_tg_pt_gp_cache)) {
		printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
				"cache failed\n");
		goto out;
	}
	t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
			"t10_alua_tg_pt_gp_mem_cache",
			sizeof(struct t10_alua_tg_pt_gp_member),
			__alignof__(struct t10_alua_tg_pt_gp_member),
			0, NULL);
	if (!(t10_alua_tg_pt_gp_mem_cache)) {
		printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
				"mem_t failed\n");
		goto out;
	}

	return 0;
out:
	if (se_cmd_cache)
		kmem_cache_destroy(se_cmd_cache);
	if (se_tmr_req_cache)
		kmem_cache_destroy(se_tmr_req_cache);
	if (se_sess_cache)
		kmem_cache_destroy(se_sess_cache);
	if (se_ua_cache)
		kmem_cache_destroy(se_ua_cache);
	if (se_mem_cache)
		kmem_cache_destroy(se_mem_cache);
	if (t10_pr_reg_cache)
		kmem_cache_destroy(t10_pr_reg_cache);
	if (t10_alua_lu_gp_cache)
		kmem_cache_destroy(t10_alua_lu_gp_cache);
	if (t10_alua_lu_gp_mem_cache)
		kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	if (t10_alua_tg_pt_gp_cache)
		kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	if (t10_alua_tg_pt_gp_mem_cache)
		kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
	return -ENOMEM;
}
331 | ||
e3d6f909 | 332 | void release_se_kmem_caches(void) |
c66ac9db | 333 | { |
c66ac9db NB |
334 | kmem_cache_destroy(se_cmd_cache); |
335 | kmem_cache_destroy(se_tmr_req_cache); | |
336 | kmem_cache_destroy(se_sess_cache); | |
337 | kmem_cache_destroy(se_ua_cache); | |
338 | kmem_cache_destroy(se_mem_cache); | |
339 | kmem_cache_destroy(t10_pr_reg_cache); | |
340 | kmem_cache_destroy(t10_alua_lu_gp_cache); | |
341 | kmem_cache_destroy(t10_alua_lu_gp_mem_cache); | |
342 | kmem_cache_destroy(t10_alua_tg_pt_gp_cache); | |
343 | kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); | |
c66ac9db NB |
344 | } |
345 | ||
/* This code ensures unique mib indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}
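
/*
 * Example usage (taken from transport_add_device_to_core_hba() later in
 * this file):
 *
 *	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
 *
 * Each scsi_index_t type keeps its own counter under scsi_mib_index_lock,
 * so indexes are unique and monotonically increasing per type.
 */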
365 | ||
c66ac9db NB |
366 | void transport_init_queue_obj(struct se_queue_obj *qobj) |
367 | { | |
368 | atomic_set(&qobj->queue_cnt, 0); | |
369 | INIT_LIST_HEAD(&qobj->qobj_list); | |
370 | init_waitqueue_head(&qobj->thread_wq); | |
371 | spin_lock_init(&qobj->cmd_queue_lock); | |
372 | } | |
373 | EXPORT_SYMBOL(transport_init_queue_obj); | |
374 | ||
375 | static int transport_subsystem_reqmods(void) | |
376 | { | |
377 | int ret; | |
378 | ||
379 | ret = request_module("target_core_iblock"); | |
380 | if (ret != 0) | |
381 | printk(KERN_ERR "Unable to load target_core_iblock\n"); | |
382 | ||
383 | ret = request_module("target_core_file"); | |
384 | if (ret != 0) | |
385 | printk(KERN_ERR "Unable to load target_core_file\n"); | |
386 | ||
387 | ret = request_module("target_core_pscsi"); | |
388 | if (ret != 0) | |
389 | printk(KERN_ERR "Unable to load target_core_pscsi\n"); | |
390 | ||
391 | ret = request_module("target_core_stgt"); | |
392 | if (ret != 0) | |
393 | printk(KERN_ERR "Unable to load target_core_stgt\n"); | |
394 | ||
395 | return 0; | |
396 | } | |
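
/*
 * Note: request_module() failures above are logged but deliberately not
 * treated as fatal, so transport_subsystem_reqmods() always returns 0;
 * a kernel built with only a subset of the subsystem plugins remains
 * usable.
 */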
397 | ||
398 | int transport_subsystem_check_init(void) | |
399 | { | |
e3d6f909 AG |
400 | int ret; |
401 | ||
402 | if (sub_api_initialized) | |
c66ac9db NB |
403 | return 0; |
404 | /* | |
405 | * Request the loading of known TCM subsystem plugins.. | |
406 | */ | |
e3d6f909 AG |
407 | ret = transport_subsystem_reqmods(); |
408 | if (ret < 0) | |
409 | return ret; | |
c66ac9db | 410 | |
e3d6f909 | 411 | sub_api_initialized = 1; |
c66ac9db NB |
412 | return 0; |
413 | } | |
414 | ||
415 | struct se_session *transport_init_session(void) | |
416 | { | |
417 | struct se_session *se_sess; | |
418 | ||
419 | se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); | |
420 | if (!(se_sess)) { | |
421 | printk(KERN_ERR "Unable to allocate struct se_session from" | |
422 | " se_sess_cache\n"); | |
423 | return ERR_PTR(-ENOMEM); | |
424 | } | |
425 | INIT_LIST_HEAD(&se_sess->sess_list); | |
426 | INIT_LIST_HEAD(&se_sess->sess_acl_list); | |
c66ac9db NB |
427 | |
428 | return se_sess; | |
429 | } | |
430 | EXPORT_SYMBOL(transport_init_session); | |
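
/*
 * Illustrative fabric-side call sequence (a sketch, not taken from any
 * specific fabric module): allocate the session, then register it against
 * a TPG and an optional node ACL:
 *
 *	se_sess = transport_init_session();
 *	if (IS_ERR(se_sess))
 *		return PTR_ERR(se_sess);
 *	transport_register_session(se_tpg, se_nacl, se_sess, fabric_ptr);
 */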
431 | ||
432 | /* | |
433 | * Called with spin_lock_bh(&struct se_portal_group->session_lock called. | |
434 | */ | |
435 | void __transport_register_session( | |
436 | struct se_portal_group *se_tpg, | |
437 | struct se_node_acl *se_nacl, | |
438 | struct se_session *se_sess, | |
439 | void *fabric_sess_ptr) | |
440 | { | |
441 | unsigned char buf[PR_REG_ISID_LEN]; | |
442 | ||
443 | se_sess->se_tpg = se_tpg; | |
444 | se_sess->fabric_sess_ptr = fabric_sess_ptr; | |
445 | /* | |
446 | * Used by struct se_node_acl's under ConfigFS to locate active se_session-t | |
447 | * | |
448 | * Only set for struct se_session's that will actually be moving I/O. | |
449 | * eg: *NOT* discovery sessions. | |
450 | */ | |
451 | if (se_nacl) { | |
452 | /* | |
453 | * If the fabric module supports an ISID based TransportID, | |
454 | * save this value in binary from the fabric I_T Nexus now. | |
455 | */ | |
e3d6f909 | 456 | if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { |
c66ac9db | 457 | memset(&buf[0], 0, PR_REG_ISID_LEN); |
e3d6f909 | 458 | se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, |
c66ac9db NB |
459 | &buf[0], PR_REG_ISID_LEN); |
460 | se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); | |
461 | } | |
462 | spin_lock_irq(&se_nacl->nacl_sess_lock); | |
463 | /* | |
464 | * The se_nacl->nacl_sess pointer will be set to the | |
465 | * last active I_T Nexus for each struct se_node_acl. | |
466 | */ | |
467 | se_nacl->nacl_sess = se_sess; | |
468 | ||
469 | list_add_tail(&se_sess->sess_acl_list, | |
470 | &se_nacl->acl_sess_list); | |
471 | spin_unlock_irq(&se_nacl->nacl_sess_lock); | |
472 | } | |
473 | list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); | |
474 | ||
475 | printk(KERN_INFO "TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", | |
e3d6f909 | 476 | se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr); |
c66ac9db NB |
477 | } |
478 | EXPORT_SYMBOL(__transport_register_session); | |
479 | ||
480 | void transport_register_session( | |
481 | struct se_portal_group *se_tpg, | |
482 | struct se_node_acl *se_nacl, | |
483 | struct se_session *se_sess, | |
484 | void *fabric_sess_ptr) | |
485 | { | |
486 | spin_lock_bh(&se_tpg->session_lock); | |
487 | __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr); | |
488 | spin_unlock_bh(&se_tpg->session_lock); | |
489 | } | |
490 | EXPORT_SYMBOL(transport_register_session); | |
491 | ||
492 | void transport_deregister_session_configfs(struct se_session *se_sess) | |
493 | { | |
494 | struct se_node_acl *se_nacl; | |
23388864 | 495 | unsigned long flags; |
c66ac9db NB |
496 | /* |
497 | * Used by struct se_node_acl's under ConfigFS to locate active struct se_session | |
498 | */ | |
499 | se_nacl = se_sess->se_node_acl; | |
500 | if ((se_nacl)) { | |
23388864 | 501 | spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); |
c66ac9db NB |
502 | list_del(&se_sess->sess_acl_list); |
503 | /* | |
504 | * If the session list is empty, then clear the pointer. | |
505 | * Otherwise, set the struct se_session pointer from the tail | |
506 | * element of the per struct se_node_acl active session list. | |
507 | */ | |
508 | if (list_empty(&se_nacl->acl_sess_list)) | |
509 | se_nacl->nacl_sess = NULL; | |
510 | else { | |
511 | se_nacl->nacl_sess = container_of( | |
512 | se_nacl->acl_sess_list.prev, | |
513 | struct se_session, sess_acl_list); | |
514 | } | |
23388864 | 515 | spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); |
c66ac9db NB |
516 | } |
517 | } | |
518 | EXPORT_SYMBOL(transport_deregister_session_configfs); | |
519 | ||
520 | void transport_free_session(struct se_session *se_sess) | |
521 | { | |
522 | kmem_cache_free(se_sess_cache, se_sess); | |
523 | } | |
524 | EXPORT_SYMBOL(transport_free_session); | |
525 | ||
526 | void transport_deregister_session(struct se_session *se_sess) | |
527 | { | |
528 | struct se_portal_group *se_tpg = se_sess->se_tpg; | |
529 | struct se_node_acl *se_nacl; | |
530 | ||
531 | if (!(se_tpg)) { | |
532 | transport_free_session(se_sess); | |
533 | return; | |
534 | } | |
c66ac9db NB |
535 | |
536 | spin_lock_bh(&se_tpg->session_lock); | |
537 | list_del(&se_sess->sess_list); | |
538 | se_sess->se_tpg = NULL; | |
539 | se_sess->fabric_sess_ptr = NULL; | |
540 | spin_unlock_bh(&se_tpg->session_lock); | |
541 | ||
542 | /* | |
543 | * Determine if we need to do extra work for this initiator node's | |
544 | * struct se_node_acl if it had been previously dynamically generated. | |
545 | */ | |
546 | se_nacl = se_sess->se_node_acl; | |
547 | if ((se_nacl)) { | |
548 | spin_lock_bh(&se_tpg->acl_node_lock); | |
549 | if (se_nacl->dynamic_node_acl) { | |
e3d6f909 | 550 | if (!(se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache( |
c66ac9db NB |
551 | se_tpg))) { |
552 | list_del(&se_nacl->acl_list); | |
553 | se_tpg->num_node_acls--; | |
554 | spin_unlock_bh(&se_tpg->acl_node_lock); | |
555 | ||
556 | core_tpg_wait_for_nacl_pr_ref(se_nacl); | |
c66ac9db | 557 | core_free_device_list_for_node(se_nacl, se_tpg); |
e3d6f909 | 558 | se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, |
c66ac9db NB |
559 | se_nacl); |
560 | spin_lock_bh(&se_tpg->acl_node_lock); | |
561 | } | |
562 | } | |
563 | spin_unlock_bh(&se_tpg->acl_node_lock); | |
564 | } | |
565 | ||
566 | transport_free_session(se_sess); | |
567 | ||
568 | printk(KERN_INFO "TARGET_CORE[%s]: Deregistered fabric_sess\n", | |
e3d6f909 | 569 | se_tpg->se_tpg_tfo->get_fabric_name()); |
c66ac9db NB |
570 | } |
571 | EXPORT_SYMBOL(transport_deregister_session); | |
572 | ||
573 | /* | |
a1d8b49a | 574 | * Called with cmd->t_state_lock held. |
c66ac9db NB |
575 | */ |
576 | static void transport_all_task_dev_remove_state(struct se_cmd *cmd) | |
577 | { | |
578 | struct se_device *dev; | |
579 | struct se_task *task; | |
580 | unsigned long flags; | |
581 | ||
a1d8b49a | 582 | list_for_each_entry(task, &cmd->t_task_list, t_list) { |
c66ac9db NB |
583 | dev = task->se_dev; |
584 | if (!(dev)) | |
585 | continue; | |
586 | ||
587 | if (atomic_read(&task->task_active)) | |
588 | continue; | |
589 | ||
590 | if (!(atomic_read(&task->task_state_active))) | |
591 | continue; | |
592 | ||
593 | spin_lock_irqsave(&dev->execute_task_lock, flags); | |
594 | list_del(&task->t_state_list); | |
595 | DEBUG_TSTATE("Removed ITT: 0x%08x dev: %p task[%p]\n", | |
e3d6f909 | 596 | cmd->se_tfo->tfo_get_task_tag(cmd), dev, task); |
c66ac9db NB |
597 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); |
598 | ||
599 | atomic_set(&task->task_state_active, 0); | |
a1d8b49a | 600 | atomic_dec(&cmd->t_task_cdbs_ex_left); |
c66ac9db NB |
601 | } |
602 | } | |
603 | ||
604 | /* transport_cmd_check_stop(): | |
605 | * | |
606 | * 'transport_off = 1' determines if t_transport_active should be cleared. | |
607 | * 'transport_off = 2' determines if task_dev_state should be removed. | |
608 | * | |
609 | * A non-zero u8 t_state sets cmd->t_state. | |
610 | * Returns 1 when command is stopped, else 0. | |
611 | */ | |
612 | static int transport_cmd_check_stop( | |
613 | struct se_cmd *cmd, | |
614 | int transport_off, | |
615 | u8 t_state) | |
616 | { | |
617 | unsigned long flags; | |
618 | ||
a1d8b49a | 619 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db NB |
620 | /* |
621 | * Determine if IOCTL context caller in requesting the stopping of this | |
622 | * command for LUN shutdown purposes. | |
623 | */ | |
a1d8b49a AG |
624 | if (atomic_read(&cmd->transport_lun_stop)) { |
625 | DEBUG_CS("%s:%d atomic_read(&cmd->transport_lun_stop)" | |
c66ac9db | 626 | " == TRUE for ITT: 0x%08x\n", __func__, __LINE__, |
e3d6f909 | 627 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db NB |
628 | |
629 | cmd->deferred_t_state = cmd->t_state; | |
630 | cmd->t_state = TRANSPORT_DEFERRED_CMD; | |
a1d8b49a | 631 | atomic_set(&cmd->t_transport_active, 0); |
c66ac9db NB |
632 | if (transport_off == 2) |
633 | transport_all_task_dev_remove_state(cmd); | |
a1d8b49a | 634 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 635 | |
a1d8b49a | 636 | complete(&cmd->transport_lun_stop_comp); |
c66ac9db NB |
637 | return 1; |
638 | } | |
639 | /* | |
640 | * Determine if frontend context caller is requesting the stopping of | |
e3d6f909 | 641 | * this command for frontend exceptions. |
c66ac9db | 642 | */ |
a1d8b49a AG |
643 | if (atomic_read(&cmd->t_transport_stop)) { |
644 | DEBUG_CS("%s:%d atomic_read(&cmd->t_transport_stop) ==" | |
c66ac9db | 645 | " TRUE for ITT: 0x%08x\n", __func__, __LINE__, |
e3d6f909 | 646 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db NB |
647 | |
648 | cmd->deferred_t_state = cmd->t_state; | |
649 | cmd->t_state = TRANSPORT_DEFERRED_CMD; | |
650 | if (transport_off == 2) | |
651 | transport_all_task_dev_remove_state(cmd); | |
652 | ||
653 | /* | |
654 | * Clear struct se_cmd->se_lun before the transport_off == 2 handoff | |
655 | * to FE. | |
656 | */ | |
657 | if (transport_off == 2) | |
658 | cmd->se_lun = NULL; | |
a1d8b49a | 659 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 660 | |
a1d8b49a | 661 | complete(&cmd->t_transport_stop_comp); |
c66ac9db NB |
662 | return 1; |
663 | } | |
664 | if (transport_off) { | |
a1d8b49a | 665 | atomic_set(&cmd->t_transport_active, 0); |
c66ac9db NB |
666 | if (transport_off == 2) { |
667 | transport_all_task_dev_remove_state(cmd); | |
668 | /* | |
669 | * Clear struct se_cmd->se_lun before the transport_off == 2 | |
670 | * handoff to fabric module. | |
671 | */ | |
672 | cmd->se_lun = NULL; | |
673 | /* | |
674 | * Some fabric modules like tcm_loop can release | |
25985edc | 675 | * their internally allocated I/O reference now and |
c66ac9db NB |
676 | * struct se_cmd now. |
677 | */ | |
e3d6f909 | 678 | if (cmd->se_tfo->check_stop_free != NULL) { |
c66ac9db | 679 | spin_unlock_irqrestore( |
a1d8b49a | 680 | &cmd->t_state_lock, flags); |
c66ac9db | 681 | |
e3d6f909 | 682 | cmd->se_tfo->check_stop_free(cmd); |
c66ac9db NB |
683 | return 1; |
684 | } | |
685 | } | |
a1d8b49a | 686 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
687 | |
688 | return 0; | |
689 | } else if (t_state) | |
690 | cmd->t_state = t_state; | |
a1d8b49a | 691 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
692 | |
693 | return 0; | |
694 | } | |
695 | ||
696 | static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) | |
697 | { | |
698 | return transport_cmd_check_stop(cmd, 2, 0); | |
699 | } | |
700 | ||
701 | static void transport_lun_remove_cmd(struct se_cmd *cmd) | |
702 | { | |
e3d6f909 | 703 | struct se_lun *lun = cmd->se_lun; |
c66ac9db NB |
704 | unsigned long flags; |
705 | ||
706 | if (!lun) | |
707 | return; | |
708 | ||
a1d8b49a AG |
709 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
710 | if (!(atomic_read(&cmd->transport_dev_active))) { | |
711 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
c66ac9db NB |
712 | goto check_lun; |
713 | } | |
a1d8b49a | 714 | atomic_set(&cmd->transport_dev_active, 0); |
c66ac9db | 715 | transport_all_task_dev_remove_state(cmd); |
a1d8b49a | 716 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 717 | |
c66ac9db NB |
718 | |
719 | check_lun: | |
720 | spin_lock_irqsave(&lun->lun_cmd_lock, flags); | |
a1d8b49a | 721 | if (atomic_read(&cmd->transport_lun_active)) { |
5951146d | 722 | list_del(&cmd->se_lun_node); |
a1d8b49a | 723 | atomic_set(&cmd->transport_lun_active, 0); |
c66ac9db NB |
724 | #if 0 |
725 | printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n" | |
e3d6f909 | 726 | cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun); |
c66ac9db NB |
727 | #endif |
728 | } | |
729 | spin_unlock_irqrestore(&lun->lun_cmd_lock, flags); | |
730 | } | |
731 | ||
732 | void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) | |
733 | { | |
5951146d | 734 | transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); |
c66ac9db NB |
735 | transport_lun_remove_cmd(cmd); |
736 | ||
737 | if (transport_cmd_check_stop_to_fabric(cmd)) | |
738 | return; | |
739 | if (remove) | |
740 | transport_generic_remove(cmd, 0, 0); | |
741 | } | |
742 | ||
743 | void transport_cmd_finish_abort_tmr(struct se_cmd *cmd) | |
744 | { | |
5951146d | 745 | transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); |
c66ac9db NB |
746 | |
747 | if (transport_cmd_check_stop_to_fabric(cmd)) | |
748 | return; | |
749 | ||
750 | transport_generic_remove(cmd, 0, 0); | |
751 | } | |
752 | ||
5951146d | 753 | static void transport_add_cmd_to_queue( |
c66ac9db NB |
754 | struct se_cmd *cmd, |
755 | int t_state) | |
756 | { | |
757 | struct se_device *dev = cmd->se_dev; | |
e3d6f909 | 758 | struct se_queue_obj *qobj = &dev->dev_queue_obj; |
c66ac9db NB |
759 | unsigned long flags; |
760 | ||
5951146d | 761 | INIT_LIST_HEAD(&cmd->se_queue_node); |
c66ac9db NB |
762 | |
763 | if (t_state) { | |
a1d8b49a | 764 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 765 | cmd->t_state = t_state; |
a1d8b49a AG |
766 | atomic_set(&cmd->t_transport_active, 1); |
767 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
c66ac9db NB |
768 | } |
769 | ||
770 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | |
5951146d | 771 | list_add_tail(&cmd->se_queue_node, &qobj->qobj_list); |
a1d8b49a | 772 | atomic_inc(&cmd->t_transport_queue_active); |
c66ac9db NB |
773 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); |
774 | ||
775 | atomic_inc(&qobj->queue_cnt); | |
776 | wake_up_interruptible(&qobj->thread_wq); | |
c66ac9db NB |
777 | } |
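
/*
 * transport_add_cmd_to_queue() and transport_get_cmd_from_queue() form a
 * producer/consumer pair around struct se_queue_obj: the producer links
 * the command onto qobj->qobj_list under cmd_queue_lock, bumps
 * qobj->queue_cnt and wakes qobj->thread_wq, while the per-device
 * processing thread drains one struct se_cmd at a time via the
 * consumer below.
 */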
778 | ||
5951146d AG |
779 | static struct se_cmd * |
780 | transport_get_cmd_from_queue(struct se_queue_obj *qobj) | |
c66ac9db | 781 | { |
5951146d | 782 | struct se_cmd *cmd; |
c66ac9db NB |
783 | unsigned long flags; |
784 | ||
785 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | |
786 | if (list_empty(&qobj->qobj_list)) { | |
787 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | |
788 | return NULL; | |
789 | } | |
5951146d | 790 | cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node); |
c66ac9db | 791 | |
a1d8b49a | 792 | atomic_dec(&cmd->t_transport_queue_active); |
c66ac9db | 793 | |
5951146d | 794 | list_del(&cmd->se_queue_node); |
c66ac9db NB |
795 | atomic_dec(&qobj->queue_cnt); |
796 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | |
797 | ||
5951146d | 798 | return cmd; |
c66ac9db NB |
799 | } |
800 | ||
801 | static void transport_remove_cmd_from_queue(struct se_cmd *cmd, | |
802 | struct se_queue_obj *qobj) | |
803 | { | |
5951146d | 804 | struct se_cmd *t; |
c66ac9db NB |
805 | unsigned long flags; |
806 | ||
807 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | |
a1d8b49a | 808 | if (!(atomic_read(&cmd->t_transport_queue_active))) { |
c66ac9db NB |
809 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); |
810 | return; | |
811 | } | |
812 | ||
5951146d AG |
813 | list_for_each_entry(t, &qobj->qobj_list, se_queue_node) |
814 | if (t == cmd) { | |
a1d8b49a | 815 | atomic_dec(&cmd->t_transport_queue_active); |
5951146d AG |
816 | atomic_dec(&qobj->queue_cnt); |
817 | list_del(&cmd->se_queue_node); | |
818 | break; | |
819 | } | |
c66ac9db NB |
820 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); |
821 | ||
a1d8b49a | 822 | if (atomic_read(&cmd->t_transport_queue_active)) { |
c66ac9db | 823 | printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n", |
e3d6f909 | 824 | cmd->se_tfo->get_task_tag(cmd), |
a1d8b49a | 825 | atomic_read(&cmd->t_transport_queue_active)); |
c66ac9db NB |
826 | } |
827 | } | |
828 | ||
829 | /* | |
830 | * Completion function used by TCM subsystem plugins (such as FILEIO) | |
831 | * for queueing up response from struct se_subsystem_api->do_task() | |
832 | */ | |
833 | void transport_complete_sync_cache(struct se_cmd *cmd, int good) | |
834 | { | |
a1d8b49a | 835 | struct se_task *task = list_entry(cmd->t_task_list.next, |
c66ac9db NB |
836 | struct se_task, t_list); |
837 | ||
838 | if (good) { | |
839 | cmd->scsi_status = SAM_STAT_GOOD; | |
840 | task->task_scsi_status = GOOD; | |
841 | } else { | |
842 | task->task_scsi_status = SAM_STAT_CHECK_CONDITION; | |
843 | task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST; | |
e3d6f909 | 844 | task->task_se_cmd->transport_error_status = |
c66ac9db NB |
845 | PYX_TRANSPORT_ILLEGAL_REQUEST; |
846 | } | |
847 | ||
848 | transport_complete_task(task, good); | |
849 | } | |
850 | EXPORT_SYMBOL(transport_complete_sync_cache); | |
851 | ||
852 | /* transport_complete_task(): | |
853 | * | |
854 | * Called from interrupt and non interrupt context depending | |
855 | * on the transport plugin. | |
856 | */ | |
857 | void transport_complete_task(struct se_task *task, int success) | |
858 | { | |
e3d6f909 | 859 | struct se_cmd *cmd = task->task_se_cmd; |
c66ac9db NB |
860 | struct se_device *dev = task->se_dev; |
861 | int t_state; | |
862 | unsigned long flags; | |
863 | #if 0 | |
864 | printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task, | |
a1d8b49a | 865 | cmd->t_task_cdb[0], dev); |
c66ac9db | 866 | #endif |
e3d6f909 | 867 | if (dev) |
c66ac9db | 868 | atomic_inc(&dev->depth_left); |
c66ac9db | 869 | |
a1d8b49a | 870 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db NB |
871 | atomic_set(&task->task_active, 0); |
872 | ||
873 | /* | |
874 | * See if any sense data exists, if so set the TASK_SENSE flag. | |
875 | * Also check for any other post completion work that needs to be | |
876 | * done by the plugins. | |
877 | */ | |
878 | if (dev && dev->transport->transport_complete) { | |
879 | if (dev->transport->transport_complete(task) != 0) { | |
880 | cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE; | |
881 | task->task_sense = 1; | |
882 | success = 1; | |
883 | } | |
884 | } | |
885 | ||
886 | /* | |
887 | * See if we are waiting for outstanding struct se_task | |
888 | * to complete for an exception condition | |
889 | */ | |
890 | if (atomic_read(&task->task_stop)) { | |
891 | /* | |
a1d8b49a | 892 | * Decrement cmd->t_se_count if this task had |
c66ac9db NB |
893 | * previously thrown its timeout exception handler. |
894 | */ | |
895 | if (atomic_read(&task->task_timeout)) { | |
a1d8b49a | 896 | atomic_dec(&cmd->t_se_count); |
c66ac9db NB |
897 | atomic_set(&task->task_timeout, 0); |
898 | } | |
a1d8b49a | 899 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
900 | |
901 | complete(&task->task_stop_comp); | |
902 | return; | |
903 | } | |
904 | /* | |
905 | * If the task's timeout handler has fired, use the t_task_cdbs_timeout | |
906 | * left counter to determine when the struct se_cmd is ready to be queued to | |
907 | * the processing thread. | |
908 | */ | |
909 | if (atomic_read(&task->task_timeout)) { | |
910 | if (!(atomic_dec_and_test( | |
a1d8b49a AG |
911 | &cmd->t_task_cdbs_timeout_left))) { |
912 | spin_unlock_irqrestore(&cmd->t_state_lock, | |
c66ac9db NB |
913 | flags); |
914 | return; | |
915 | } | |
916 | t_state = TRANSPORT_COMPLETE_TIMEOUT; | |
a1d8b49a | 917 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
918 | |
919 | transport_add_cmd_to_queue(cmd, t_state); | |
920 | return; | |
921 | } | |
a1d8b49a | 922 | atomic_dec(&cmd->t_task_cdbs_timeout_left); |
c66ac9db NB |
923 | |
924 | /* | |
925 | * Decrement the outstanding t_task_cdbs_left count. The last | |
926 | * struct se_task from struct se_cmd will complete itself into the | |
927 | * device queue depending upon int success. | |
928 | */ | |
a1d8b49a | 929 | if (!(atomic_dec_and_test(&cmd->t_task_cdbs_left))) { |
c66ac9db | 930 | if (!success) |
a1d8b49a | 931 | cmd->t_tasks_failed = 1; |
c66ac9db | 932 | |
a1d8b49a | 933 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
934 | return; |
935 | } | |
936 | ||
a1d8b49a | 937 | if (!success || cmd->t_tasks_failed) { |
c66ac9db NB |
938 | t_state = TRANSPORT_COMPLETE_FAILURE; |
939 | if (!task->task_error_status) { | |
940 | task->task_error_status = | |
941 | PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | |
942 | cmd->transport_error_status = | |
943 | PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | |
944 | } | |
945 | } else { | |
a1d8b49a | 946 | atomic_set(&cmd->t_transport_complete, 1); |
c66ac9db NB |
947 | t_state = TRANSPORT_COMPLETE_OK; |
948 | } | |
a1d8b49a | 949 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
950 | |
951 | transport_add_cmd_to_queue(cmd, t_state); | |
952 | } | |
953 | EXPORT_SYMBOL(transport_complete_task); | |
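
/*
 * Summary of the completion paths above, in the order they are checked:
 *
 * 1) task_stop: an exception handler is waiting on this task, so just
 *    complete(&task->task_stop_comp) and let the waiter clean up.
 * 2) task_timeout: the timeout handler already fired; once
 *    t_task_cdbs_timeout_left reaches zero the command is requeued as
 *    TRANSPORT_COMPLETE_TIMEOUT.
 * 3) Normal case: the last task to decrement t_task_cdbs_left queues the
 *    command as TRANSPORT_COMPLETE_OK or TRANSPORT_COMPLETE_FAILURE.
 */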
954 | ||
955 | /* | |
956 | * Called by transport_add_tasks_from_cmd() once a struct se_cmd's | |
957 | * struct se_task list are ready to be added to the active execution list | |
958 | * struct se_device | |
959 | ||
960 | * Called with se_dev_t->execute_task_lock called. | |
961 | */ | |
962 | static inline int transport_add_task_check_sam_attr( | |
963 | struct se_task *task, | |
964 | struct se_task *task_prev, | |
965 | struct se_device *dev) | |
966 | { | |
967 | /* | |
968 | * No SAM Task attribute emulation enabled, add to tail of | |
969 | * execution queue | |
970 | */ | |
971 | if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) { | |
972 | list_add_tail(&task->t_execute_list, &dev->execute_task_list); | |
973 | return 0; | |
974 | } | |
975 | /* | |
976 | * HEAD_OF_QUEUE attribute for received CDB, which means | |
977 | * the first task that is associated with a struct se_cmd goes to | |
978 | * head of the struct se_device->execute_task_list, and task_prev | |
979 | * after that for each subsequent task | |
980 | */ | |
e66ecd50 | 981 | if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) { |
c66ac9db NB |
982 | list_add(&task->t_execute_list, |
983 | (task_prev != NULL) ? | |
984 | &task_prev->t_execute_list : | |
985 | &dev->execute_task_list); | |
986 | ||
987 | DEBUG_STA("Set HEAD_OF_QUEUE for task CDB: 0x%02x" | |
988 | " in execution queue\n", | |
989 | T_TASK(task->task_se_cmd)->t_task_cdb[0]); | |
990 | return 1; | |
991 | } | |
992 | /* | |
993 | * For ORDERED, SIMPLE or UNTAGGED attribute tasks once they have been | |
994 | * transitioned from Dermant -> Active state, and are added to the end | |
995 | * of the struct se_device->execute_task_list | |
996 | */ | |
997 | list_add_tail(&task->t_execute_list, &dev->execute_task_list); | |
998 | return 0; | |
999 | } | |
1000 | ||
1001 | /* __transport_add_task_to_execute_queue(): | |
1002 | * | |
1003 | * Called with se_dev_t->execute_task_lock called. | |
1004 | */ | |
1005 | static void __transport_add_task_to_execute_queue( | |
1006 | struct se_task *task, | |
1007 | struct se_task *task_prev, | |
1008 | struct se_device *dev) | |
1009 | { | |
1010 | int head_of_queue; | |
1011 | ||
1012 | head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev); | |
1013 | atomic_inc(&dev->execute_tasks); | |
1014 | ||
1015 | if (atomic_read(&task->task_state_active)) | |
1016 | return; | |
1017 | /* | |
1018 | * Determine if this task needs to go to HEAD_OF_QUEUE for the | |
1019 | * state list as well. Running with SAM Task Attribute emulation | |
1020 | * will always return head_of_queue == 0 here | |
1021 | */ | |
1022 | if (head_of_queue) | |
1023 | list_add(&task->t_state_list, (task_prev) ? | |
1024 | &task_prev->t_state_list : | |
1025 | &dev->state_task_list); | |
1026 | else | |
1027 | list_add_tail(&task->t_state_list, &dev->state_task_list); | |
1028 | ||
1029 | atomic_set(&task->task_state_active, 1); | |
1030 | ||
1031 | DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n", | |
e3d6f909 | 1032 | task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd), |
c66ac9db NB |
1033 | task, dev); |
1034 | } | |
1035 | ||
1036 | static void transport_add_tasks_to_state_queue(struct se_cmd *cmd) | |
1037 | { | |
1038 | struct se_device *dev; | |
1039 | struct se_task *task; | |
1040 | unsigned long flags; | |
1041 | ||
a1d8b49a AG |
1042 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
1043 | list_for_each_entry(task, &cmd->t_task_list, t_list) { | |
c66ac9db NB |
1044 | dev = task->se_dev; |
1045 | ||
1046 | if (atomic_read(&task->task_state_active)) | |
1047 | continue; | |
1048 | ||
1049 | spin_lock(&dev->execute_task_lock); | |
1050 | list_add_tail(&task->t_state_list, &dev->state_task_list); | |
1051 | atomic_set(&task->task_state_active, 1); | |
1052 | ||
1053 | DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n", | |
e3d6f909 | 1054 | task->se_cmd->se_tfo->get_task_tag( |
c66ac9db NB |
1055 | task->task_se_cmd), task, dev); |
1056 | ||
1057 | spin_unlock(&dev->execute_task_lock); | |
1058 | } | |
a1d8b49a | 1059 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
1060 | } |
1061 | ||
1062 | static void transport_add_tasks_from_cmd(struct se_cmd *cmd) | |
1063 | { | |
5951146d | 1064 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
1065 | struct se_task *task, *task_prev = NULL; |
1066 | unsigned long flags; | |
1067 | ||
1068 | spin_lock_irqsave(&dev->execute_task_lock, flags); | |
a1d8b49a | 1069 | list_for_each_entry(task, &cmd->t_task_list, t_list) { |
c66ac9db NB |
1070 | if (atomic_read(&task->task_execute_queue)) |
1071 | continue; | |
1072 | /* | |
1073 | * __transport_add_task_to_execute_queue() handles the | |
1074 | * SAM Task Attribute emulation if enabled | |
1075 | */ | |
1076 | __transport_add_task_to_execute_queue(task, task_prev, dev); | |
1077 | atomic_set(&task->task_execute_queue, 1); | |
1078 | task_prev = task; | |
1079 | } | |
1080 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | |
c66ac9db NB |
1081 | } |
1082 | ||
1083 | /* transport_remove_task_from_execute_queue(): | |
1084 | * | |
1085 | * | |
1086 | */ | |
52208ae3 | 1087 | void transport_remove_task_from_execute_queue( |
c66ac9db NB |
1088 | struct se_task *task, |
1089 | struct se_device *dev) | |
1090 | { | |
1091 | unsigned long flags; | |
1092 | ||
af57c3ac NB |
1093 | if (atomic_read(&task->task_execute_queue) == 0) { |
1094 | dump_stack(); | |
1095 | return; | |
1096 | } | |
1097 | ||
c66ac9db NB |
1098 | spin_lock_irqsave(&dev->execute_task_lock, flags); |
1099 | list_del(&task->t_execute_list); | |
af57c3ac | 1100 | atomic_set(&task->task_execute_queue, 0); |
c66ac9db NB |
1101 | atomic_dec(&dev->execute_tasks); |
1102 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | |
1103 | } | |
1104 | ||
1105 | unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) | |
1106 | { | |
1107 | switch (cmd->data_direction) { | |
1108 | case DMA_NONE: | |
1109 | return "NONE"; | |
1110 | case DMA_FROM_DEVICE: | |
1111 | return "READ"; | |
1112 | case DMA_TO_DEVICE: | |
1113 | return "WRITE"; | |
1114 | case DMA_BIDIRECTIONAL: | |
1115 | return "BIDI"; | |
1116 | default: | |
1117 | break; | |
1118 | } | |
1119 | ||
1120 | return "UNKNOWN"; | |
1121 | } | |
1122 | ||
1123 | void transport_dump_dev_state( | |
1124 | struct se_device *dev, | |
1125 | char *b, | |
1126 | int *bl) | |
1127 | { | |
1128 | *bl += sprintf(b + *bl, "Status: "); | |
1129 | switch (dev->dev_status) { | |
1130 | case TRANSPORT_DEVICE_ACTIVATED: | |
1131 | *bl += sprintf(b + *bl, "ACTIVATED"); | |
1132 | break; | |
1133 | case TRANSPORT_DEVICE_DEACTIVATED: | |
1134 | *bl += sprintf(b + *bl, "DEACTIVATED"); | |
1135 | break; | |
1136 | case TRANSPORT_DEVICE_SHUTDOWN: | |
1137 | *bl += sprintf(b + *bl, "SHUTDOWN"); | |
1138 | break; | |
1139 | case TRANSPORT_DEVICE_OFFLINE_ACTIVATED: | |
1140 | case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED: | |
1141 | *bl += sprintf(b + *bl, "OFFLINE"); | |
1142 | break; | |
1143 | default: | |
1144 | *bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status); | |
1145 | break; | |
1146 | } | |
1147 | ||
1148 | *bl += sprintf(b + *bl, " Execute/Left/Max Queue Depth: %d/%d/%d", | |
1149 | atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left), | |
1150 | dev->queue_depth); | |
1151 | *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n", | |
e3d6f909 | 1152 | dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors); |
c66ac9db NB |
1153 | *bl += sprintf(b + *bl, " "); |
1154 | } | |
1155 | ||
1156 | /* transport_release_all_cmds(): | |
1157 | * | |
1158 | * | |
1159 | */ | |
1160 | static void transport_release_all_cmds(struct se_device *dev) | |
1161 | { | |
5951146d | 1162 | struct se_cmd *cmd, *tcmd; |
c66ac9db NB |
1163 | int bug_out = 0, t_state; |
1164 | unsigned long flags; | |
1165 | ||
e3d6f909 | 1166 | spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags); |
5951146d AG |
1167 | list_for_each_entry_safe(cmd, tcmd, &dev->dev_queue_obj.qobj_list, |
1168 | se_queue_node) { | |
1169 | t_state = cmd->t_state; | |
1170 | list_del(&cmd->se_queue_node); | |
e3d6f909 | 1171 | spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, |
c66ac9db NB |
1172 | flags); |
1173 | ||
1174 | printk(KERN_ERR "Releasing ITT: 0x%08x, i_state: %u," | |
1175 | " t_state: %u directly\n", | |
e3d6f909 AG |
1176 | cmd->se_tfo->get_task_tag(cmd), |
1177 | cmd->se_tfo->get_cmd_state(cmd), t_state); | |
c66ac9db NB |
1178 | |
1179 | transport_release_fe_cmd(cmd); | |
1180 | bug_out = 1; | |
1181 | ||
e3d6f909 | 1182 | spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags); |
c66ac9db | 1183 | } |
e3d6f909 | 1184 | spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, flags); |
c66ac9db NB |
1185 | #if 0 |
1186 | if (bug_out) | |
1187 | BUG(); | |
1188 | #endif | |
1189 | } | |
1190 | ||
1191 | void transport_dump_vpd_proto_id( | |
1192 | struct t10_vpd *vpd, | |
1193 | unsigned char *p_buf, | |
1194 | int p_buf_len) | |
1195 | { | |
1196 | unsigned char buf[VPD_TMP_BUF_SIZE]; | |
1197 | int len; | |
1198 | ||
1199 | memset(buf, 0, VPD_TMP_BUF_SIZE); | |
1200 | len = sprintf(buf, "T10 VPD Protocol Identifier: "); | |
1201 | ||
1202 | switch (vpd->protocol_identifier) { | |
1203 | case 0x00: | |
1204 | sprintf(buf+len, "Fibre Channel\n"); | |
1205 | break; | |
1206 | case 0x10: | |
1207 | sprintf(buf+len, "Parallel SCSI\n"); | |
1208 | break; | |
1209 | case 0x20: | |
1210 | sprintf(buf+len, "SSA\n"); | |
1211 | break; | |
1212 | case 0x30: | |
1213 | sprintf(buf+len, "IEEE 1394\n"); | |
1214 | break; | |
1215 | case 0x40: | |
1216 | sprintf(buf+len, "SCSI Remote Direct Memory Access" | |
1217 | " Protocol\n"); | |
1218 | break; | |
1219 | case 0x50: | |
1220 | sprintf(buf+len, "Internet SCSI (iSCSI)\n"); | |
1221 | break; | |
1222 | case 0x60: | |
1223 | sprintf(buf+len, "SAS Serial SCSI Protocol\n"); | |
1224 | break; | |
1225 | case 0x70: | |
1226 | sprintf(buf+len, "Automation/Drive Interface Transport" | |
1227 | " Protocol\n"); | |
1228 | break; | |
1229 | case 0x80: | |
1230 | sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n"); | |
1231 | break; | |
1232 | default: | |
1233 | sprintf(buf+len, "Unknown 0x%02x\n", | |
1234 | vpd->protocol_identifier); | |
1235 | break; | |
1236 | } | |
1237 | ||
1238 | if (p_buf) | |
1239 | strncpy(p_buf, buf, p_buf_len); | |
1240 | else | |
1241 | printk(KERN_INFO "%s", buf); | |
1242 | } | |
1243 | ||
1244 | void | |
1245 | transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83) | |
1246 | { | |
1247 | /* | |
1248 | * Check if the Protocol Identifier Valid (PIV) bit is set.. | |
1249 | * | |
1250 | * from spc3r23.pdf section 7.5.1 | |
1251 | */ | |
1252 | if (page_83[1] & 0x80) { | |
1253 | vpd->protocol_identifier = (page_83[0] & 0xf0); | |
1254 | vpd->protocol_identifier_set = 1; | |
1255 | transport_dump_vpd_proto_id(vpd, NULL, 0); | |
1256 | } | |
1257 | } | |
1258 | EXPORT_SYMBOL(transport_set_vpd_proto_id); | |
1259 | ||
1260 | int transport_dump_vpd_assoc( | |
1261 | struct t10_vpd *vpd, | |
1262 | unsigned char *p_buf, | |
1263 | int p_buf_len) | |
1264 | { | |
1265 | unsigned char buf[VPD_TMP_BUF_SIZE]; | |
e3d6f909 AG |
1266 | int ret = 0; |
1267 | int len; | |
c66ac9db NB |
1268 | |
1269 | memset(buf, 0, VPD_TMP_BUF_SIZE); | |
1270 | len = sprintf(buf, "T10 VPD Identifier Association: "); | |
1271 | ||
1272 | switch (vpd->association) { | |
1273 | case 0x00: | |
1274 | sprintf(buf+len, "addressed logical unit\n"); | |
1275 | break; | |
1276 | case 0x10: | |
1277 | sprintf(buf+len, "target port\n"); | |
1278 | break; | |
1279 | case 0x20: | |
1280 | sprintf(buf+len, "SCSI target device\n"); | |
1281 | break; | |
1282 | default: | |
1283 | sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); | |
e3d6f909 | 1284 | ret = -EINVAL; |
c66ac9db NB |
1285 | break; |
1286 | } | |
1287 | ||
1288 | if (p_buf) | |
1289 | strncpy(p_buf, buf, p_buf_len); | |
1290 | else | |
1291 | printk("%s", buf); | |
1292 | ||
1293 | return ret; | |
1294 | } | |
1295 | ||
1296 | int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83) | |
1297 | { | |
1298 | /* | |
1299 | * The VPD identification association.. | |
1300 | * | |
1301 | * from spc3r23.pdf Section 7.6.3.1 Table 297 | |
1302 | */ | |
1303 | vpd->association = (page_83[1] & 0x30); | |
1304 | return transport_dump_vpd_assoc(vpd, NULL, 0); | |
1305 | } | |
1306 | EXPORT_SYMBOL(transport_set_vpd_assoc); | |
1307 | ||
1308 | int transport_dump_vpd_ident_type( | |
1309 | struct t10_vpd *vpd, | |
1310 | unsigned char *p_buf, | |
1311 | int p_buf_len) | |
1312 | { | |
1313 | unsigned char buf[VPD_TMP_BUF_SIZE]; | |
e3d6f909 AG |
1314 | int ret = 0; |
1315 | int len; | |
c66ac9db NB |
1316 | |
1317 | memset(buf, 0, VPD_TMP_BUF_SIZE); | |
1318 | len = sprintf(buf, "T10 VPD Identifier Type: "); | |
1319 | ||
1320 | switch (vpd->device_identifier_type) { | |
1321 | case 0x00: | |
1322 | sprintf(buf+len, "Vendor specific\n"); | |
1323 | break; | |
1324 | case 0x01: | |
1325 | sprintf(buf+len, "T10 Vendor ID based\n"); | |
1326 | break; | |
1327 | case 0x02: | |
1328 | sprintf(buf+len, "EUI-64 based\n"); | |
1329 | break; | |
1330 | case 0x03: | |
1331 | sprintf(buf+len, "NAA\n"); | |
1332 | break; | |
1333 | case 0x04: | |
1334 | sprintf(buf+len, "Relative target port identifier\n"); | |
1335 | break; | |
1336 | case 0x08: | |
1337 | sprintf(buf+len, "SCSI name string\n"); | |
1338 | break; | |
1339 | default: | |
1340 | sprintf(buf+len, "Unsupported: 0x%02x\n", | |
1341 | vpd->device_identifier_type); | |
e3d6f909 | 1342 | ret = -EINVAL; |
c66ac9db NB |
1343 | break; |
1344 | } | |
1345 | ||
e3d6f909 AG |
1346 | if (p_buf) { |
1347 | if (p_buf_len < strlen(buf)+1) | |
1348 | return -EINVAL; | |
c66ac9db | 1349 | strncpy(p_buf, buf, p_buf_len); |
e3d6f909 | 1350 | } else { |
c66ac9db | 1351 | printk("%s", buf); |
e3d6f909 | 1352 | } |
c66ac9db NB |
1353 | |
1354 | return ret; | |
1355 | } | |
1356 | ||
1357 | int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83) | |
1358 | { | |
1359 | /* | |
1360 | * The VPD identifier type.. | |
1361 | * | |
1362 | * from spc3r23.pdf Section 7.6.3.1 Table 298 | |
1363 | */ | |
1364 | vpd->device_identifier_type = (page_83[1] & 0x0f); | |
1365 | return transport_dump_vpd_ident_type(vpd, NULL, 0); | |
1366 | } | |
1367 | EXPORT_SYMBOL(transport_set_vpd_ident_type); | |
1368 | ||
1369 | int transport_dump_vpd_ident( | |
1370 | struct t10_vpd *vpd, | |
1371 | unsigned char *p_buf, | |
1372 | int p_buf_len) | |
1373 | { | |
1374 | unsigned char buf[VPD_TMP_BUF_SIZE]; | |
1375 | int ret = 0; | |
1376 | ||
1377 | memset(buf, 0, VPD_TMP_BUF_SIZE); | |
1378 | ||
1379 | switch (vpd->device_identifier_code_set) { | |
1380 | case 0x01: /* Binary */ | |
1381 | sprintf(buf, "T10 VPD Binary Device Identifier: %s\n", | |
1382 | &vpd->device_identifier[0]); | |
1383 | break; | |
1384 | case 0x02: /* ASCII */ | |
1385 | sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n", | |
1386 | &vpd->device_identifier[0]); | |
1387 | break; | |
1388 | case 0x03: /* UTF-8 */ | |
1389 | sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n", | |
1390 | &vpd->device_identifier[0]); | |
1391 | break; | |
1392 | default: | |
1393 | sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" | |
1394 | " 0x%02x", vpd->device_identifier_code_set); | |
e3d6f909 | 1395 | ret = -EINVAL; |
c66ac9db NB |
1396 | break; |
1397 | } | |
1398 | ||
1399 | if (p_buf) | |
1400 | strncpy(p_buf, buf, p_buf_len); | |
1401 | else | |
1402 | printk("%s", buf); | |
1403 | ||
1404 | return ret; | |
1405 | } | |
1406 | ||
1407 | int | |
1408 | transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83) | |
1409 | { | |
1410 | static const char hex_str[] = "0123456789abcdef"; | |
1411 | int j = 0, i = 4; /* offset to start of the identifer */ | |
1412 | ||
1413 | /* | |
1414 | * The VPD Code Set (encoding) | |
1415 | * | |
1416 | * from spc3r23.pdf Section 7.6.3.1 Table 296 | |
1417 | */ | |
1418 | vpd->device_identifier_code_set = (page_83[0] & 0x0f); | |
1419 | switch (vpd->device_identifier_code_set) { | |
1420 | case 0x01: /* Binary */ | |
1421 | vpd->device_identifier[j++] = | |
1422 | hex_str[vpd->device_identifier_type]; | |
1423 | while (i < (4 + page_83[3])) { | |
1424 | vpd->device_identifier[j++] = | |
1425 | hex_str[(page_83[i] & 0xf0) >> 4]; | |
1426 | vpd->device_identifier[j++] = | |
1427 | hex_str[page_83[i] & 0x0f]; | |
1428 | i++; | |
1429 | } | |
1430 | break; | |
1431 | case 0x02: /* ASCII */ | |
1432 | case 0x03: /* UTF-8 */ | |
1433 | while (i < (4 + page_83[3])) | |
1434 | vpd->device_identifier[j++] = page_83[i++]; | |
1435 | break; | |
1436 | default: | |
1437 | break; | |
1438 | } | |
1439 | ||
1440 | return transport_dump_vpd_ident(vpd, NULL, 0); | |
1441 | } | |
1442 | EXPORT_SYMBOL(transport_set_vpd_ident); | |
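
/*
 * Illustrative decode order (a sketch; actual callers may differ): the
 * transport_set_vpd_*() helpers above are each fed the same INQUIRY VPD
 * page 0x83 designator:
 *
 *	transport_set_vpd_proto_id(vpd, page_83);
 *	transport_set_vpd_assoc(vpd, page_83);
 *	transport_set_vpd_ident_type(vpd, page_83);
 *	transport_set_vpd_ident(vpd, page_83);
 */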
1443 | ||
1444 | static void core_setup_task_attr_emulation(struct se_device *dev) | |
1445 | { | |
1446 | /* | |
1447 | * If this device is from Target_Core_Mod/pSCSI, disable the | |
1448 | * SAM Task Attribute emulation. | |
1449 | * | |
1450 | * This is currently not available in upsream Linux/SCSI Target | |
1451 | * mode code, and is assumed to be disabled while using TCM/pSCSI. | |
1452 | */ | |
e3d6f909 | 1453 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { |
c66ac9db NB |
1454 | dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH; |
1455 | return; | |
1456 | } | |
1457 | ||
1458 | dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED; | |
1459 | DEBUG_STA("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x" | |
e3d6f909 AG |
1460 | " device\n", dev->transport->name, |
1461 | dev->transport->get_device_rev(dev)); | |
c66ac9db NB |
1462 | } |
1463 | ||
1464 | static void scsi_dump_inquiry(struct se_device *dev) | |
1465 | { | |
e3d6f909 | 1466 | struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn; |
c66ac9db NB |
1467 | int i, device_type; |
1468 | /* | |
1469 | * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer | |
1470 | */ | |
1471 | printk(" Vendor: "); | |
1472 | for (i = 0; i < 8; i++) | |
1473 | if (wwn->vendor[i] >= 0x20) | |
1474 | printk("%c", wwn->vendor[i]); | |
1475 | else | |
1476 | printk(" "); | |
1477 | ||
1478 | printk(" Model: "); | |
1479 | for (i = 0; i < 16; i++) | |
1480 | if (wwn->model[i] >= 0x20) | |
1481 | printk("%c", wwn->model[i]); | |
1482 | else | |
1483 | printk(" "); | |
1484 | ||
1485 | printk(" Revision: "); | |
1486 | for (i = 0; i < 4; i++) | |
1487 | if (wwn->revision[i] >= 0x20) | |
1488 | printk("%c", wwn->revision[i]); | |
1489 | else | |
1490 | printk(" "); | |
1491 | ||
1492 | printk("\n"); | |
1493 | ||
e3d6f909 | 1494 | device_type = dev->transport->get_device_type(dev); |
c66ac9db NB |
1495 | printk(" Type: %s ", scsi_device_type(device_type)); |
1496 | printk(" ANSI SCSI revision: %02x\n", | |
e3d6f909 | 1497 | dev->transport->get_device_rev(dev)); |
c66ac9db NB |
1498 | } |
1499 | ||
1500 | struct se_device *transport_add_device_to_core_hba( | |
1501 | struct se_hba *hba, | |
1502 | struct se_subsystem_api *transport, | |
1503 | struct se_subsystem_dev *se_dev, | |
1504 | u32 device_flags, | |
1505 | void *transport_dev, | |
1506 | struct se_dev_limits *dev_limits, | |
1507 | const char *inquiry_prod, | |
1508 | const char *inquiry_rev) | |
1509 | { | |
12a18bdc | 1510 | int force_pt; |
c66ac9db NB |
1511 | struct se_device *dev; |
1512 | ||
1513 | dev = kzalloc(sizeof(struct se_device), GFP_KERNEL); | |
1514 | if (!(dev)) { | |
1515 | printk(KERN_ERR "Unable to allocate memory for se_dev_t\n"); | |
1516 | return NULL; | |
1517 | } | |
c66ac9db | 1518 | |
e3d6f909 | 1519 | transport_init_queue_obj(&dev->dev_queue_obj); |
c66ac9db NB |
1520 | dev->dev_flags = device_flags; |
1521 | dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; | |
5951146d | 1522 | dev->dev_ptr = transport_dev; |
c66ac9db NB |
1523 | dev->se_hba = hba; |
1524 | dev->se_sub_dev = se_dev; | |
1525 | dev->transport = transport; | |
1526 | atomic_set(&dev->active_cmds, 0); | |
1527 | INIT_LIST_HEAD(&dev->dev_list); | |
1528 | INIT_LIST_HEAD(&dev->dev_sep_list); | |
1529 | INIT_LIST_HEAD(&dev->dev_tmr_list); | |
1530 | INIT_LIST_HEAD(&dev->execute_task_list); | |
1531 | INIT_LIST_HEAD(&dev->delayed_cmd_list); | |
1532 | INIT_LIST_HEAD(&dev->ordered_cmd_list); | |
1533 | INIT_LIST_HEAD(&dev->state_task_list); | |
1534 | spin_lock_init(&dev->execute_task_lock); | |
1535 | spin_lock_init(&dev->delayed_cmd_lock); | |
1536 | spin_lock_init(&dev->ordered_cmd_lock); | |
1537 | spin_lock_init(&dev->state_task_lock); | |
1538 | spin_lock_init(&dev->dev_alua_lock); | |
1539 | spin_lock_init(&dev->dev_reservation_lock); | |
1540 | spin_lock_init(&dev->dev_status_lock); | |
1541 | spin_lock_init(&dev->dev_status_thr_lock); | |
1542 | spin_lock_init(&dev->se_port_lock); | |
1543 | spin_lock_init(&dev->se_tmr_lock); | |
1544 | ||
1545 | dev->queue_depth = dev_limits->queue_depth; | |
1546 | atomic_set(&dev->depth_left, dev->queue_depth); | |
1547 | atomic_set(&dev->dev_ordered_id, 0); | |
1548 | ||
1549 | se_dev_set_default_attribs(dev, dev_limits); | |
1550 | ||
1551 | dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX); | |
1552 | dev->creation_time = get_jiffies_64(); | |
1553 | spin_lock_init(&dev->stats_lock); | |
1554 | ||
1555 | spin_lock(&hba->device_lock); | |
1556 | list_add_tail(&dev->dev_list, &hba->hba_dev_list); | |
1557 | hba->dev_count++; | |
1558 | spin_unlock(&hba->device_lock); | |
1559 | /* | |
1560 | * Setup the SAM Task Attribute emulation for struct se_device | |
1561 | */ | |
1562 | core_setup_task_attr_emulation(dev); | |
1563 | /* | |
1564 | * Force PR and ALUA passthrough emulation with internal object use. | |
1565 | */ | |
1566 | force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE); | |
1567 | /* | |
1568 | * Setup the Reservations infrastructure for struct se_device | |
1569 | */ | |
1570 | core_setup_reservations(dev, force_pt); | |
1571 | /* | |
1572 | * Setup the Asymmetric Logical Unit Assignment for struct se_device | |
1573 | */ | |
1574 | if (core_setup_alua(dev, force_pt) < 0) | |
1575 | goto out; | |
1576 | ||
1577 | /* | |
1578 | * Startup the struct se_device processing thread | |
1579 | */ | |
1580 | dev->process_thread = kthread_run(transport_processing_thread, dev, | |
e3d6f909 | 1581 | "LIO_%s", dev->transport->name); |
c66ac9db NB |
1582 | if (IS_ERR(dev->process_thread)) { |
1583 | printk(KERN_ERR "Unable to create kthread: LIO_%s\n", | |
e3d6f909 | 1584 | dev->transport->name); |
c66ac9db NB |
1585 | goto out; |
1586 | } | |
1587 | ||
1588 | /* | |
1589 | * Preload the initial INQUIRY const values if we are doing | |
1590 | * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI | |
1591 | * passthrough because this is being provided by the backend LLD. | |
1592 | * This is required so that transport_get_inquiry() copies these | |
1593 | * originals once back into dev->se_sub_dev->t10_wwn for the virtual device |
1594 | * setup. | |
1595 | */ | |
e3d6f909 | 1596 | if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { |
f22c1196 | 1597 | if (!inquiry_prod || !inquiry_rev) { |
c66ac9db NB |
1598 | printk(KERN_ERR "All non TCM/pSCSI plugins require" |
1599 | " INQUIRY consts\n"); | |
1600 | goto out; | |
1601 | } | |
1602 | ||
e3d6f909 AG |
1603 | strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8); |
1604 | strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16); | |
1605 | strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4); | |
c66ac9db NB |
1606 | } |
1607 | scsi_dump_inquiry(dev); | |
1608 | ||
12a18bdc | 1609 | return dev; |
c66ac9db | 1610 | out: |
c66ac9db NB |
1611 | if (!IS_ERR_OR_NULL(dev->process_thread)) kthread_stop(dev->process_thread); |
1612 | ||
1613 | spin_lock(&hba->device_lock); | |
1614 | list_del(&dev->dev_list); | |
1615 | hba->dev_count--; | |
1616 | spin_unlock(&hba->device_lock); | |
1617 | ||
1618 | se_release_vpd_for_dev(dev); | |
1619 | ||
c66ac9db NB |
1620 | kfree(dev); |
1621 | ||
1622 | return NULL; | |
1623 | } | |
1624 | EXPORT_SYMBOL(transport_add_device_to_core_hba); | |
1625 | ||
1626 | /* transport_generic_prepare_cdb(): | |
1627 | * | |
1628 | * Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will | |
1629 | * contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2. | |
1630 | * The point of this is that since we are mapping iSCSI LUNs to |
1631 | * SCSI Target IDs, a non-zero LUN in the CDB will throw the |
1632 | * devices and HBAs for a loop. | |
1633 | */ | |
1634 | static inline void transport_generic_prepare_cdb( | |
1635 | unsigned char *cdb) | |
1636 | { | |
1637 | switch (cdb[0]) { | |
1638 | case READ_10: /* SBC - RDProtect */ | |
1639 | case READ_12: /* SBC - RDProtect */ | |
1640 | case READ_16: /* SBC - RDProtect */ | |
1641 | case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */ | |
1642 | case VERIFY: /* SBC - VRProtect */ | |
1643 | case VERIFY_16: /* SBC - VRProtect */ | |
1644 | case WRITE_VERIFY: /* SBC - VRProtect */ | |
1645 | case WRITE_VERIFY_12: /* SBC - VRProtect */ | |
1646 | break; | |
1647 | default: | |
1648 | cdb[1] &= 0x1f; /* clear logical unit number */ | |
1649 | break; | |
1650 | } | |
1651 | } | |
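/*
 * Editor's note: a minimal user-space sketch, not part of the original
 * source, showing the effect of the cdb[1] &= 0x1f masking above for a
 * legacy CDB carrying a LUN in bits 7-5 of byte 1. All values are
 * hypothetical and for illustration only.
 */
#include <assert.h>

static void prepare_cdb_sketch(unsigned char *cdb)
{
	cdb[1] &= 0x1f;		/* clear the legacy LUN bits 7-5 */
}

static void prepare_cdb_example(void)
{
	/* TEST UNIT READY (0x00) with LUN 2 packed into byte 1: 2 << 5 == 0x40 */
	unsigned char cdb[6] = { 0x00, 0x40, 0x00, 0x00, 0x00, 0x00 };

	prepare_cdb_sketch(cdb);
	assert(cdb[1] == 0x00);	/* LUN bits cleared, low 5 bits preserved */
}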
1652 | ||
1653 | static struct se_task * | |
1654 | transport_generic_get_task(struct se_cmd *cmd, | |
1655 | enum dma_data_direction data_direction) | |
1656 | { | |
1657 | struct se_task *task; | |
5951146d | 1658 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
1659 | unsigned long flags; |
1660 | ||
1661 | task = dev->transport->alloc_task(cmd); | |
1662 | if (!task) { | |
1663 | printk(KERN_ERR "Unable to allocate struct se_task\n"); | |
1664 | return NULL; | |
1665 | } | |
1666 | ||
1667 | INIT_LIST_HEAD(&task->t_list); | |
1668 | INIT_LIST_HEAD(&task->t_execute_list); | |
1669 | INIT_LIST_HEAD(&task->t_state_list); | |
1670 | init_completion(&task->task_stop_comp); | |
c66ac9db NB |
1671 | task->task_se_cmd = cmd; |
1672 | task->se_dev = dev; | |
1673 | task->task_data_direction = data_direction; | |
1674 | ||
a1d8b49a AG |
1675 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
1676 | list_add_tail(&task->t_list, &cmd->t_task_list); | |
1677 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
c66ac9db NB |
1678 | |
1679 | return task; | |
1680 | } | |
1681 | ||
1682 | static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *); | |
1683 | ||
c66ac9db NB |
1684 | /* |
1685 | * Used by fabric modules containing a local struct se_cmd within their | |
1686 | * fabric dependent per I/O descriptor. | |
1687 | */ | |
1688 | void transport_init_se_cmd( | |
1689 | struct se_cmd *cmd, | |
1690 | struct target_core_fabric_ops *tfo, | |
1691 | struct se_session *se_sess, | |
1692 | u32 data_length, | |
1693 | int data_direction, | |
1694 | int task_attr, | |
1695 | unsigned char *sense_buffer) | |
1696 | { | |
5951146d AG |
1697 | INIT_LIST_HEAD(&cmd->se_lun_node); |
1698 | INIT_LIST_HEAD(&cmd->se_delayed_node); | |
1699 | INIT_LIST_HEAD(&cmd->se_ordered_node); | |
c66ac9db | 1700 | |
a1d8b49a AG |
1701 | INIT_LIST_HEAD(&cmd->t_mem_list); |
1702 | INIT_LIST_HEAD(&cmd->t_mem_bidi_list); | |
1703 | INIT_LIST_HEAD(&cmd->t_task_list); | |
1704 | init_completion(&cmd->transport_lun_fe_stop_comp); | |
1705 | init_completion(&cmd->transport_lun_stop_comp); | |
1706 | init_completion(&cmd->t_transport_stop_comp); | |
1707 | spin_lock_init(&cmd->t_state_lock); | |
1708 | atomic_set(&cmd->transport_dev_active, 1); | |
c66ac9db NB |
1709 | |
1710 | cmd->se_tfo = tfo; | |
1711 | cmd->se_sess = se_sess; | |
1712 | cmd->data_length = data_length; | |
1713 | cmd->data_direction = data_direction; | |
1714 | cmd->sam_task_attr = task_attr; | |
1715 | cmd->sense_buffer = sense_buffer; | |
1716 | } | |
1717 | EXPORT_SYMBOL(transport_init_se_cmd); | |
1718 | ||
1719 | static int transport_check_alloc_task_attr(struct se_cmd *cmd) | |
1720 | { | |
1721 | /* | |
1722 | * Check if SAM Task Attribute emulation is enabled for this | |
1723 | * struct se_device storage object | |
1724 | */ | |
5951146d | 1725 | if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) |
c66ac9db NB |
1726 | return 0; |
1727 | ||
e66ecd50 | 1728 | if (cmd->sam_task_attr == MSG_ACA_TAG) { |
c66ac9db NB |
1729 | DEBUG_STA("SAM Task Attribute ACA" |
1730 | " emulation is not supported\n"); | |
e3d6f909 | 1731 | return -EINVAL; |
c66ac9db NB |
1732 | } |
1733 | /* | |
1734 | * Used to determine when ORDERED commands should go from | |
1735 | * Dormant to Active status. | |
1736 | */ | |
5951146d | 1737 | cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id); |
c66ac9db NB |
1738 | smp_mb__after_atomic_inc(); |
1739 | DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", | |
1740 | cmd->se_ordered_id, cmd->sam_task_attr, | |
1741 | cmd->se_dev->transport->name); |
1742 | return 0; | |
1743 | } | |
1744 | ||
1745 | void transport_free_se_cmd( | |
1746 | struct se_cmd *se_cmd) | |
1747 | { | |
1748 | if (se_cmd->se_tmr_req) | |
1749 | core_tmr_release_req(se_cmd->se_tmr_req); | |
1750 | /* | |
1751 | * Check and free any extended CDB buffer that was allocated | |
1752 | */ | |
a1d8b49a AG |
1753 | if (se_cmd->t_task_cdb != se_cmd->__t_task_cdb) |
1754 | kfree(se_cmd->t_task_cdb); | |
c66ac9db NB |
1755 | } |
1756 | EXPORT_SYMBOL(transport_free_se_cmd); | |
1757 | ||
1758 | static void transport_generic_wait_for_tasks(struct se_cmd *, int, int); | |
1759 | ||
1760 | /* transport_generic_allocate_tasks(): | |
1761 | * | |
1762 | * Called from fabric RX Thread. | |
1763 | */ | |
1764 | int transport_generic_allocate_tasks( | |
1765 | struct se_cmd *cmd, | |
1766 | unsigned char *cdb) | |
1767 | { | |
1768 | int ret; | |
1769 | ||
1770 | transport_generic_prepare_cdb(cdb); | |
1771 | ||
1772 | /* | |
1773 | * This is needed for early exceptions. | |
1774 | */ | |
1775 | cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; | |
1776 | ||
c66ac9db NB |
1777 | /* |
1778 | * Ensure that the received CDB does not exceed the max (252 + 8) bytes |
1779 | * for VARIABLE_LENGTH_CMD | |
1780 | */ | |
1781 | if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { | |
1782 | printk(KERN_ERR "Received SCSI CDB with command_size: %d that" | |
1783 | " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", | |
1784 | scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); | |
e3d6f909 | 1785 | return -EINVAL; |
c66ac9db NB |
1786 | } |
1787 | /* | |
1788 | * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, | |
1789 | * allocate the additional extended CDB buffer now.. Otherwise | |
1790 | * setup the pointer from __t_task_cdb to t_task_cdb. | |
1791 | */ | |
a1d8b49a AG |
1792 | if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { |
1793 | cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), | |
c66ac9db | 1794 | GFP_KERNEL); |
a1d8b49a AG |
1795 | if (!(cmd->t_task_cdb)) { |
1796 | printk(KERN_ERR "Unable to allocate cmd->t_task_cdb" | |
1797 | " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", | |
c66ac9db | 1798 | scsi_command_size(cdb), |
a1d8b49a | 1799 | (unsigned long)sizeof(cmd->__t_task_cdb)); |
e3d6f909 | 1800 | return -ENOMEM; |
c66ac9db NB |
1801 | } |
1802 | } else | |
a1d8b49a | 1803 | cmd->t_task_cdb = &cmd->__t_task_cdb[0]; |
c66ac9db | 1804 | /* |
a1d8b49a | 1805 | * Copy the original CDB into cmd->t_task_cdb |
c66ac9db | 1806 | */ |
a1d8b49a | 1807 | memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb)); |
c66ac9db NB |
1808 | /* |
1809 | * Setup the received CDB based on SCSI defined opcodes and | |
1810 | * perform unit attention, persistent reservations and ALUA | |
a1d8b49a | 1811 | * checks for virtual device backends. The cmd->t_task_cdb |
c66ac9db NB |
1812 | * pointer is expected to be setup before we reach this point. |
1813 | */ | |
1814 | ret = transport_generic_cmd_sequencer(cmd, cdb); | |
1815 | if (ret < 0) | |
1816 | return ret; | |
1817 | /* | |
1818 | * Check for SAM Task Attribute Emulation | |
1819 | */ | |
1820 | if (transport_check_alloc_task_attr(cmd) < 0) { | |
1821 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
1822 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | |
5951146d | 1823 | return -EINVAL; |
c66ac9db NB |
1824 | } |
1825 | spin_lock(&cmd->se_lun->lun_sep_lock); | |
1826 | if (cmd->se_lun->lun_sep) | |
1827 | cmd->se_lun->lun_sep->sep_stats.cmd_pdus++; | |
1828 | spin_unlock(&cmd->se_lun->lun_sep_lock); | |
1829 | return 0; | |
1830 | } | |
1831 | EXPORT_SYMBOL(transport_generic_allocate_tasks); | |
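/*
 * Editor's note: the extended-CDB buffer selection above, restated as a
 * stand-alone user-space sketch (not from the original source). The
 * pattern: keep a small inline buffer for the common case and fall back
 * to a heap allocation only for oversized CDBs. SKETCH_INLINE_CDB_SIZE
 * is a hypothetical stand-in for sizeof(cmd->__t_task_cdb).
 */
#include <stdlib.h>
#include <string.h>

#define SKETCH_INLINE_CDB_SIZE 32

struct cdb_holder_sketch {
	unsigned char *cdb;	/* points at inline_cdb[] or a heap buffer */
	unsigned char inline_cdb[SKETCH_INLINE_CDB_SIZE];
};

static int hold_cdb_sketch(struct cdb_holder_sketch *h,
			   const unsigned char *cdb, size_t len)
{
	if (len > sizeof(h->inline_cdb)) {
		h->cdb = calloc(1, len);	/* extended CDB case */
		if (!h->cdb)
			return -1;
	} else
		h->cdb = h->inline_cdb;		/* common case, no allocation */

	memcpy(h->cdb, cdb, len);
	return 0;
}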
1832 | ||
1833 | /* | |
1834 | * Used by fabric module frontends not defining a TFO->new_cmd_map() | |
1835 | * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD status |
1836 | */ | |
1837 | int transport_generic_handle_cdb( | |
1838 | struct se_cmd *cmd) | |
1839 | { | |
e3d6f909 | 1840 | if (!cmd->se_lun) { |
c66ac9db | 1841 | dump_stack(); |
e3d6f909 AG |
1842 | printk(KERN_ERR "cmd->se_lun is NULL\n"); |
1843 | return -EINVAL; | |
c66ac9db | 1844 | } |
c66ac9db NB |
1845 | transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD); |
1846 | return 0; | |
1847 | } | |
1848 | EXPORT_SYMBOL(transport_generic_handle_cdb); | |
1849 | ||
1850 | /* | |
1851 | * Used by fabric module frontends defining a TFO->new_cmd_map() caller | |
1852 | * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to | |
1853 | * complete setup in TCM process context w/ TFO->new_cmd_map(). | |
1854 | */ | |
1855 | int transport_generic_handle_cdb_map( | |
1856 | struct se_cmd *cmd) | |
1857 | { | |
e3d6f909 | 1858 | if (!cmd->se_lun) { |
c66ac9db | 1859 | dump_stack(); |
e3d6f909 AG |
1860 | printk(KERN_ERR "cmd->se_lun is NULL\n"); |
1861 | return -EINVAL; | |
c66ac9db NB |
1862 | } |
1863 | ||
1864 | transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP); | |
1865 | return 0; | |
1866 | } | |
1867 | EXPORT_SYMBOL(transport_generic_handle_cdb_map); | |
1868 | ||
1869 | /* transport_generic_handle_data(): | |
1870 | * | |
1871 | * | |
1872 | */ | |
1873 | int transport_generic_handle_data( | |
1874 | struct se_cmd *cmd) | |
1875 | { | |
1876 | /* | |
1877 | * For the software fabric case, we assume the nexus is being |
1878 | * failed/shutdown when signals are pending from the kthread context | |
1879 | * caller, so we return a failure. For the HW target mode case running | |
1880 | * in interrupt code, the signal_pending() check is skipped. | |
1881 | */ | |
1882 | if (!in_interrupt() && signal_pending(current)) | |
e3d6f909 | 1883 | return -EPERM; |
c66ac9db NB |
1884 | /* |
1885 | * If the received CDB has already been ABORTED by the generic |
1886 | * target engine, we now call transport_check_aborted_status() |
1887 | * to queue any delayed TASK_ABORTED status for the received CDB to the |
25985edc | 1888 | * fabric module as we are expecting no further incoming DATA OUT |
c66ac9db NB |
1889 | * sequences at this point. |
1890 | */ | |
1891 | if (transport_check_aborted_status(cmd, 1) != 0) | |
1892 | return 0; | |
1893 | ||
1894 | transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE); | |
1895 | return 0; | |
1896 | } | |
1897 | EXPORT_SYMBOL(transport_generic_handle_data); | |
1898 | ||
1899 | /* transport_generic_handle_tmr(): | |
1900 | * | |
1901 | * | |
1902 | */ | |
1903 | int transport_generic_handle_tmr( | |
1904 | struct se_cmd *cmd) | |
1905 | { | |
1906 | /* | |
1907 | * This is needed for early exceptions. | |
1908 | */ | |
1909 | cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; | |
c66ac9db NB |
1910 | |
1911 | transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR); | |
1912 | return 0; | |
1913 | } | |
1914 | EXPORT_SYMBOL(transport_generic_handle_tmr); | |
1915 | ||
f4366772 NB |
1916 | void transport_generic_free_cmd_intr( |
1917 | struct se_cmd *cmd) | |
1918 | { | |
1919 | transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR); | |
1920 | } | |
1921 | EXPORT_SYMBOL(transport_generic_free_cmd_intr); | |
1922 | ||
c66ac9db NB |
1923 | static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) |
1924 | { | |
1925 | struct se_task *task, *task_tmp; | |
1926 | unsigned long flags; | |
1927 | int ret = 0; | |
1928 | ||
1929 | DEBUG_TS("ITT[0x%08x] - Stopping tasks\n", | |
e3d6f909 | 1930 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db NB |
1931 | |
1932 | /* | |
1933 | * No tasks remain in the execution queue | |
1934 | */ | |
a1d8b49a | 1935 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 1936 | list_for_each_entry_safe(task, task_tmp, |
a1d8b49a | 1937 | &cmd->t_task_list, t_list) { |
c66ac9db NB |
1938 | DEBUG_TS("task_no[%d] - Processing task %p\n", |
1939 | task->task_no, task); | |
1940 | /* | |
1941 | * If the struct se_task has not been sent and is not active, | |
1942 | * remove the struct se_task from the execution queue. | |
1943 | */ | |
1944 | if (!atomic_read(&task->task_sent) && | |
1945 | !atomic_read(&task->task_active)) { | |
a1d8b49a | 1946 | spin_unlock_irqrestore(&cmd->t_state_lock, |
c66ac9db NB |
1947 | flags); |
1948 | transport_remove_task_from_execute_queue(task, | |
1949 | task->se_dev); | |
1950 | ||
1951 | DEBUG_TS("task_no[%d] - Removed from execute queue\n", | |
1952 | task->task_no); | |
a1d8b49a | 1953 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db NB |
1954 | continue; |
1955 | } | |
1956 | ||
1957 | /* | |
1958 | * If the struct se_task is active, sleep until it is returned | |
1959 | * from the plugin. | |
1960 | */ | |
1961 | if (atomic_read(&task->task_active)) { | |
1962 | atomic_set(&task->task_stop, 1); | |
a1d8b49a | 1963 | spin_unlock_irqrestore(&cmd->t_state_lock, |
c66ac9db NB |
1964 | flags); |
1965 | ||
1966 | DEBUG_TS("task_no[%d] - Waiting to complete\n", | |
1967 | task->task_no); | |
1968 | wait_for_completion(&task->task_stop_comp); | |
1969 | DEBUG_TS("task_no[%d] - Stopped successfully\n", | |
1970 | task->task_no); | |
1971 | ||
a1d8b49a AG |
1972 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
1973 | atomic_dec(&cmd->t_task_cdbs_left); | |
c66ac9db NB |
1974 | |
1975 | atomic_set(&task->task_active, 0); | |
1976 | atomic_set(&task->task_stop, 0); | |
1977 | } else { | |
1978 | DEBUG_TS("task_no[%d] - Did nothing\n", task->task_no); | |
1979 | ret++; | |
1980 | } | |
1981 | ||
1982 | __transport_stop_task_timer(task, &flags); | |
1983 | } | |
a1d8b49a | 1984 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
1985 | |
1986 | return ret; | |
1987 | } | |
1988 | ||
c66ac9db NB |
1989 | /* |
1990 | * Handle SAM-esque emulation for generic transport request failures. | |
1991 | */ | |
1992 | static void transport_generic_request_failure( | |
1993 | struct se_cmd *cmd, | |
1994 | struct se_device *dev, | |
1995 | int complete, | |
1996 | int sc) | |
1997 | { | |
1998 | DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" | |
e3d6f909 | 1999 | " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), |
a1d8b49a | 2000 | cmd->t_task_cdb[0]); |
c66ac9db NB |
2001 | DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:" |
2002 | " %d/%d transport_error_status: %d\n", | |
e3d6f909 | 2003 | cmd->se_tfo->get_cmd_state(cmd), |
c66ac9db NB |
2004 | cmd->t_state, cmd->deferred_t_state, |
2005 | cmd->transport_error_status); | |
2006 | DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d" | |
2007 | " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" | |
2008 | " t_transport_active: %d t_transport_stop: %d" | |
a1d8b49a AG |
2009 | " t_transport_sent: %d\n", cmd->t_task_cdbs, |
2010 | atomic_read(&cmd->t_task_cdbs_left), | |
2011 | atomic_read(&cmd->t_task_cdbs_sent), | |
2012 | atomic_read(&cmd->t_task_cdbs_ex_left), | |
2013 | atomic_read(&cmd->t_transport_active), | |
2014 | atomic_read(&cmd->t_transport_stop), | |
2015 | atomic_read(&cmd->t_transport_sent)); | |
c66ac9db NB |
2016 | |
2017 | transport_stop_all_task_timers(cmd); | |
2018 | ||
2019 | if (dev) | |
e3d6f909 | 2020 | atomic_inc(&dev->depth_left); |
c66ac9db NB |
2021 | /* |
2022 | * For SAM Task Attribute emulation for failed struct se_cmd | |
2023 | */ | |
2024 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) | |
2025 | transport_complete_task_attr(cmd); | |
2026 | ||
2027 | if (complete) { | |
2028 | transport_direct_request_timeout(cmd); | |
2029 | cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE; | |
2030 | } | |
2031 | ||
2032 | switch (cmd->transport_error_status) { | |
2033 | case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE: | |
2034 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | |
2035 | break; | |
2036 | case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS: | |
2037 | cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY; | |
2038 | break; | |
2039 | case PYX_TRANSPORT_INVALID_CDB_FIELD: | |
2040 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | |
2041 | break; | |
2042 | case PYX_TRANSPORT_INVALID_PARAMETER_LIST: | |
2043 | cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; | |
2044 | break; | |
2045 | case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES: | |
2046 | if (!sc) | |
2047 | transport_new_cmd_failure(cmd); | |
2048 | /* | |
2049 | * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES, | |
2050 | * we force this session to fall back to session | |
2051 | * recovery. | |
2052 | */ | |
e3d6f909 AG |
2053 | cmd->se_tfo->fall_back_to_erl0(cmd->se_sess); |
2054 | cmd->se_tfo->stop_session(cmd->se_sess, 0, 0); | |
c66ac9db NB |
2055 | |
2056 | goto check_stop; | |
2057 | case PYX_TRANSPORT_LU_COMM_FAILURE: | |
2058 | case PYX_TRANSPORT_ILLEGAL_REQUEST: | |
2059 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | |
2060 | break; | |
2061 | case PYX_TRANSPORT_UNKNOWN_MODE_PAGE: | |
2062 | cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE; | |
2063 | break; | |
2064 | case PYX_TRANSPORT_WRITE_PROTECTED: | |
2065 | cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; | |
2066 | break; | |
2067 | case PYX_TRANSPORT_RESERVATION_CONFLICT: | |
2068 | /* | |
2069 | * No SENSE Data payload for this case, set SCSI Status | |
2070 | * and queue the response to $FABRIC_MOD. | |
2071 | * | |
2072 | * Uses linux/include/scsi/scsi.h SAM status codes defs | |
2073 | */ | |
2074 | cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; | |
2075 | /* | |
2076 | * For UA Interlock Code 11b, a RESERVATION CONFLICT will | |
2077 | * establish a UNIT ATTENTION with PREVIOUS RESERVATION | |
2078 | * CONFLICT STATUS. | |
2079 | * | |
2080 | * See spc4r17, section 7.4.6 Control Mode Page, Table 349 | |
2081 | */ | |
e3d6f909 AG |
2082 | if (cmd->se_sess && |
2083 | cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) | |
2084 | core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, | |
c66ac9db NB |
2085 | cmd->orig_fe_lun, 0x2C, |
2086 | ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); | |
2087 | ||
e3d6f909 | 2088 | cmd->se_tfo->queue_status(cmd); |
c66ac9db NB |
2089 | goto check_stop; |
2090 | case PYX_TRANSPORT_USE_SENSE_REASON: | |
2091 | /* | |
2092 | * struct se_cmd->scsi_sense_reason already set | |
2093 | */ | |
2094 | break; | |
2095 | default: | |
2096 | printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n", | |
a1d8b49a | 2097 | cmd->t_task_cdb[0], |
c66ac9db NB |
2098 | cmd->transport_error_status); |
2099 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | |
2100 | break; | |
2101 | } | |
2102 | ||
2103 | if (!sc) | |
2104 | transport_new_cmd_failure(cmd); | |
2105 | else | |
2106 | transport_send_check_condition_and_sense(cmd, | |
2107 | cmd->scsi_sense_reason, 0); | |
2108 | check_stop: | |
2109 | transport_lun_remove_cmd(cmd); | |
2110 | transport_cmd_check_stop_to_fabric(cmd); |
2112 | } | |
2113 | ||
2114 | static void transport_direct_request_timeout(struct se_cmd *cmd) | |
2115 | { | |
2116 | unsigned long flags; | |
2117 | ||
a1d8b49a AG |
2118 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
2119 | if (!(atomic_read(&cmd->t_transport_timeout))) { | |
2120 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
c66ac9db NB |
2121 | return; |
2122 | } | |
a1d8b49a AG |
2123 | if (atomic_read(&cmd->t_task_cdbs_timeout_left)) { |
2124 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
c66ac9db NB |
2125 | return; |
2126 | } | |
2127 | ||
a1d8b49a AG |
2128 | atomic_sub(atomic_read(&cmd->t_transport_timeout), |
2129 | &cmd->t_se_count); | |
2130 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
c66ac9db NB |
2131 | } |
2132 | ||
2133 | static void transport_generic_request_timeout(struct se_cmd *cmd) | |
2134 | { | |
2135 | unsigned long flags; | |
2136 | ||
2137 | /* | |
a1d8b49a | 2138 | * Reset cmd->t_se_count to allow transport_generic_remove() |
c66ac9db NB |
2139 | * to allow last call to free memory resources. |
2140 | */ | |
a1d8b49a AG |
2141 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
2142 | if (atomic_read(&cmd->t_transport_timeout) > 1) { | |
2143 | int tmp = (atomic_read(&cmd->t_transport_timeout) - 1); | |
c66ac9db | 2144 | |
a1d8b49a | 2145 | atomic_sub(tmp, &cmd->t_se_count); |
c66ac9db | 2146 | } |
a1d8b49a | 2147 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2148 | |
2149 | transport_generic_remove(cmd, 0, 0); | |
2150 | } | |
2151 | ||
2152 | static int | |
2153 | transport_generic_allocate_buf(struct se_cmd *cmd, u32 data_length) | |
2154 | { | |
2155 | unsigned char *buf; | |
2156 | ||
2157 | buf = kzalloc(data_length, GFP_KERNEL); | |
2158 | if (!(buf)) { | |
2159 | printk(KERN_ERR "Unable to allocate memory for buffer\n"); | |
e3d6f909 | 2160 | return -ENOMEM; |
c66ac9db NB |
2161 | } |
2162 | ||
a1d8b49a AG |
2163 | cmd->t_tasks_se_num = 0; |
2164 | cmd->t_task_buf = buf; | |
c66ac9db NB |
2165 | |
2166 | return 0; | |
2167 | } | |
2168 | ||
2169 | static inline u32 transport_lba_21(unsigned char *cdb) | |
2170 | { | |
2171 | return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3]; | |
2172 | } | |
2173 | ||
2174 | static inline u32 transport_lba_32(unsigned char *cdb) | |
2175 | { | |
2176 | return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; | |
2177 | } | |
2178 | ||
2179 | static inline unsigned long long transport_lba_64(unsigned char *cdb) | |
2180 | { | |
2181 | unsigned int __v1, __v2; | |
2182 | ||
2183 | __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; | |
2184 | __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; | |
2185 | ||
2186 | return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; | |
2187 | } | |
2188 | ||
2189 | /* | |
2190 | * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs | |
2191 | */ | |
2192 | static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) | |
2193 | { | |
2194 | unsigned int __v1, __v2; | |
2195 | ||
2196 | __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15]; | |
2197 | __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19]; | |
2198 | ||
2199 | return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; | |
2200 | } | |
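/*
 * Editor's note: a stand-alone sketch, not in the original source, of the
 * big-endian LBA assembly done by the transport_lba_*() helpers above,
 * checked against a hypothetical READ_10 CDB (byte 2 = MSB, byte 5 = LSB).
 */
#include <assert.h>

static unsigned int lba_32_sketch(const unsigned char *cdb)
{
	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}

static void lba_example(void)
{
	/* READ_10 (0x28) addressing LBA 0x00123456 */
	unsigned char cdb[10] = { 0x28, 0, 0x00, 0x12, 0x34, 0x56, 0, 0, 0, 0 };

	assert(lba_32_sketch(cdb) == 0x00123456);
}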
2201 | ||
2202 | static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd) | |
2203 | { | |
2204 | unsigned long flags; | |
2205 | ||
a1d8b49a | 2206 | spin_lock_irqsave(&se_cmd->t_state_lock, flags); |
c66ac9db | 2207 | se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; |
a1d8b49a | 2208 | spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); |
c66ac9db NB |
2209 | } |
2210 | ||
2211 | /* | |
2212 | * Called from interrupt context. | |
2213 | */ | |
2214 | static void transport_task_timeout_handler(unsigned long data) | |
2215 | { | |
2216 | struct se_task *task = (struct se_task *)data; | |
e3d6f909 | 2217 | struct se_cmd *cmd = task->task_se_cmd; |
c66ac9db NB |
2218 | unsigned long flags; |
2219 | ||
2220 | DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd); | |
2221 | ||
a1d8b49a | 2222 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 2223 | if (task->task_flags & TF_STOP) { |
a1d8b49a | 2224 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2225 | return; |
2226 | } | |
2227 | task->task_flags &= ~TF_RUNNING; | |
2228 | ||
2229 | /* | |
2230 | * Determine if transport_complete_task() has already been called. | |
2231 | */ | |
2232 | if (!(atomic_read(&task->task_active))) { | |
2233 | DEBUG_TT("transport task: %p cmd: %p timeout task_active" | |
2234 | " == 0\n", task, cmd); | |
a1d8b49a | 2235 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2236 | return; |
2237 | } | |
2238 | ||
a1d8b49a AG |
2239 | atomic_inc(&cmd->t_se_count); |
2240 | atomic_inc(&cmd->t_transport_timeout); | |
2241 | cmd->t_tasks_failed = 1; | |
c66ac9db NB |
2242 | |
2243 | atomic_set(&task->task_timeout, 1); | |
2244 | task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT; | |
2245 | task->task_scsi_status = 1; | |
2246 | ||
2247 | if (atomic_read(&task->task_stop)) { | |
2248 | DEBUG_TT("transport task: %p cmd: %p timeout task_stop" | |
2249 | " == 1\n", task, cmd); | |
a1d8b49a | 2250 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2251 | complete(&task->task_stop_comp); |
2252 | return; | |
2253 | } | |
2254 | ||
a1d8b49a | 2255 | if (!(atomic_dec_and_test(&cmd->t_task_cdbs_left))) { |
c66ac9db NB |
2256 | DEBUG_TT("transport task: %p cmd: %p timeout non zero" |
2257 | " t_task_cdbs_left\n", task, cmd); | |
a1d8b49a | 2258 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2259 | return; |
2260 | } | |
2261 | DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n", | |
2262 | task, cmd); | |
2263 | ||
2264 | cmd->t_state = TRANSPORT_COMPLETE_FAILURE; | |
a1d8b49a | 2265 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2266 | |
2267 | transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE); | |
2268 | } | |
2269 | ||
2270 | /* | |
a1d8b49a | 2271 | * Called with cmd->t_state_lock held. |
c66ac9db NB |
2272 | */ |
2273 | static void transport_start_task_timer(struct se_task *task) | |
2274 | { | |
2275 | struct se_device *dev = task->se_dev; | |
2276 | int timeout; | |
2277 | ||
2278 | if (task->task_flags & TF_RUNNING) | |
2279 | return; | |
2280 | /* | |
2281 | * If the task_timeout is disabled, exit now. | |
2282 | */ | |
e3d6f909 | 2283 | timeout = dev->se_sub_dev->se_dev_attrib.task_timeout; |
c66ac9db NB |
2284 | if (!(timeout)) |
2285 | return; | |
2286 | ||
2287 | init_timer(&task->task_timer); | |
2288 | task->task_timer.expires = (get_jiffies_64() + timeout * HZ); | |
2289 | task->task_timer.data = (unsigned long) task; | |
2290 | task->task_timer.function = transport_task_timeout_handler; | |
2291 | ||
2292 | task->task_flags |= TF_RUNNING; | |
2293 | add_timer(&task->task_timer); | |
2294 | #if 0 | |
2295 | printk(KERN_INFO "Starting task timer for cmd: %p task: %p seconds:" | |
2296 | " %d\n", task->task_se_cmd, task, timeout); | |
2297 | #endif | |
2298 | } | |
2299 | ||
2300 | /* | |
a1d8b49a | 2301 | * Called with spin_lock_irq(&cmd->t_state_lock) held. |
c66ac9db NB |
2302 | */ |
2303 | void __transport_stop_task_timer(struct se_task *task, unsigned long *flags) | |
2304 | { | |
e3d6f909 | 2305 | struct se_cmd *cmd = task->task_se_cmd; |
c66ac9db NB |
2306 | |
2307 | if (!(task->task_flags & TF_RUNNING)) | |
2308 | return; | |
2309 | ||
2310 | task->task_flags |= TF_STOP; | |
a1d8b49a | 2311 | spin_unlock_irqrestore(&cmd->t_state_lock, *flags); |
c66ac9db NB |
2312 | |
2313 | del_timer_sync(&task->task_timer); | |
2314 | ||
a1d8b49a | 2315 | spin_lock_irqsave(&cmd->t_state_lock, *flags); |
c66ac9db NB |
2316 | task->task_flags &= ~TF_RUNNING; |
2317 | task->task_flags &= ~TF_STOP; | |
2318 | } | |
2319 | ||
2320 | static void transport_stop_all_task_timers(struct se_cmd *cmd) | |
2321 | { | |
2322 | struct se_task *task = NULL, *task_tmp; | |
2323 | unsigned long flags; | |
2324 | ||
a1d8b49a | 2325 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 2326 | list_for_each_entry_safe(task, task_tmp, |
a1d8b49a | 2327 | &cmd->t_task_list, t_list) |
c66ac9db | 2328 | __transport_stop_task_timer(task, &flags); |
a1d8b49a | 2329 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2330 | } |
2331 | ||
2332 | static inline int transport_tcq_window_closed(struct se_device *dev) | |
2333 | { | |
2334 | if (dev->dev_tcq_window_closed++ < | |
2335 | PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) { | |
2336 | msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT); | |
2337 | } else | |
2338 | msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG); | |
2339 | ||
e3d6f909 | 2340 | wake_up_interruptible(&dev->dev_queue_obj.thread_wq); |
c66ac9db NB |
2341 | return 0; |
2342 | } | |
2343 | ||
2344 | /* | |
2345 | * Called from Fabric Module context from transport_execute_tasks() | |
2346 | * | |
2347 | * The return of this function determines if the tasks from struct se_cmd |
2348 | * get added to the execution queue in transport_execute_tasks(), | |
2349 | * or are added to the delayed or ordered lists here. | |
2350 | */ | |
2351 | static inline int transport_execute_task_attr(struct se_cmd *cmd) | |
2352 | { | |
5951146d | 2353 | if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) |
c66ac9db NB |
2354 | return 1; |
2355 | /* | |
25985edc | 2356 | * Check for the existence of HEAD_OF_QUEUE, and if true return 1 |
c66ac9db NB |
2357 | * to add the passed struct se_cmd's list of tasks to the front of the list. |
2358 | */ | |
e66ecd50 | 2359 | if (cmd->sam_task_attr == MSG_HEAD_TAG) { |
5951146d | 2360 | atomic_inc(&cmd->se_dev->dev_hoq_count); |
c66ac9db NB |
2361 | smp_mb__after_atomic_inc(); |
2362 | DEBUG_STA("Added HEAD_OF_QUEUE for CDB:" | |
2363 | " 0x%02x, se_ordered_id: %u\n", | |
a1d8b49a | 2364 | cmd->t_task_cdb[0], |
c66ac9db NB |
2365 | cmd->se_ordered_id); |
2366 | return 1; | |
e66ecd50 | 2367 | } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { |
5951146d AG |
2368 | spin_lock(&cmd->se_dev->ordered_cmd_lock); |
2369 | list_add_tail(&cmd->se_ordered_node, | |
2370 | &cmd->se_dev->ordered_cmd_list); | |
2371 | spin_unlock(&cmd->se_dev->ordered_cmd_lock); | |
c66ac9db | 2372 | |
5951146d | 2373 | atomic_inc(&cmd->se_dev->dev_ordered_sync); |
c66ac9db NB |
2374 | smp_mb__after_atomic_inc(); |
2375 | ||
2376 | DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered" | |
2377 | " list, se_ordered_id: %u\n", | |
a1d8b49a | 2378 | cmd->t_task_cdb[0], |
c66ac9db NB |
2379 | cmd->se_ordered_id); |
2380 | /* | |
2381 | * Add ORDERED command to tail of execution queue if | |
2382 | * no other older commands exist that need to be | |
2383 | * completed first. | |
2384 | */ | |
5951146d | 2385 | if (!(atomic_read(&cmd->se_dev->simple_cmds))) |
c66ac9db NB |
2386 | return 1; |
2387 | } else { | |
2388 | /* | |
2389 | * For SIMPLE and UNTAGGED Task Attribute commands | |
2390 | */ | |
5951146d | 2391 | atomic_inc(&cmd->se_dev->simple_cmds); |
c66ac9db NB |
2392 | smp_mb__after_atomic_inc(); |
2393 | } | |
2394 | /* | |
2395 | * If one or more outstanding ORDERED task attributes exist, the |
2396 | * dormant task(s) built for the passed struct se_cmd are instead held |
2397 | * on the delayed list until this struct se_device's ORDERED commands drain. |
2398 | */ | |
5951146d | 2399 | if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) { |
c66ac9db NB |
2400 | /* |
2401 | * Otherwise, add cmd w/ tasks to delayed cmd queue that | |
25985edc | 2402 | * will be drained upon completion of HEAD_OF_QUEUE task. |
c66ac9db | 2403 | */ |
5951146d | 2404 | spin_lock(&cmd->se_dev->delayed_cmd_lock); |
c66ac9db | 2405 | cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR; |
5951146d AG |
2406 | list_add_tail(&cmd->se_delayed_node, |
2407 | &cmd->se_dev->delayed_cmd_list); | |
2408 | spin_unlock(&cmd->se_dev->delayed_cmd_lock); | |
c66ac9db NB |
2409 | |
2410 | DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to" | |
2411 | " delayed CMD list, se_ordered_id: %u\n", | |
a1d8b49a | 2412 | cmd->t_task_cdb[0], cmd->sam_task_attr, |
c66ac9db NB |
2413 | cmd->se_ordered_id); |
2414 | /* | |
2415 | * Return zero to let transport_execute_tasks() know | |
2416 | * not to add the delayed tasks to the execution list. | |
2417 | */ | |
2418 | return 0; | |
2419 | } | |
2420 | /* | |
2421 | * Otherwise, no ORDERED task attributes exist. |
2422 | */ | |
2423 | return 1; | |
2424 | } | |
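/*
 * Editor's note: a compressed decision sketch, not from the original
 * source, of the transport_execute_task_attr() outcomes above. Return 1
 * means the tasks go to the execution queue now; return 0 means the
 * command parks on the delayed list until ORDERED commands drain. The
 * flag parameters are hypothetical inputs standing in for the cmd/device
 * state read under the real locks.
 */
static int task_attr_decision_sketch(int attr_emulated, int is_head_of_queue,
				     int is_ordered, int simple_cmds_pending,
				     int ordered_sync_pending)
{
	if (!attr_emulated || is_head_of_queue)
		return 1;			/* execute immediately */
	if (is_ordered && !simple_cmds_pending)
		return 1;			/* nothing older to wait for */
	if (ordered_sync_pending)
		return 0;			/* delay behind ORDERED cmds */
	return 1;
}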
2425 | ||
2426 | /* | |
2427 | * Called from fabric module context in transport_generic_new_cmd() and | |
2428 | * transport_generic_process_write() | |
2429 | */ | |
2430 | static int transport_execute_tasks(struct se_cmd *cmd) | |
2431 | { | |
2432 | int add_tasks; | |
2433 | ||
2434 | if (!(cmd->se_cmd_flags & SCF_SE_DISABLE_ONLINE_CHECK)) { | |
2435 | if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) { | |
2436 | cmd->transport_error_status = | |
2437 | PYX_TRANSPORT_LU_COMM_FAILURE; | |
2438 | transport_generic_request_failure(cmd, NULL, 0, 1); | |
2439 | return 0; | |
2440 | } | |
2441 | } | |
2442 | /* | |
2443 | * Call transport_cmd_check_stop() to see if a fabric exception | |
25985edc | 2444 | * has occurred that prevents execution. |
c66ac9db NB |
2445 | */ |
2446 | if (!(transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING))) { | |
2447 | /* | |
2448 | * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE | |
2449 | * attribute for the tasks of the received struct se_cmd CDB | |
2450 | */ | |
2451 | add_tasks = transport_execute_task_attr(cmd); | |
e3d6f909 | 2452 | if (!add_tasks) |
c66ac9db NB |
2453 | goto execute_tasks; |
2454 | /* | |
2455 | * This calls transport_add_tasks_from_cmd() to handle | |
2456 | * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation | |
2457 | * (if enabled) in __transport_add_task_to_execute_queue() and | |
2458 | * transport_add_task_check_sam_attr(). | |
2459 | */ | |
2460 | transport_add_tasks_from_cmd(cmd); | |
2461 | } | |
2462 | /* | |
2463 | * Kick the execution queue for the cmd associated struct se_device | |
2464 | * storage object. | |
2465 | */ | |
2466 | execute_tasks: | |
5951146d | 2467 | __transport_execute_tasks(cmd->se_dev); |
c66ac9db NB |
2468 | return 0; |
2469 | } | |
2470 | ||
2471 | /* | |
2472 | * Called to check the struct se_device tcq depth window, and once open pull struct se_task |
2473 | * entries from struct se_device->execute_task_list and send them to the backend. |
2474 | * | |
2475 | * Called from transport_processing_thread() | |
2476 | */ | |
2477 | static int __transport_execute_tasks(struct se_device *dev) | |
2478 | { | |
2479 | int error; | |
2480 | struct se_cmd *cmd = NULL; | |
e3d6f909 | 2481 | struct se_task *task = NULL; |
c66ac9db NB |
2482 | unsigned long flags; |
2483 | ||
2484 | /* | |
2485 | * Check if there is enough room in the device and HBA queue to send | |
a1d8b49a | 2486 | * struct se_tasks to the selected transport. |
c66ac9db NB |
2487 | */ |
2488 | check_depth: | |
e3d6f909 | 2489 | if (!atomic_read(&dev->depth_left)) |
c66ac9db | 2490 | return transport_tcq_window_closed(dev); |
c66ac9db | 2491 | |
e3d6f909 | 2492 | dev->dev_tcq_window_closed = 0; |
c66ac9db | 2493 | |
e3d6f909 AG |
2494 | spin_lock_irq(&dev->execute_task_lock); |
2495 | if (list_empty(&dev->execute_task_list)) { | |
2496 | spin_unlock_irq(&dev->execute_task_lock); | |
c66ac9db NB |
2497 | return 0; |
2498 | } | |
e3d6f909 AG |
2499 | task = list_first_entry(&dev->execute_task_list, |
2500 | struct se_task, t_execute_list); | |
2501 | list_del(&task->t_execute_list); | |
2502 | atomic_set(&task->task_execute_queue, 0); | |
2503 | atomic_dec(&dev->execute_tasks); | |
2504 | spin_unlock_irq(&dev->execute_task_lock); | |
c66ac9db NB |
2505 | |
2506 | atomic_dec(&dev->depth_left); | |
c66ac9db | 2507 | |
e3d6f909 | 2508 | cmd = task->task_se_cmd; |
c66ac9db | 2509 | |
a1d8b49a | 2510 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db NB |
2511 | atomic_set(&task->task_active, 1); |
2512 | atomic_set(&task->task_sent, 1); | |
a1d8b49a | 2513 | atomic_inc(&cmd->t_task_cdbs_sent); |
c66ac9db | 2514 | |
a1d8b49a AG |
2515 | if (atomic_read(&cmd->t_task_cdbs_sent) == |
2516 | cmd->t_task_list_num) | |
c66ac9db NB |
2517 | atomic_set(&cmd->transport_sent, 1); |
2518 | ||
2519 | transport_start_task_timer(task); | |
a1d8b49a | 2520 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2521 | /* |
2522 | * The struct se_cmd->transport_emulate_cdb() function pointer is used | |
e3d6f909 | 2523 | * to grab REPORT_LUNS and other CDBs we want to handle before they hit the |
c66ac9db NB |
2524 | * struct se_subsystem_api->do_task() caller below. |
2525 | */ | |
2526 | if (cmd->transport_emulate_cdb) { | |
2527 | error = cmd->transport_emulate_cdb(cmd); | |
2528 | if (error != 0) { | |
2529 | cmd->transport_error_status = error; | |
2530 | atomic_set(&task->task_active, 0); | |
2531 | atomic_set(&cmd->transport_sent, 0); | |
2532 | transport_stop_tasks_for_cmd(cmd); | |
2533 | transport_generic_request_failure(cmd, dev, 0, 1); | |
2534 | goto check_depth; | |
2535 | } | |
2536 | /* | |
2537 | * Handle the successful completion for transport_emulate_cdb() | |
2538 | * for synchronous operation, when SCF_EMULATE_CDB_ASYNC is not set. |
2539 | * Otherwise the caller is expected to complete the task with | |
2540 | * proper status. | |
2541 | */ | |
2542 | if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) { | |
2543 | cmd->scsi_status = SAM_STAT_GOOD; | |
2544 | task->task_scsi_status = GOOD; | |
2545 | transport_complete_task(task, 1); | |
2546 | } | |
2547 | } else { | |
2548 | /* | |
2549 | * Currently for all virtual TCM plugins including IBLOCK, FILEIO and | |
2550 | * RAMDISK we use the internal transport_emulate_control_cdb() logic | |
2551 | * with struct se_subsystem_api callers for the primary SPC-3 TYPE_DISK | |
2552 | * LUN emulation code. | |
2553 | * | |
2554 | * For TCM/pSCSI and all other SCF_SCSI_DATA_SG_IO_CDB I/O tasks we | |
2555 | * call ->do_task() directly and let the underlying TCM subsystem plugin | |
2556 | * code handle the CDB emulation. | |
2557 | */ | |
e3d6f909 AG |
2558 | if ((dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) && |
2559 | (!(task->task_se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))) | |
c66ac9db NB |
2560 | error = transport_emulate_control_cdb(task); |
2561 | else | |
e3d6f909 | 2562 | error = dev->transport->do_task(task); |
c66ac9db NB |
2563 | |
2564 | if (error != 0) { | |
2565 | cmd->transport_error_status = error; | |
2566 | atomic_set(&task->task_active, 0); | |
2567 | atomic_set(&cmd->transport_sent, 0); | |
2568 | transport_stop_tasks_for_cmd(cmd); | |
2569 | transport_generic_request_failure(cmd, dev, 0, 1); | |
2570 | } | |
2571 | } | |
2572 | ||
2573 | goto check_depth; | |
2574 | ||
2575 | return 0; | |
2576 | } | |
2577 | ||
2578 | void transport_new_cmd_failure(struct se_cmd *se_cmd) | |
2579 | { | |
2580 | unsigned long flags; | |
2581 | /* | |
2582 | * Any unsolicited data will get dumped for failed command inside of | |
2583 | * the fabric plugin | |
2584 | */ | |
a1d8b49a | 2585 | spin_lock_irqsave(&se_cmd->t_state_lock, flags); |
c66ac9db NB |
2586 | se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED; |
2587 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
a1d8b49a | 2588 | spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); |
c66ac9db | 2589 | |
e3d6f909 | 2590 | se_cmd->se_tfo->new_cmd_failure(se_cmd); |
c66ac9db NB |
2591 | } |
2592 | ||
2593 | static void transport_nop_wait_for_tasks(struct se_cmd *, int, int); | |
2594 | ||
2595 | static inline u32 transport_get_sectors_6( | |
2596 | unsigned char *cdb, | |
2597 | struct se_cmd *cmd, | |
2598 | int *ret) | |
2599 | { | |
5951146d | 2600 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
2601 | |
2602 | /* | |
2603 | * Assume TYPE_DISK for non struct se_device objects. | |
2604 | * Use 8-bit sector value. | |
2605 | */ | |
2606 | if (!dev) | |
2607 | goto type_disk; | |
2608 | ||
2609 | /* | |
2610 | * Use 24-bit allocation length for TYPE_TAPE. | |
2611 | */ | |
e3d6f909 | 2612 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) |
c66ac9db NB |
2613 | return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4]; |
2614 | ||
2615 | /* | |
2616 | * Everything else assume TYPE_DISK Sector CDB location. | |
2617 | * Use 8-bit sector value. | |
2618 | */ | |
2619 | type_disk: | |
2620 | return (u32)cdb[4]; | |
2621 | } | |
2622 | ||
2623 | static inline u32 transport_get_sectors_10( | |
2624 | unsigned char *cdb, | |
2625 | struct se_cmd *cmd, | |
2626 | int *ret) | |
2627 | { | |
5951146d | 2628 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
2629 | |
2630 | /* | |
2631 | * Assume TYPE_DISK for non struct se_device objects. | |
2632 | * Use 16-bit sector value. | |
2633 | */ | |
2634 | if (!dev) | |
2635 | goto type_disk; | |
2636 | ||
2637 | /* | |
2638 | * XXX_10 is not defined in SSC, throw an exception | |
2639 | */ | |
e3d6f909 AG |
2640 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) { |
2641 | *ret = -EINVAL; | |
c66ac9db NB |
2642 | return 0; |
2643 | } | |
2644 | ||
2645 | /* | |
2646 | * Everything else assume TYPE_DISK Sector CDB location. | |
2647 | * Use 16-bit sector value. | |
2648 | */ | |
2649 | type_disk: | |
2650 | return (u32)(cdb[7] << 8) + cdb[8]; | |
2651 | } | |
2652 | ||
2653 | static inline u32 transport_get_sectors_12( | |
2654 | unsigned char *cdb, | |
2655 | struct se_cmd *cmd, | |
2656 | int *ret) | |
2657 | { | |
5951146d | 2658 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
2659 | |
2660 | /* | |
2661 | * Assume TYPE_DISK for non struct se_device objects. | |
2662 | * Use 32-bit sector value. | |
2663 | */ | |
2664 | if (!dev) | |
2665 | goto type_disk; | |
2666 | ||
2667 | /* | |
2668 | * XXX_12 is not defined in SSC, throw an exception | |
2669 | */ | |
e3d6f909 AG |
2670 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) { |
2671 | *ret = -EINVAL; | |
c66ac9db NB |
2672 | return 0; |
2673 | } | |
2674 | ||
2675 | /* | |
2676 | * Everything else assume TYPE_DISK Sector CDB location. | |
2677 | * Use 32-bit sector value. | |
2678 | */ | |
2679 | type_disk: | |
2680 | return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9]; | |
2681 | } | |
2682 | ||
2683 | static inline u32 transport_get_sectors_16( | |
2684 | unsigned char *cdb, | |
2685 | struct se_cmd *cmd, | |
2686 | int *ret) | |
2687 | { | |
5951146d | 2688 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
2689 | |
2690 | /* | |
2691 | * Assume TYPE_DISK for non struct se_device objects. | |
2692 | * Use 32-bit sector value. | |
2693 | */ | |
2694 | if (!dev) | |
2695 | goto type_disk; | |
2696 | ||
2697 | /* | |
2698 | * Use 24-bit allocation length for TYPE_TAPE. | |
2699 | */ | |
e3d6f909 | 2700 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) |
c66ac9db NB |
2701 | return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14]; |
2702 | ||
2703 | type_disk: | |
2704 | return (u32)(cdb[10] << 24) + (cdb[11] << 16) + | |
2705 | (cdb[12] << 8) + cdb[13]; | |
2706 | } | |
2707 | ||
2708 | /* | |
2709 | * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants | |
2710 | */ | |
2711 | static inline u32 transport_get_sectors_32( | |
2712 | unsigned char *cdb, | |
2713 | struct se_cmd *cmd, | |
2714 | int *ret) | |
2715 | { | |
2716 | /* | |
2717 | * Assume TYPE_DISK for non struct se_device objects. | |
2718 | * Use 32-bit sector value. | |
2719 | */ | |
2720 | return (u32)(cdb[28] << 24) + (cdb[29] << 16) + | |
2721 | (cdb[30] << 8) + cdb[31]; | |
2722 | ||
2723 | } | |
2724 | ||
2725 | static inline u32 transport_get_size( | |
2726 | u32 sectors, | |
2727 | unsigned char *cdb, | |
2728 | struct se_cmd *cmd) | |
2729 | { | |
5951146d | 2730 | struct se_device *dev = cmd->se_dev; |
c66ac9db | 2731 | |
e3d6f909 | 2732 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) { |
c66ac9db | 2733 | if (cdb[1] & 1) { /* sectors */ |
e3d6f909 | 2734 | return dev->se_sub_dev->se_dev_attrib.block_size * sectors; |
c66ac9db NB |
2735 | } else /* bytes */ |
2736 | return sectors; | |
2737 | } | |
2738 | #if 0 | |
2739 | printk(KERN_INFO "Returning block_size: %u, sectors: %u == %u for" | |
e3d6f909 AG |
2740 | " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors, |
2741 | dev->se_sub_dev->se_dev_attrib.block_size * sectors, | |
2742 | dev->transport->name); | |
c66ac9db | 2743 | #endif |
e3d6f909 | 2744 | return dev->se_sub_dev->se_dev_attrib.block_size * sectors; |
c66ac9db NB |
2745 | } |
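/*
 * Editor's note: illustrative arithmetic, not from the original source,
 * for transport_get_size() above. For TYPE_DISK the transfer size is
 * always block_size * sectors; for TYPE_TAPE the FIXED bit (cdb[1] & 1)
 * selects whether the CDB length field counts blocks or bytes. Example
 * values are hypothetical.
 */
static unsigned int get_size_sketch(unsigned int block_size,
				    unsigned int sectors,
				    int is_tape, int fixed_bit)
{
	if (is_tape && !fixed_bit)
		return sectors;			/* length field in bytes */
	return block_size * sectors;		/* length field in blocks */
}

/* e.g. get_size_sketch(512, 8, 0, 0) == 4096 for an 8-sector disk READ */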
2746 | ||
2747 | unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]) | |
2748 | { | |
2749 | unsigned char result = 0; | |
2750 | /* | |
2751 | * MSB | |
2752 | */ | |
2753 | if ((val[0] >= 'a') && (val[0] <= 'f')) | |
2754 | result = ((val[0] - 'a' + 10) & 0xf) << 4; | |
2755 | else | |
2756 | if ((val[0] >= 'A') && (val[0] <= 'F')) | |
2757 | result = ((val[0] - 'A' + 10) & 0xf) << 4; | |
2758 | else /* digit */ | |
2759 | result = ((val[0] - '0') & 0xf) << 4; | |
2760 | /* | |
2761 | * LSB | |
2762 | */ | |
2763 | if ((val[1] >= 'a') && (val[1] <= 'f')) | |
2764 | result |= ((val[1] - 'a' + 10) & 0xf); | |
2765 | else | |
2766 | if ((val[1] >= 'A') && (val[1] <= 'F')) | |
2767 | result |= ((val[1] - 'A' + 10) & 0xf); | |
2768 | else /* digit */ | |
2769 | result |= ((val[1] - '0') & 0xf); | |
2770 | ||
2771 | return result; | |
2772 | } | |
2773 | EXPORT_SYMBOL(transport_asciihex_to_binaryhex); | |
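/*
 * Editor's note: a user-space sketch, not part of the original source, of
 * the nibble math used by transport_asciihex_to_binaryhex() above, plus
 * the expected results for two sample digit pairs.
 */
#include <assert.h>
#include <ctype.h>

static unsigned char hex_pair_sketch(const unsigned char val[2])
{
	unsigned char out = 0;
	int i;

	for (i = 0; i < 2; i++) {
		unsigned char c = val[i];
		unsigned char nibble = isdigit(c) ?
			(unsigned char)(c - '0') :
			(unsigned char)(tolower(c) - 'a' + 10);

		out = (out << 4) | (nibble & 0xf);	/* MSB nibble first */
	}
	return out;
}

static void hex_pair_example(void)
{
	unsigned char lc[2] = { 'a', '5' }, uc[2] = { 'F', '0' };

	assert(hex_pair_sketch(lc) == 0xa5);	/* "a5" -> 0xa5 */
	assert(hex_pair_sketch(uc) == 0xf0);	/* "F0" -> 0xf0 */
}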
2774 | ||
2775 | static void transport_xor_callback(struct se_cmd *cmd) | |
2776 | { | |
2777 | unsigned char *buf, *addr; | |
2778 | struct se_mem *se_mem; | |
2779 | unsigned int offset; | |
2780 | int i; | |
2781 | /* | |
2782 | * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command | |
2783 | * | |
2784 | * 1) read the specified logical block(s); | |
2785 | * 2) transfer logical blocks from the data-out buffer; | |
2786 | * 3) XOR the logical blocks transferred from the data-out buffer with | |
2787 | * the logical blocks read, storing the resulting XOR data in a buffer; | |
2788 | * 4) if the DISABLE WRITE bit is set to zero, then write the logical | |
2789 | * blocks transferred from the data-out buffer; and | |
2790 | * 5) transfer the resulting XOR data to the data-in buffer. | |
2791 | */ | |
2792 | buf = kmalloc(cmd->data_length, GFP_KERNEL); | |
2793 | if (!(buf)) { | |
2794 | printk(KERN_ERR "Unable to allocate xor_callback buf\n"); | |
2795 | return; | |
2796 | } | |
2797 | /* | |
a1d8b49a | 2798 | * Copy the scatterlist WRITE buffer located at cmd->t_mem_list |
c66ac9db NB |
2799 | * into the locally allocated *buf |
2800 | */ | |
a1d8b49a AG |
2801 | transport_memcpy_se_mem_read_contig(buf, &cmd->t_mem_list, |
2802 | cmd->data_length); | |
c66ac9db NB |
2803 | /* |
2804 | * Now perform the XOR against the BIDI read memory located at | |
a1d8b49a | 2805 | * cmd->t_mem_bidi_list |
c66ac9db NB |
2806 | */ |
2807 | ||
2808 | offset = 0; | |
a1d8b49a | 2809 | list_for_each_entry(se_mem, &cmd->t_mem_bidi_list, se_list) { |
c66ac9db NB |
2810 | addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0); |
2811 | if (!(addr)) | |
2812 | goto out; | |
2813 | ||
2814 | for (i = 0; i < se_mem->se_len; i++) | |
2815 | *(addr + se_mem->se_off + i) ^= *(buf + offset + i); | |
2816 | ||
2817 | offset += se_mem->se_len; | |
2818 | kunmap_atomic(addr, KM_USER0); | |
2819 | } | |
2820 | out: | |
2821 | kfree(buf); | |
2822 | } | |
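/*
 * Editor's note: step 3 of the sbc3r22 XDWRITEREAD sequence quoted above,
 * in isolation, as a stand-alone sketch not taken from the original
 * source: each byte read from the medium is XORed in place with the
 * corresponding byte from the data-out buffer, and the result is what
 * gets returned in the data-in buffer.
 */
#include <stddef.h>

static void xdwriteread_xor_sketch(unsigned char *read_buf,
				   const unsigned char *data_out, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		read_buf[i] ^= data_out[i];
}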
2823 | ||
2824 | /* | |
2825 | * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd | |
2826 | */ | |
2827 | static int transport_get_sense_data(struct se_cmd *cmd) | |
2828 | { | |
2829 | unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL; | |
2830 | struct se_device *dev; | |
2831 | struct se_task *task = NULL, *task_tmp; | |
2832 | unsigned long flags; | |
2833 | u32 offset = 0; | |
2834 | ||
e3d6f909 AG |
2835 | WARN_ON(!cmd->se_lun); |
2836 | ||
a1d8b49a | 2837 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 2838 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { |
a1d8b49a | 2839 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2840 | return 0; |
2841 | } | |
2842 | ||
2843 | list_for_each_entry_safe(task, task_tmp, | |
a1d8b49a | 2844 | &cmd->t_task_list, t_list) { |
c66ac9db NB |
2845 | |
2846 | if (!task->task_sense) | |
2847 | continue; | |
2848 | ||
2849 | dev = task->se_dev; | |
2850 | if (!(dev)) | |
2851 | continue; | |
2852 | ||
e3d6f909 AG |
2853 | if (!dev->transport->get_sense_buffer) { |
2854 | printk(KERN_ERR "dev->transport->get_sense_buffer" | |
c66ac9db NB |
2855 | " is NULL\n"); |
2856 | continue; | |
2857 | } | |
2858 | ||
e3d6f909 | 2859 | sense_buffer = dev->transport->get_sense_buffer(task); |
c66ac9db NB |
2860 | if (!(sense_buffer)) { |
2861 | printk(KERN_ERR "ITT[0x%08x]_TASK[%d]: Unable to locate" | |
2862 | " sense buffer for task with sense\n", | |
e3d6f909 | 2863 | cmd->se_tfo->get_task_tag(cmd), task->task_no); |
c66ac9db NB |
2864 | continue; |
2865 | } | |
a1d8b49a | 2866 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 2867 | |
e3d6f909 | 2868 | offset = cmd->se_tfo->set_fabric_sense_len(cmd, |
c66ac9db NB |
2869 | TRANSPORT_SENSE_BUFFER); |
2870 | ||
5951146d | 2871 | memcpy(&buffer[offset], sense_buffer, |
c66ac9db NB |
2872 | TRANSPORT_SENSE_BUFFER); |
2873 | cmd->scsi_status = task->task_scsi_status; | |
2874 | /* Automatically padded */ | |
2875 | cmd->scsi_sense_length = | |
2876 | (TRANSPORT_SENSE_BUFFER + offset); | |
2877 | ||
2878 | printk(KERN_INFO "HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x" | |
2879 | " and sense\n", | |
e3d6f909 | 2880 | dev->se_hba->hba_id, dev->transport->name, |
c66ac9db NB |
2881 | cmd->scsi_status); |
2882 | return 0; | |
2883 | } | |
a1d8b49a | 2884 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2885 | |
2886 | return -1; | |
2887 | } | |
2888 | ||
2889 | static int transport_allocate_resources(struct se_cmd *cmd) | |
2890 | { | |
2891 | u32 length = cmd->data_length; | |
2892 | ||
2893 | if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || | |
2894 | (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) | |
a1d8b49a | 2895 | return transport_generic_get_mem(cmd, length); |
c66ac9db NB |
2896 | else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) |
2897 | return transport_generic_allocate_buf(cmd, length); | |
2898 | else | |
2899 | return 0; | |
2900 | } | |
2901 | ||
2902 | static int | |
2903 | transport_handle_reservation_conflict(struct se_cmd *cmd) | |
2904 | { | |
2905 | cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; | |
2906 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
2907 | cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT; | |
2908 | cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; | |
2909 | /* | |
2910 | * For UA Interlock Code 11b, a RESERVATION CONFLICT will | |
2911 | * establish a UNIT ATTENTION with PREVIOUS RESERVATION | |
2912 | * CONFLICT STATUS. | |
2913 | * | |
2914 | * See spc4r17, section 7.4.6 Control Mode Page, Table 349 | |
2915 | */ | |
e3d6f909 AG |
2916 | if (cmd->se_sess && |
2917 | cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) | |
2918 | core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, | |
c66ac9db NB |
2919 | cmd->orig_fe_lun, 0x2C, |
2920 | ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); | |
5951146d | 2921 | return -EINVAL; |
c66ac9db NB |
2922 | } |
2923 | ||
2924 | /* transport_generic_cmd_sequencer(): | |
2925 | * | |
2926 | * Generic Command Sequencer that should work for most DAS transport | |
2927 | * drivers. | |
2928 | * | |
2929 | * Called from transport_generic_allocate_tasks() in the $FABRIC_MOD | |
2930 | * RX Thread. | |
2931 | * | |
2932 | * FIXME: Need to support other SCSI OPCODES here as well. |
2933 | */ | |
2934 | static int transport_generic_cmd_sequencer( | |
2935 | struct se_cmd *cmd, | |
2936 | unsigned char *cdb) | |
2937 | { | |
5951146d | 2938 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
2939 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; |
2940 | int ret = 0, sector_ret = 0, passthrough; | |
2941 | u32 sectors = 0, size = 0, pr_reg_type = 0; | |
2942 | u16 service_action; | |
2943 | u8 alua_ascq = 0; | |
2944 | /* | |
2945 | * Check for an existing UNIT ATTENTION condition | |
2946 | */ | |
2947 | if (core_scsi3_ua_check(cmd, cdb) < 0) { | |
2948 | cmd->transport_wait_for_tasks = | |
2949 | &transport_nop_wait_for_tasks; | |
2950 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
2951 | cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION; | |
5951146d | 2952 | return -EINVAL; |
c66ac9db NB |
2953 | } |
2954 | /* | |
2955 | * Check status of Asymmetric Logical Unit Assignment port | |
2956 | */ | |
e3d6f909 | 2957 | ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq); |
c66ac9db NB |
2958 | if (ret != 0) { |
2959 | cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; | |
2960 | /* | |
25985edc | 2961 | * Set SCSI additional sense code (ASC) to 'LUN Not Accessible'; |
c66ac9db NB |
2962 | * The ALUA additional sense code qualifier (ASCQ) is determined |
2963 | * by the ALUA primary or secondary access state.. | |
2964 | */ | |
2965 | if (ret > 0) { | |
2966 | #if 0 | |
2967 | printk(KERN_INFO "[%s]: ALUA TG Port not available," | |
2968 | " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n", | |
e3d6f909 | 2969 | cmd->se_tfo->get_fabric_name(), alua_ascq); |
c66ac9db NB |
2970 | #endif |
2971 | transport_set_sense_codes(cmd, 0x04, alua_ascq); | |
2972 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
2973 | cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY; | |
5951146d | 2974 | return -EINVAL; |
c66ac9db NB |
2975 | } |
2976 | goto out_invalid_cdb_field; | |
2977 | } | |
2978 | /* | |
2979 | * Check status for SPC-3 Persistent Reservations | |
2980 | */ | |
e3d6f909 AG |
2981 | if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) { |
2982 | if (su_dev->t10_pr.pr_ops.t10_seq_non_holder( | |
c66ac9db NB |
2983 | cmd, cdb, pr_reg_type) != 0) |
2984 | return transport_handle_reservation_conflict(cmd); | |
2985 | /* | |
2986 | * This means the CDB is allowed for the SCSI Initiator port | |
2987 | * when said port is *NOT* holding the legacy SPC-2 or | |
2988 | * SPC-3 Persistent Reservation. | |
2989 | */ | |
2990 | } | |
2991 | ||
2992 | switch (cdb[0]) { | |
2993 | case READ_6: | |
2994 | sectors = transport_get_sectors_6(cdb, cmd, §or_ret); | |
2995 | if (sector_ret) | |
2996 | goto out_unsupported_cdb; | |
2997 | size = transport_get_size(sectors, cdb, cmd); | |
2998 | cmd->transport_split_cdb = &split_cdb_XX_6; | |
a1d8b49a | 2999 | cmd->t_task_lba = transport_lba_21(cdb); |
c66ac9db NB |
3000 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3001 | break; | |
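/*
 * Editor's note: the 6-byte CDB variants carry only a 21-bit LBA,
 * spread across bytes 1..3, so a helper such as transport_lba_21()
 * would assemble it roughly as
 *
 *	lba = ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
 *
 * capping READ_6/WRITE_6 at 2^21 blocks (1 GiB with 512-byte sectors).
 */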
3002 | case READ_10: | |
3003 | sectors = transport_get_sectors_10(cdb, cmd, §or_ret); | |
3004 | if (sector_ret) | |
3005 | goto out_unsupported_cdb; | |
3006 | size = transport_get_size(sectors, cdb, cmd); | |
3007 | cmd->transport_split_cdb = &split_cdb_XX_10; | |
a1d8b49a | 3008 | cmd->t_task_lba = transport_lba_32(cdb); |
c66ac9db NB |
3009 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3010 | break; | |
3011 | case READ_12: | |
3012 | sectors = transport_get_sectors_12(cdb, cmd, §or_ret); | |
3013 | if (sector_ret) | |
3014 | goto out_unsupported_cdb; | |
3015 | size = transport_get_size(sectors, cdb, cmd); | |
3016 | cmd->transport_split_cdb = &split_cdb_XX_12; | |
a1d8b49a | 3017 | cmd->t_task_lba = transport_lba_32(cdb); |
c66ac9db NB |
3018 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3019 | break; | |
3020 | case READ_16: | |
3021 | sectors = transport_get_sectors_16(cdb, cmd, §or_ret); | |
3022 | if (sector_ret) | |
3023 | goto out_unsupported_cdb; | |
3024 | size = transport_get_size(sectors, cdb, cmd); | |
3025 | cmd->transport_split_cdb = &split_cdb_XX_16; | |
a1d8b49a | 3026 | cmd->t_task_lba = transport_lba_64(cdb); |
c66ac9db NB |
3027 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3028 | break; | |
3029 | case WRITE_6: | |
3030 | sectors = transport_get_sectors_6(cdb, cmd, §or_ret); | |
3031 | if (sector_ret) | |
3032 | goto out_unsupported_cdb; | |
3033 | size = transport_get_size(sectors, cdb, cmd); | |
3034 | cmd->transport_split_cdb = &split_cdb_XX_6; | |
a1d8b49a | 3035 | cmd->t_task_lba = transport_lba_21(cdb); |
c66ac9db NB |
3036 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3037 | break; | |
3038 | case WRITE_10: | |
3039 | sectors = transport_get_sectors_10(cdb, cmd, §or_ret); | |
3040 | if (sector_ret) | |
3041 | goto out_unsupported_cdb; | |
3042 | size = transport_get_size(sectors, cdb, cmd); | |
3043 | cmd->transport_split_cdb = &split_cdb_XX_10; | |
a1d8b49a AG |
3044 | cmd->t_task_lba = transport_lba_32(cdb); |
3045 | cmd->t_tasks_fua = (cdb[1] & 0x8); | |
c66ac9db NB |
3046 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3047 | break; | |
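/*
 * Editor's note: (cdb[1] & 0x8) above masks the FUA (Force Unit
 * Access) flag, bit 3 of CDB byte 1 for WRITE_10/12/16; when set, the
 * write must reach the medium before GOOD status is returned.
 */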
3048 | case WRITE_12: | |
3049 | sectors = transport_get_sectors_12(cdb, cmd, §or_ret); | |
3050 | if (sector_ret) | |
3051 | goto out_unsupported_cdb; | |
3052 | size = transport_get_size(sectors, cdb, cmd); | |
3053 | cmd->transport_split_cdb = &split_cdb_XX_12; | |
a1d8b49a AG |
3054 | cmd->t_task_lba = transport_lba_32(cdb); |
3055 | cmd->t_tasks_fua = (cdb[1] & 0x8); | |
c66ac9db NB |
3056 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3057 | break; | |
3058 | case WRITE_16: | |
3059 | sectors = transport_get_sectors_16(cdb, cmd, §or_ret); | |
3060 | if (sector_ret) | |
3061 | goto out_unsupported_cdb; | |
3062 | size = transport_get_size(sectors, cdb, cmd); | |
3063 | cmd->transport_split_cdb = &split_cdb_XX_16; | |
a1d8b49a AG |
3064 | cmd->t_task_lba = transport_lba_64(cdb); |
3065 | cmd->t_tasks_fua = (cdb[1] & 0x8); | |
c66ac9db NB |
3066 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3067 | break; | |
3068 | case XDWRITEREAD_10: | |
3069 | if ((cmd->data_direction != DMA_TO_DEVICE) || | |
a1d8b49a | 3070 | !(cmd->t_tasks_bidi)) |
c66ac9db NB |
3071 | goto out_invalid_cdb_field; |
3072 | sectors = transport_get_sectors_10(cdb, cmd, §or_ret); | |
3073 | if (sector_ret) | |
3074 | goto out_unsupported_cdb; | |
3075 | size = transport_get_size(sectors, cdb, cmd); | |
3076 | cmd->transport_split_cdb = &split_cdb_XX_10; | |
a1d8b49a | 3077 | cmd->t_task_lba = transport_lba_32(cdb); |
c66ac9db | 3078 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
e3d6f909 | 3079 | passthrough = (dev->transport->transport_type == |
c66ac9db NB |
3080 | TRANSPORT_PLUGIN_PHBA_PDEV); |
3081 | /* | |
3082 | * Skip the remaining assignments for TCM/PSCSI passthrough | |
3083 | */ | |
3084 | if (passthrough) | |
3085 | break; | |
3086 | /* | |
3087 | * Setup BIDI XOR callback to be run during transport_generic_complete_ok() | |
3088 | */ | |
3089 | cmd->transport_complete_callback = &transport_xor_callback; | |
a1d8b49a | 3090 | cmd->t_tasks_fua = (cdb[1] & 0x8); |
c66ac9db NB |
3091 | break; |
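/*
 * Editor's note: per the SBC XDWRITEREAD semantics, the device reads
 * the old blocks, writes the new data, and returns the XOR of the two;
 * transport_xor_callback presumably performs that XOR over the BIDI
 * READ payload when the command completes.
 */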
3092 | case VARIABLE_LENGTH_CMD: | |
3093 | service_action = get_unaligned_be16(&cdb[8]); | |
3094 | /* | |
3095 | * Determine if this is a TCM/pSCSI device and we should disable | |
3096 | * internal emulation for this CDB. | |
3097 | */ | |
e3d6f909 | 3098 | passthrough = (dev->transport->transport_type == |
c66ac9db NB |
3099 | TRANSPORT_PLUGIN_PHBA_PDEV); |
3100 | ||
3101 | switch (service_action) { | |
3102 | case XDWRITEREAD_32: | |
3103 | sectors = transport_get_sectors_32(cdb, cmd, §or_ret); | |
3104 | if (sector_ret) | |
3105 | goto out_unsupported_cdb; | |
3106 | size = transport_get_size(sectors, cdb, cmd); | |
3107 | /* | |
3108 | * Use WRITE_32 and READ_32 opcodes for the emulated | |
3109 | * XDWRITE_READ_32 logic. | |
3110 | */ | |
3111 | cmd->transport_split_cdb = &split_cdb_XX_32; | |
a1d8b49a | 3112 | cmd->t_task_lba = transport_lba_64_ext(cdb); |
c66ac9db NB |
3113 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3114 | ||
3115 | /* | |
3116 | * Skip the remaining assignments for TCM/PSCSI passthrough | |
3117 | */ | |
3118 | if (passthrough) | |
3119 | break; | |
3120 | ||
3121 | /* | |
3122 | * Setup BIDI XOR callback to be run during | |
3123 | * transport_generic_complete_ok() | |
3124 | */ | |
3125 | cmd->transport_complete_callback = &transport_xor_callback; | |
a1d8b49a | 3126 | cmd->t_tasks_fua = (cdb[10] & 0x8); |
c66ac9db NB |
3127 | break; |
3128 | case WRITE_SAME_32: | |
3129 | sectors = transport_get_sectors_32(cdb, cmd, §or_ret); | |
3130 | if (sector_ret) | |
3131 | goto out_unsupported_cdb; | |
dd3a5ad8 NB |
3132 | |
3133 | if (sectors != 0) | |
3134 | size = transport_get_size(sectors, cdb, cmd); | |
3135 | else | |
3136 | size = dev->se_sub_dev->se_dev_attrib.block_size; | |
3137 | ||
a1d8b49a | 3138 | cmd->t_task_lba = get_unaligned_be64(&cdb[12]); |
c66ac9db NB |
3139 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3140 | ||
3141 | /* | |
3142 | * Skip the remaining assignments for TCM/PSCSI passthrough | |
3143 | */ | |
3144 | if (passthrough) | |
3145 | break; | |
3146 | ||
3147 | if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) { | |
3148 | printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA" | |
3149 | " bits not supported for Block Discard" | |
3150 | " Emulation\n"); | |
3151 | goto out_invalid_cdb_field; | |
3152 | } | |
3153 | /* | |
3154 | * Currently for the emulated case we only accept | |
3155 | * TPWS (thin provisioning WRITE_SAME) with the UNMAP=1 bit set. | |
3156 | */ | |
3157 | if (!(cdb[10] & 0x08)) { | |
3158 | printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not" | |
3159 | " supported for Block Discard Emulation\n"); | |
3160 | goto out_invalid_cdb_field; | |
3161 | } | |
3162 | break; | |
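/*
 * Editor's note on the cdb[10] masks above (assuming the SBC-3
 * WRITE SAME flag layout): bit 3 (0x08) is UNMAP, bit 2 (0x04) is
 * PBDATA and bit 1 (0x02) is LBDATA, so the emulated path accepts
 * only UNMAP=1 with PBDATA=0 and LBDATA=0.
 */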
3163 | default: | |
3164 | printk(KERN_ERR "VARIABLE_LENGTH_CMD service action" | |
3165 | " 0x%04x not supported\n", service_action); | |
3166 | goto out_unsupported_cdb; | |
3167 | } | |
3168 | break; | |
e434f1f1 | 3169 | case MAINTENANCE_IN: |
e3d6f909 | 3170 | if (dev->transport->get_device_type(dev) != TYPE_ROM) { |
c66ac9db NB |
3171 | /* MAINTENANCE_IN from SCC-2 */ |
3172 | /* | |
3173 | * Check for emulated MI_REPORT_TARGET_PGS. | |
3174 | */ | |
3175 | if (cdb[1] == MI_REPORT_TARGET_PGS) { | |
3176 | cmd->transport_emulate_cdb = | |
e3d6f909 | 3177 | (su_dev->t10_alua.alua_type == |
c66ac9db | 3178 | SPC3_ALUA_EMULATED) ? |
e3d6f909 | 3179 | core_emulate_report_target_port_groups : |
c66ac9db NB |
3180 | NULL; |
3181 | } | |
3182 | size = (cdb[6] << 24) | (cdb[7] << 16) | | |
3183 | (cdb[8] << 8) | cdb[9]; | |
3184 | } else { | |
3185 | /* GPCMD_SEND_KEY from multi media commands */ | |
3186 | size = (cdb[8] << 8) + cdb[9]; | |
3187 | } | |
3188 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3189 | break; | |
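/*
 * Editor's sketch: the open-coded shifts above assemble the 32-bit
 * big-endian ALLOCATION LENGTH from bytes 6..9; with <asm/unaligned.h>
 * already included, an equivalent form would be
 *
 *	size = get_unaligned_be32(&cdb[6]);
 */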
3190 | case MODE_SELECT: | |
3191 | size = cdb[4]; | |
3192 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | |
3193 | break; | |
3194 | case MODE_SELECT_10: | |
3195 | size = (cdb[7] << 8) + cdb[8]; | |
3196 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | |
3197 | break; | |
3198 | case MODE_SENSE: | |
3199 | size = cdb[4]; | |
3200 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3201 | break; | |
3202 | case MODE_SENSE_10: | |
3203 | case GPCMD_READ_BUFFER_CAPACITY: | |
3204 | case GPCMD_SEND_OPC: | |
3205 | case LOG_SELECT: | |
3206 | case LOG_SENSE: | |
3207 | size = (cdb[7] << 8) + cdb[8]; | |
3208 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3209 | break; | |
3210 | case READ_BLOCK_LIMITS: | |
3211 | size = READ_BLOCK_LEN; | |
3212 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3213 | break; | |
3214 | case GPCMD_GET_CONFIGURATION: | |
3215 | case GPCMD_READ_FORMAT_CAPACITIES: | |
3216 | case GPCMD_READ_DISC_INFO: | |
3217 | case GPCMD_READ_TRACK_RZONE_INFO: | |
3218 | size = (cdb[7] << 8) + cdb[8]; | |
3219 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | |
3220 | break; | |
3221 | case PERSISTENT_RESERVE_IN: | |
3222 | case PERSISTENT_RESERVE_OUT: | |
3223 | cmd->transport_emulate_cdb = | |
e3d6f909 | 3224 | (su_dev->t10_pr.res_type == |
c66ac9db | 3225 | SPC3_PERSISTENT_RESERVATIONS) ? |
e3d6f909 | 3226 | core_scsi3_emulate_pr : NULL; |
c66ac9db NB |
3227 | size = (cdb[7] << 8) + cdb[8]; |
3228 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3229 | break; | |
3230 | case GPCMD_MECHANISM_STATUS: | |
3231 | case GPCMD_READ_DVD_STRUCTURE: | |
3232 | size = (cdb[8] << 8) + cdb[9]; | |
3233 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | |
3234 | break; | |
3235 | case READ_POSITION: | |
3236 | size = READ_POSITION_LEN; | |
3237 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3238 | break; | |
e434f1f1 | 3239 | case MAINTENANCE_OUT: |
e3d6f909 | 3240 | if (dev->transport->get_device_type(dev) != TYPE_ROM) { |
c66ac9db NB |
3241 | /* MAINTENANCE_OUT from SCC-2 |
3242 | * | |
3243 | * Check for emulated MO_SET_TARGET_PGS. | |
3244 | */ | |
3245 | if (cdb[1] == MO_SET_TARGET_PGS) { | |
3246 | cmd->transport_emulate_cdb = | |
e3d6f909 | 3247 | (su_dev->t10_alua.alua_type == |
c66ac9db | 3248 | SPC3_ALUA_EMULATED) ? |
e3d6f909 | 3249 | core_emulate_set_target_port_groups : |
c66ac9db NB |
3250 | NULL; |
3251 | } | |
3252 | ||
3253 | size = (cdb[6] << 24) | (cdb[7] << 16) | | |
3254 | (cdb[8] << 8) | cdb[9]; | |
3255 | } else { | |
3256 | /* GPCMD_REPORT_KEY from multi media commands */ | |
3257 | size = (cdb[8] << 8) + cdb[9]; | |
3258 | } | |
3259 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3260 | break; | |
3261 | case INQUIRY: | |
3262 | size = (cdb[3] << 8) + cdb[4]; | |
3263 | /* | |
3264 | * Do implicit HEAD_OF_QUEUE processing for INQUIRY. | |
3265 | * See spc4r17 section 5.3 | |
3266 | */ | |
5951146d | 3267 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) |
e66ecd50 | 3268 | cmd->sam_task_attr = MSG_HEAD_TAG; |
c66ac9db NB |
3269 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; |
3270 | break; | |
3271 | case READ_BUFFER: | |
3272 | size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; | |
3273 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3274 | break; | |
3275 | case READ_CAPACITY: | |
3276 | size = READ_CAP_LEN; | |
3277 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3278 | break; | |
3279 | case READ_MEDIA_SERIAL_NUMBER: | |
3280 | case SECURITY_PROTOCOL_IN: | |
3281 | case SECURITY_PROTOCOL_OUT: | |
3282 | size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; | |
3283 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3284 | break; | |
3285 | case SERVICE_ACTION_IN: | |
3286 | case ACCESS_CONTROL_IN: | |
3287 | case ACCESS_CONTROL_OUT: | |
3288 | case EXTENDED_COPY: | |
3289 | case READ_ATTRIBUTE: | |
3290 | case RECEIVE_COPY_RESULTS: | |
3291 | case WRITE_ATTRIBUTE: | |
3292 | size = (cdb[10] << 24) | (cdb[11] << 16) | | |
3293 | (cdb[12] << 8) | cdb[13]; | |
3294 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3295 | break; | |
3296 | case RECEIVE_DIAGNOSTIC: | |
3297 | case SEND_DIAGNOSTIC: | |
3298 | size = (cdb[3] << 8) | cdb[4]; | |
3299 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3300 | break; | |
3301 | /* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */ | |
3302 | #if 0 | |
3303 | case GPCMD_READ_CD: | |
3304 | sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; | |
3305 | size = (2336 * sectors); | |
3306 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3307 | break; | |
3308 | #endif | |
3309 | case READ_TOC: | |
3310 | size = cdb[8]; | |
3311 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3312 | break; | |
3313 | case REQUEST_SENSE: | |
3314 | size = cdb[4]; | |
3315 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3316 | break; | |
3317 | case READ_ELEMENT_STATUS: | |
3318 | size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9]; | |
3319 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3320 | break; | |
3321 | case WRITE_BUFFER: | |
3322 | size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; | |
3323 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3324 | break; | |
3325 | case RESERVE: | |
3326 | case RESERVE_10: | |
3327 | /* | |
3328 | * The SPC-2 RESERVE does not contain a size in the SCSI CDB. | |
3329 | * Assume the passthrough or $FABRIC_MOD will tell us about it. | |
3330 | */ | |
3331 | if (cdb[0] == RESERVE_10) | |
3332 | size = (cdb[7] << 8) | cdb[8]; | |
3333 | else | |
3334 | size = cmd->data_length; | |
3335 | ||
3336 | /* | |
3337 | * Setup the legacy emulated handler for SPC-2 and | |
3338 | * >= SPC-3 compatible reservation handling (CRH=1). | |
3339 | * Otherwise, we assume the underlying SCSI logic is | |
3340 | * running in SPC_PASSTHROUGH, and wants reservations | |
3341 | * emulation disabled. | |
3342 | */ | |
3343 | cmd->transport_emulate_cdb = | |
e3d6f909 | 3344 | (su_dev->t10_pr.res_type != |
c66ac9db | 3345 | SPC_PASSTHROUGH) ? |
e3d6f909 | 3346 | core_scsi2_emulate_crh : NULL; |
c66ac9db NB |
3347 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; |
3348 | break; | |
3349 | case RELEASE: | |
3350 | case RELEASE_10: | |
3351 | /* | |
3352 | * The SPC-2 RELEASE does not contain a size in the SCSI CDB. | |
3353 | * Assume the passthrough or $FABRIC_MOD will tell us about it. | |
3354 | */ | |
3355 | if (cdb[0] == RELEASE_10) | |
3356 | size = (cdb[7] << 8) | cdb[8]; | |
3357 | else | |
3358 | size = cmd->data_length; | |
3359 | ||
3360 | cmd->transport_emulate_cdb = | |
e3d6f909 | 3361 | (su_dev->t10_pr.res_type != |
c66ac9db | 3362 | SPC_PASSTHROUGH) ? |
e3d6f909 | 3363 | core_scsi2_emulate_crh : NULL; |
c66ac9db NB |
3364 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; |
3365 | break; | |
3366 | case SYNCHRONIZE_CACHE: | |
3367 | case 0x91: /* SYNCHRONIZE_CACHE_16: */ | |
3368 | /* | |
3369 | * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE | |
3370 | */ | |
3371 | if (cdb[0] == SYNCHRONIZE_CACHE) { | |
3372 | sectors = transport_get_sectors_10(cdb, cmd, §or_ret); | |
a1d8b49a | 3373 | cmd->t_task_lba = transport_lba_32(cdb); |
c66ac9db NB |
3374 | } else { |
3375 | sectors = transport_get_sectors_16(cdb, cmd, §or_ret); | |
a1d8b49a | 3376 | cmd->t_task_lba = transport_lba_64(cdb); |
c66ac9db NB |
3377 | } |
3378 | if (sector_ret) | |
3379 | goto out_unsupported_cdb; | |
3380 | ||
3381 | size = transport_get_size(sectors, cdb, cmd); | |
3382 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; | |
3383 | ||
3384 | /* | |
3385 | * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb() | |
3386 | */ | |
e3d6f909 | 3387 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) |
c66ac9db NB |
3388 | break; |
3389 | /* | |
3390 | * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation | |
3391 | * for SYNCHRONIZE_CACHE* Immed=1 case in __transport_execute_tasks() | |
3392 | */ | |
3393 | cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC; | |
3394 | /* | |
3395 | * Check to ensure that LBA + Range does not extend past the end | |
3396 | * of the device. | |
3397 | */ | |
a1d8b49a | 3398 | if (!transport_cmd_get_valid_sectors(cmd)) |
c66ac9db NB |
3399 | goto out_invalid_cdb_field; |
3400 | break; | |
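/*
 * Editor's note: IMMED is bit 1 of CDB byte 1 for both
 * SYNCHRONIZE_CACHE variants (assuming the SBC layout); with IMMED=1
 * status may be returned before the flush completes, which is why the
 * emulated path is forced asynchronous via SCF_EMULATE_CDB_ASYNC above.
 */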
3401 | case UNMAP: | |
3402 | size = get_unaligned_be16(&cdb[7]); | |
e3d6f909 | 3403 | passthrough = (dev->transport->transport_type == |
c66ac9db NB |
3404 | TRANSPORT_PLUGIN_PHBA_PDEV); |
3405 | /* | |
3406 | * Determine if the received UNMAP is used for direct passthrough | |
3407 | * into Linux/SCSI with struct request via TCM/pSCSI, or whether we are | |
3408 | * signaling the use of internal transport_generic_unmap() emulation | |
3409 | * for UNMAP -> Linux/BLOCK discard with TCM/IBLOCK and TCM/FILEIO | |
3410 | * subsystem plugin backstores. | |
3411 | */ | |
3412 | if (!(passthrough)) | |
3413 | cmd->se_cmd_flags |= SCF_EMULATE_SYNC_UNMAP; | |
3414 | ||
3415 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | |
3416 | break; | |
3417 | case WRITE_SAME_16: | |
3418 | sectors = transport_get_sectors_16(cdb, cmd, §or_ret); | |
3419 | if (sector_ret) | |
3420 | goto out_unsupported_cdb; | |
dd3a5ad8 NB |
3421 | |
3422 | if (sectors != 0) | |
3423 | size = transport_get_size(sectors, cdb, cmd); | |
3424 | else | |
3425 | size = dev->se_sub_dev->se_dev_attrib.block_size; | |
3426 | ||
a1d8b49a | 3427 | cmd->t_task_lba = get_unaligned_be64(&cdb[2]); |
e3d6f909 | 3428 | passthrough = (dev->transport->transport_type == |
c66ac9db NB |
3429 | TRANSPORT_PLUGIN_PHBA_PDEV); |
3430 | /* | |
3431 | * Determine if the received WRITE_SAME_16 is used for direct | |
3432 | * passthrough into Linux/SCSI with struct request via TCM/pSCSI, | |
3433 | * or whether we are signaling the use of internal WRITE_SAME + UNMAP=1 | |
3434 | * emulation for Linux/BLOCK discard with TCM/IBLOCK and | |
3435 | * TCM/FILEIO subsystem plugin backstores. | |
3436 | */ | |
3437 | if (!(passthrough)) { | |
3438 | if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) { | |
3439 | printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA" | |
3440 | " bits not supported for Block Discard" | |
3441 | " Emulation\n"); | |
3442 | goto out_invalid_cdb_field; | |
3443 | } | |
3444 | /* | |
3445 | * Currently for the emulated case we only accept | |
3446 | * TPWS (thin provisioning WRITE_SAME) with the UNMAP=1 bit set. | |
3447 | */ | |
3448 | if (!(cdb[1] & 0x08)) { | |
3449 | printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not" | |
3450 | " supported for Block Discard Emulation\n"); | |
3451 | goto out_invalid_cdb_field; | |
3452 | } | |
3453 | } | |
3454 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | |
3455 | break; | |
3456 | case ALLOW_MEDIUM_REMOVAL: | |
3457 | case GPCMD_CLOSE_TRACK: | |
3458 | case ERASE: | |
3459 | case INITIALIZE_ELEMENT_STATUS: | |
3460 | case GPCMD_LOAD_UNLOAD: | |
3461 | case REZERO_UNIT: | |
3462 | case SEEK_10: | |
3463 | case GPCMD_SET_SPEED: | |
3464 | case SPACE: | |
3465 | case START_STOP: | |
3466 | case TEST_UNIT_READY: | |
3467 | case VERIFY: | |
3468 | case WRITE_FILEMARKS: | |
3469 | case MOVE_MEDIUM: | |
3470 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; | |
3471 | break; | |
3472 | case REPORT_LUNS: | |
3473 | cmd->transport_emulate_cdb = | |
e3d6f909 | 3474 | transport_core_report_lun_response; |
c66ac9db NB |
3475 | size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; |
3476 | /* | |
3477 | * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS. | |
3478 | * See spc4r17 section 5.3 | |
3479 | */ | |
5951146d | 3480 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) |
e66ecd50 | 3481 | cmd->sam_task_attr = MSG_HEAD_TAG; |
c66ac9db NB |
3482 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; |
3483 | break; | |
3484 | default: | |
3485 | printk(KERN_WARNING "TARGET_CORE[%s]: Unsupported SCSI Opcode" | |
3486 | " 0x%02x, sending CHECK_CONDITION.\n", | |
e3d6f909 | 3487 | cmd->se_tfo->get_fabric_name(), cdb[0]); |
c66ac9db NB |
3488 | cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; |
3489 | goto out_unsupported_cdb; | |
3490 | } | |
3491 | ||
3492 | if (size != cmd->data_length) { | |
3493 | printk(KERN_WARNING "TARGET_CORE[%s]: Expected Transfer Length:" | |
3494 | " %u does not match SCSI CDB Length: %u for SAM Opcode:" | |
e3d6f909 | 3495 | " 0x%02x\n", cmd->se_tfo->get_fabric_name(), |
c66ac9db NB |
3496 | cmd->data_length, size, cdb[0]); |
3497 | ||
3498 | cmd->cmd_spdtl = size; | |
3499 | ||
3500 | if (cmd->data_direction == DMA_TO_DEVICE) { | |
3501 | printk(KERN_ERR "Rejecting underflow/overflow" | |
3502 | " WRITE data\n"); | |
3503 | goto out_invalid_cdb_field; | |
3504 | } | |
3505 | /* | |
3506 | * Reject READ_* or WRITE_* with overflow/underflow for | |
3507 | * type SCF_SCSI_DATA_SG_IO_CDB. | |
3508 | */ | |
e3d6f909 | 3509 | if (!(ret) && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) { |
c66ac9db NB |
3510 | printk(KERN_ERR "Failing OVERFLOW/UNDERFLOW for LBA op" |
3511 | " CDB on non 512-byte sector setup subsystem" | |
e3d6f909 | 3512 | " plugin: %s\n", dev->transport->name); |
c66ac9db NB |
3513 | /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ |
3514 | goto out_invalid_cdb_field; | |
3515 | } | |
3516 | ||
3517 | if (size > cmd->data_length) { | |
3518 | cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; | |
3519 | cmd->residual_count = (size - cmd->data_length); | |
3520 | } else { | |
3521 | cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; | |
3522 | cmd->residual_count = (cmd->data_length - size); | |
3523 | } | |
3524 | cmd->data_length = size; | |
3525 | } | |
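/*
 * Editor's worked example of the block above: with a fabric-reported
 * data_length of 4096 and a CDB-encoded size of 2048, the command is
 * flagged SCF_UNDERFLOW_BIT with residual_count = 4096 - 2048 = 2048,
 * and data_length is then overwritten with the CDB-derived 2048.
 */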
3526 | ||
3527 | transport_set_supported_SAM_opcode(cmd); | |
3528 | return ret; | |
3529 | ||
3530 | out_unsupported_cdb: | |
3531 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
3532 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | |
5951146d | 3533 | return -EINVAL; |
c66ac9db NB |
3534 | out_invalid_cdb_field: |
3535 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
3536 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | |
5951146d | 3537 | return -EINVAL; |
c66ac9db NB |
3538 | } |
3539 | ||
3540 | static inline void transport_release_tasks(struct se_cmd *); | |
3541 | ||
c66ac9db | 3542 | static void transport_memcpy_se_mem_read_contig( |
c66ac9db | 3543 | unsigned char *dst, |
a1d8b49a AG |
3544 | struct list_head *se_mem_list, |
3545 | u32 tot_len) | |
c66ac9db NB |
3546 | { |
3547 | struct se_mem *se_mem; | |
3548 | void *src; | |
a1d8b49a | 3549 | u32 length; |
c66ac9db NB |
3550 | |
3551 | list_for_each_entry(se_mem, se_mem_list, se_list) { | |
a1d8b49a | 3552 | length = min_t(u32, se_mem->se_len, tot_len); |
c66ac9db | 3553 | src = page_address(se_mem->se_page) + se_mem->se_off; |
c66ac9db | 3554 | memcpy(dst, src, length); |
a1d8b49a AG |
3555 | tot_len -= length; |
3556 | if (!tot_len) | |
3557 | break; | |
c66ac9db NB |
3558 | dst += length; |
3559 | } | |
3560 | } | |
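/*
 * Editor's note: the helper above is effectively a hand-rolled
 * sg_copy_to_buffer() over the se_mem list; e.g. copying 6144 bytes
 * out of two 4096-byte entries takes 4096 from the first page and
 * 2048 from the second before the early break on tot_len == 0.
 */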
3561 | ||
3562 | /* | |
3563 | * Called from transport_generic_complete_ok() and | |
3564 | * transport_generic_request_failure() to determine which dormant/delayed | |
3565 | * and ordered cmds need to have their tasks added to the execution queue. | |
3566 | */ | |
3567 | static void transport_complete_task_attr(struct se_cmd *cmd) | |
3568 | { | |
5951146d | 3569 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
3570 | struct se_cmd *cmd_p, *cmd_tmp; |
3571 | int new_active_tasks = 0; | |
3572 | ||
e66ecd50 | 3573 | if (cmd->sam_task_attr == MSG_SIMPLE_TAG) { |
c66ac9db NB |
3574 | atomic_dec(&dev->simple_cmds); |
3575 | smp_mb__after_atomic_dec(); | |
3576 | dev->dev_cur_ordered_id++; | |
3577 | DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for" | |
3578 | " SIMPLE: %u\n", dev->dev_cur_ordered_id, | |
3579 | cmd->se_ordered_id); | |
e66ecd50 | 3580 | } else if (cmd->sam_task_attr == MSG_HEAD_TAG) { |
c66ac9db NB |
3581 | atomic_dec(&dev->dev_hoq_count); |
3582 | smp_mb__after_atomic_dec(); | |
3583 | dev->dev_cur_ordered_id++; | |
3584 | DEBUG_STA("Incremented dev_cur_ordered_id: %u for" | |
3585 | " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, | |
3586 | cmd->se_ordered_id); | |
e66ecd50 | 3587 | } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { |
c66ac9db | 3588 | spin_lock(&dev->ordered_cmd_lock); |
5951146d | 3589 | list_del(&cmd->se_ordered_node); |
c66ac9db NB |
3590 | atomic_dec(&dev->dev_ordered_sync); |
3591 | smp_mb__after_atomic_dec(); | |
3592 | spin_unlock(&dev->ordered_cmd_lock); | |
3593 | ||
3594 | dev->dev_cur_ordered_id++; | |
3595 | DEBUG_STA("Incremented dev_cur_ordered_id: %u for ORDERED:" | |
3596 | " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); | |
3597 | } | |
3598 | /* | |
3599 | * Process all commands up to the last received | |
3600 | * ORDERED task attribute which requires another blocking | |
3601 | * boundary | |
3602 | */ | |
3603 | spin_lock(&dev->delayed_cmd_lock); | |
3604 | list_for_each_entry_safe(cmd_p, cmd_tmp, | |
5951146d | 3605 | &dev->delayed_cmd_list, se_delayed_node) { |
c66ac9db | 3606 | |
5951146d | 3607 | list_del(&cmd_p->se_delayed_node); |
c66ac9db NB |
3608 | spin_unlock(&dev->delayed_cmd_lock); |
3609 | ||
3610 | DEBUG_STA("Calling add_tasks() for" | |
3611 | " cmd_p: 0x%02x Task Attr: 0x%02x" | |
3612 | " Dormant -> Active, se_ordered_id: %u\n", | |
3613 | T_TASK(cmd_p)->t_task_cdb[0], | |
3614 | cmd_p->sam_task_attr, cmd_p->se_ordered_id); | |
3615 | ||
3616 | transport_add_tasks_from_cmd(cmd_p); | |
3617 | new_active_tasks++; | |
3618 | ||
3619 | spin_lock(&dev->delayed_cmd_lock); | |
e66ecd50 | 3620 | if (cmd_p->sam_task_attr == MSG_ORDERED_TAG) |
c66ac9db NB |
3621 | break; |
3622 | } | |
3623 | spin_unlock(&dev->delayed_cmd_lock); | |
3624 | /* | |
3625 | * If new tasks have become active, wake up the transport thread | |
3626 | * to do the processing of the Active tasks. | |
3627 | */ | |
3628 | if (new_active_tasks != 0) | |
e3d6f909 | 3629 | wake_up_interruptible(&dev->dev_queue_obj.thread_wq); |
c66ac9db NB |
3630 | } |
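/*
 * Editor's example of the delayed-list walk above: given
 * [SIMPLE, SIMPLE, ORDERED, SIMPLE], completion re-activates the two
 * SIMPLE commands and the ORDERED one, then breaks; the trailing
 * SIMPLE stays delayed until that ORDERED barrier completes in turn.
 */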
3631 | ||
3632 | static void transport_generic_complete_ok(struct se_cmd *cmd) | |
3633 | { | |
3634 | int reason = 0; | |
3635 | /* | |
3636 | * Check if we need to move delayed/dormant tasks from cmds on the | |
3637 | * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task | |
3638 | * Attribute. | |
3639 | */ | |
5951146d | 3640 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) |
c66ac9db NB |
3641 | transport_complete_task_attr(cmd); |
3642 | /* | |
3643 | * Check if we need to retrieve a sense buffer from | |
3644 | * the struct se_cmd in question. | |
3645 | */ | |
3646 | if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { | |
3647 | if (transport_get_sense_data(cmd) < 0) | |
3648 | reason = TCM_NON_EXISTENT_LUN; | |
3649 | ||
3650 | /* | |
3651 | * Only set when a struct se_task->task_scsi_status returned | |
3652 | * a non-GOOD status. | |
3653 | */ | |
3654 | if (cmd->scsi_status) { | |
3655 | transport_send_check_condition_and_sense( | |
3656 | cmd, reason, 1); | |
3657 | transport_lun_remove_cmd(cmd); | |
3658 | transport_cmd_check_stop_to_fabric(cmd); | |
3659 | return; | |
3660 | } | |
3661 | } | |
3662 | /* | |
25985edc | 3663 | * Check for a callback, used by, among other things, |
c66ac9db NB |
3664 | * XDWRITE_READ_10 emulation. |
3665 | */ | |
3666 | if (cmd->transport_complete_callback) | |
3667 | cmd->transport_complete_callback(cmd); | |
3668 | ||
3669 | switch (cmd->data_direction) { | |
3670 | case DMA_FROM_DEVICE: | |
3671 | spin_lock(&cmd->se_lun->lun_sep_lock); | |
e3d6f909 AG |
3672 | if (cmd->se_lun->lun_sep) { |
3673 | cmd->se_lun->lun_sep->sep_stats.tx_data_octets += | |
c66ac9db NB |
3674 | cmd->data_length; |
3675 | } | |
3676 | spin_unlock(&cmd->se_lun->lun_sep_lock); | |
3677 | /* | |
a1d8b49a | 3678 | * If the TCM fabric module has enabled pre-registered SGL |
c66ac9db | 3679 | * memory, perform the memcpy() from the TCM internal |
a1d8b49a | 3680 | * contiguous buffer back to the original SGL. |
c66ac9db NB |
3681 | */ |
3682 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG) | |
a1d8b49a AG |
3683 | sg_copy_from_buffer(cmd->t_task_pt_sgl, |
3684 | cmd->t_task_pt_sgl_num, | |
3685 | cmd->t_task_buf, | |
3686 | cmd->data_length); | |
c66ac9db | 3687 | |
e3d6f909 | 3688 | cmd->se_tfo->queue_data_in(cmd); |
c66ac9db NB |
3689 | break; |
3690 | case DMA_TO_DEVICE: | |
3691 | spin_lock(&cmd->se_lun->lun_sep_lock); | |
e3d6f909 AG |
3692 | if (cmd->se_lun->lun_sep) { |
3693 | cmd->se_lun->lun_sep->sep_stats.rx_data_octets += | |
c66ac9db NB |
3694 | cmd->data_length; |
3695 | } | |
3696 | spin_unlock(&cmd->se_lun->lun_sep_lock); | |
3697 | /* | |
3698 | * Check if we need to send READ payload for BIDI-COMMAND | |
3699 | */ | |
a1d8b49a | 3700 | if (!list_empty(&cmd->t_mem_bidi_list)) { |
c66ac9db | 3701 | spin_lock(&cmd->se_lun->lun_sep_lock); |
e3d6f909 AG |
3702 | if (cmd->se_lun->lun_sep) { |
3703 | cmd->se_lun->lun_sep->sep_stats.tx_data_octets += | |
c66ac9db NB |
3704 | cmd->data_length; |
3705 | } | |
3706 | spin_unlock(&cmd->se_lun->lun_sep_lock); | |
e3d6f909 | 3707 | cmd->se_tfo->queue_data_in(cmd); |
c66ac9db NB |
3708 | break; |
3709 | } | |
3710 | /* Fall through for DMA_TO_DEVICE */ | |
3711 | case DMA_NONE: | |
e3d6f909 | 3712 | cmd->se_tfo->queue_status(cmd); |
c66ac9db NB |
3713 | break; |
3714 | default: | |
3715 | break; | |
3716 | } | |
3717 | ||
3718 | transport_lun_remove_cmd(cmd); | |
3719 | transport_cmd_check_stop_to_fabric(cmd); | |
3720 | } | |
3721 | ||
3722 | static void transport_free_dev_tasks(struct se_cmd *cmd) | |
3723 | { | |
3724 | struct se_task *task, *task_tmp; | |
3725 | unsigned long flags; | |
3726 | ||
a1d8b49a | 3727 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 3728 | list_for_each_entry_safe(task, task_tmp, |
a1d8b49a | 3729 | &cmd->t_task_list, t_list) { |
c66ac9db NB |
3730 | if (atomic_read(&task->task_active)) |
3731 | continue; | |
3732 | ||
3733 | kfree(task->task_sg_bidi); | |
3734 | kfree(task->task_sg); | |
3735 | ||
3736 | list_del(&task->t_list); | |
3737 | ||
a1d8b49a | 3738 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 3739 | if (task->se_dev) |
e3d6f909 | 3740 | task->se_dev->transport->free_task(task); |
c66ac9db NB |
3741 | else |
3742 | printk(KERN_ERR "task[%u] - task->se_dev is NULL\n", | |
3743 | task->task_no); | |
a1d8b49a | 3744 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 3745 | } |
a1d8b49a | 3746 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
3747 | } |
3748 | ||
3749 | static inline void transport_free_pages(struct se_cmd *cmd) | |
3750 | { | |
3751 | struct se_mem *se_mem, *se_mem_tmp; | |
3752 | int free_page = 1; | |
3753 | ||
3754 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) | |
3755 | free_page = 0; | |
3756 | if (cmd->se_dev->transport->do_se_mem_map) | |
3757 | free_page = 0; | |
3758 | ||
a1d8b49a AG |
3759 | if (cmd->t_task_buf) { |
3760 | kfree(cmd->t_task_buf); | |
3761 | cmd->t_task_buf = NULL; | |
c66ac9db NB |
3762 | return; |
3763 | } | |
3764 | ||
3765 | /* | |
3766 | * Caller will handle releasing of struct se_mem. | |
3767 | */ | |
3768 | if (cmd->se_cmd_flags & SCF_CMD_PASSTHROUGH_NOALLOC) | |
3769 | return; | |
3770 | ||
c66ac9db | 3771 | list_for_each_entry_safe(se_mem, se_mem_tmp, |
a1d8b49a | 3772 | &cmd->t_mem_list, se_list) { |
c66ac9db NB |
3773 | /* |
3774 | * We only call __free_page(struct se_mem->se_page) when | |
3775 | * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use. | |
3776 | */ | |
3777 | if (free_page) | |
3778 | __free_page(se_mem->se_page); | |
3779 | ||
3780 | list_del(&se_mem->se_list); | |
3781 | kmem_cache_free(se_mem_cache, se_mem); | |
3782 | } | |
a1d8b49a | 3783 | cmd->t_tasks_se_num = 0; |
c66ac9db | 3784 | |
5951146d | 3785 | list_for_each_entry_safe(se_mem, se_mem_tmp, |
a1d8b49a | 3786 | &cmd->t_mem_bidi_list, se_list) { |
5951146d AG |
3787 | /* |
3788 | * We only call __free_page(struct se_mem->se_page) when | |
3789 | * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use. | |
3790 | */ | |
3791 | if (free_page) | |
3792 | __free_page(se_mem->se_page); | |
c66ac9db | 3793 | |
5951146d AG |
3794 | list_del(&se_mem->se_list); |
3795 | kmem_cache_free(se_mem_cache, se_mem); | |
c66ac9db | 3796 | } |
a1d8b49a | 3797 | cmd->t_tasks_se_bidi_num = 0; |
c66ac9db NB |
3798 | } |
3799 | ||
3800 | static inline void transport_release_tasks(struct se_cmd *cmd) | |
3801 | { | |
3802 | transport_free_dev_tasks(cmd); | |
3803 | } | |
3804 | ||
3805 | static inline int transport_dec_and_check(struct se_cmd *cmd) | |
3806 | { | |
3807 | unsigned long flags; | |
3808 | ||
a1d8b49a AG |
3809 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
3810 | if (atomic_read(&cmd->t_fe_count)) { | |
3811 | if (!(atomic_dec_and_test(&cmd->t_fe_count))) { | |
3812 | spin_unlock_irqrestore(&cmd->t_state_lock, | |
c66ac9db NB |
3813 | flags); |
3814 | return 1; | |
3815 | } | |
3816 | } | |
3817 | ||
a1d8b49a AG |
3818 | if (atomic_read(&cmd->t_se_count)) { |
3819 | if (!(atomic_dec_and_test(&cmd->t_se_count))) { | |
3820 | spin_unlock_irqrestore(&cmd->t_state_lock, | |
c66ac9db NB |
3821 | flags); |
3822 | return 1; | |
3823 | } | |
3824 | } | |
a1d8b49a | 3825 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
3826 | |
3827 | return 0; | |
3828 | } | |
3829 | ||
3830 | static void transport_release_fe_cmd(struct se_cmd *cmd) | |
3831 | { | |
3832 | unsigned long flags; | |
3833 | ||
3834 | if (transport_dec_and_check(cmd)) | |
3835 | return; | |
3836 | ||
a1d8b49a AG |
3837 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
3838 | if (!(atomic_read(&cmd->transport_dev_active))) { | |
3839 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
c66ac9db NB |
3840 | goto free_pages; |
3841 | } | |
a1d8b49a | 3842 | atomic_set(&cmd->transport_dev_active, 0); |
c66ac9db | 3843 | transport_all_task_dev_remove_state(cmd); |
a1d8b49a | 3844 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
3845 | |
3846 | transport_release_tasks(cmd); | |
3847 | free_pages: | |
3848 | transport_free_pages(cmd); | |
3849 | transport_free_se_cmd(cmd); | |
e3d6f909 | 3850 | cmd->se_tfo->release_cmd_direct(cmd); |
c66ac9db NB |
3851 | } |
3852 | ||
3853 | static int transport_generic_remove( | |
3854 | struct se_cmd *cmd, | |
3855 | int release_to_pool, | |
3856 | int session_reinstatement) | |
3857 | { | |
3858 | unsigned long flags; | |
3859 | ||
c66ac9db NB |
3860 | if (transport_dec_and_check(cmd)) { |
3861 | if (session_reinstatement) { | |
a1d8b49a | 3862 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 3863 | transport_all_task_dev_remove_state(cmd); |
a1d8b49a | 3864 | spin_unlock_irqrestore(&cmd->t_state_lock, |
c66ac9db NB |
3865 | flags); |
3866 | } | |
3867 | return 1; | |
3868 | } | |
3869 | ||
a1d8b49a AG |
3870 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
3871 | if (!(atomic_read(&cmd->transport_dev_active))) { | |
3872 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
c66ac9db NB |
3873 | goto free_pages; |
3874 | } | |
a1d8b49a | 3875 | atomic_set(&cmd->transport_dev_active, 0); |
c66ac9db | 3876 | transport_all_task_dev_remove_state(cmd); |
a1d8b49a | 3877 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
3878 | |
3879 | transport_release_tasks(cmd); | |
5951146d | 3880 | |
c66ac9db NB |
3881 | free_pages: |
3882 | transport_free_pages(cmd); | |
3883 | ||
c66ac9db NB |
3884 | if (release_to_pool) { |
3885 | transport_release_cmd_to_pool(cmd); | |
3886 | } else { | |
3887 | transport_free_se_cmd(cmd); | |
e3d6f909 | 3888 | cmd->se_tfo->release_cmd_direct(cmd); |
c66ac9db NB |
3889 | } |
3890 | ||
3891 | return 0; | |
3892 | } | |
3893 | ||
3894 | /* | |
3895 | * transport_generic_map_mem_to_cmd - Perform SGL -> struct se_mem map | |
3896 | * @cmd: Associated se_cmd descriptor | |
3897 | * @sgl: SGL style memory for TCM WRITE / READ | |
3898 | * @sgl_count: Number of SGL elements | |
3899 | * @sgl_bidi: SGL style memory for TCM BIDI READ | |
3900 | * @sgl_bidi_count: Number of BIDI READ SGL elements | |
3901 | * | |
3902 | * Return: nonzero if cmd was rejected for -ENOMEM or improper usage | |
3903 | * of parameters. | |
3904 | */ | |
3905 | int transport_generic_map_mem_to_cmd( | |
3906 | struct se_cmd *cmd, | |
5951146d AG |
3907 | struct scatterlist *sgl, |
3908 | u32 sgl_count, | |
3909 | struct scatterlist *sgl_bidi, | |
3910 | u32 sgl_bidi_count) | |
c66ac9db | 3911 | { |
c66ac9db NB |
3912 | int ret; |
3913 | ||
5951146d | 3914 | if (!sgl || !sgl_count) |
c66ac9db | 3915 | return 0; |
c66ac9db | 3916 | |
c66ac9db | 3917 | /* |
5951146d | 3918 | * Convert sgls (sgl, sgl_bidi) to list of se_mems |
c66ac9db NB |
3919 | */ |
3920 | if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || | |
3921 | (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) { | |
3922 | /* | |
3923 | * For CDBs using TCM struct se_mem linked list scatterlist memory | |
3924 | * processed into a TCM struct se_subsystem_dev, we do the mapping | |
3925 | * from the passed physical memory to struct se_mem->se_page here. | |
3926 | */ | |
a1d8b49a | 3927 | ret = transport_map_sg_to_mem(cmd, &cmd->t_mem_list, sgl); |
c66ac9db NB |
3928 | if (ret < 0) |
3929 | return -ENOMEM; | |
3930 | ||
a1d8b49a | 3931 | cmd->t_tasks_se_num = ret; |
c66ac9db NB |
3932 | /* |
3933 | * Setup BIDI READ list of struct se_mem elements | |
3934 | */ | |
5951146d | 3935 | if (sgl_bidi && sgl_bidi_count) { |
a1d8b49a | 3936 | ret = transport_map_sg_to_mem(cmd, &cmd->t_mem_bidi_list, sgl_bidi); |
5951146d | 3937 | if (ret < 0) |
c66ac9db | 3938 | return -ENOMEM; |
c66ac9db | 3939 | |
a1d8b49a | 3940 | cmd->t_tasks_se_bidi_num = ret; |
c66ac9db NB |
3941 | } |
3942 | cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; | |
3943 | ||
3944 | } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) { | |
5951146d | 3945 | if (sgl_bidi || sgl_bidi_count) { |
c66ac9db NB |
3946 | printk(KERN_ERR "BIDI-Commands not supported using " |
3947 | "SCF_SCSI_CONTROL_NONSG_IO_CDB\n"); | |
3948 | return -ENOSYS; | |
3949 | } | |
3950 | /* | |
a1d8b49a | 3951 | * For incoming CDBs using a contiguous buffer internal to TCM, |
c66ac9db NB |
3952 | * save the passed struct scatterlist memory. After TCM storage object |
3953 | * processing has completed for this struct se_cmd, TCM core will call | |
3954 | * transport_memcpy_[write,read]_contig() as necessary from | |
3955 | * transport_generic_complete_ok() and transport_write_pending() in order | |
3956 | * to copy the TCM buffer to/from the original passed *mem in SGL -> | |
3957 | * struct scatterlist format. | |
3958 | */ | |
3959 | cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG; | |
a1d8b49a AG |
3960 | cmd->t_task_pt_sgl = sgl; |
3961 | cmd->t_task_pt_sgl_num = sgl_count; | |
c66ac9db NB |
3962 | } |
3963 | ||
3964 | return 0; | |
3965 | } | |
3966 | EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); | |
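/*
 * Editor's sketch of a caller (hypothetical fabric module code, not
 * part of this file): after building an SGL for the payload in local
 * variables sgl/sgl_count, the fabric would hand the memory off before
 * submitting the command:
 *
 *	ret = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
 *					       NULL, 0);
 *	if (ret < 0)
 *		return ret;
 */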
3967 | ||
3968 | ||
3969 | static inline long long transport_dev_end_lba(struct se_device *dev) | |
3970 | { | |
3971 | return dev->transport->get_blocks(dev) + 1; | |
3972 | } | |
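/*
 * Editor's note: assuming get_blocks() returns the last addressable
 * LBA, a device with 8 blocks (LBAs 0..7) yields end_lba = 8, so the
 * range check in transport_cmd_get_valid_sectors() below rejects any
 * command with lba + sectors > 8.
 */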
3973 | ||
a1d8b49a | 3974 | static int transport_cmd_get_valid_sectors(struct se_cmd *cmd) |
c66ac9db | 3975 | { |
5951146d | 3976 | struct se_device *dev = cmd->se_dev; |
a1d8b49a | 3977 | u32 sectors; |
c66ac9db | 3978 | |
e3d6f909 | 3979 | if (dev->transport->get_device_type(dev) != TYPE_DISK) |
c66ac9db NB |
3980 | return 0; |
3981 | ||
a1d8b49a AG |
3982 | sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size); |
3983 | ||
3984 | if ((cmd->t_task_lba + sectors) > | |
c66ac9db NB |
3985 | transport_dev_end_lba(dev)) { |
3986 | printk(KERN_ERR "LBA: %llu Sectors: %u exceeds" | |
3987 | " transport_dev_end_lba(): %llu\n", | |
a1d8b49a | 3988 | cmd->t_task_lba, sectors, |
c66ac9db | 3989 | transport_dev_end_lba(dev)); |
a1d8b49a | 3990 | return 0; |
c66ac9db NB |
3991 | } |
3992 | ||
a1d8b49a | 3993 | return sectors; |
c66ac9db NB |
3994 | } |
3995 | ||
3996 | static int transport_new_cmd_obj(struct se_cmd *cmd) | |
3997 | { | |
5951146d | 3998 | struct se_device *dev = cmd->se_dev; |
a1d8b49a AG |
3999 | u32 task_cdbs; |
4000 | u32 rc; | |
c66ac9db NB |
4001 | |
4002 | if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) { | |
a1d8b49a AG |
4003 | task_cdbs = 1; |
4004 | cmd->t_task_list_num = 1; | |
c66ac9db NB |
4005 | } else { |
4006 | int set_counts = 1; | |
4007 | ||
4008 | /* | |
4009 | * Setup any BIDI READ tasks and memory from | |
a1d8b49a | 4010 | * cmd->t_mem_bidi_list so the READ struct se_tasks |
c66ac9db NB |
4011 | * are queued first for the non pSCSI passthrough case. |
4012 | */ | |
a1d8b49a | 4013 | if (!list_empty(&cmd->t_mem_bidi_list) && |
e3d6f909 | 4014 | (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) { |
a1d8b49a AG |
4015 | rc = transport_allocate_tasks(cmd, |
4016 | cmd->t_task_lba, | |
4017 | transport_cmd_get_valid_sectors(cmd), | |
4018 | DMA_FROM_DEVICE, &cmd->t_mem_bidi_list, | |
c66ac9db NB |
4019 | set_counts); |
4020 | if (!(rc)) { | |
4021 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
4022 | cmd->scsi_sense_reason = | |
4023 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | |
4024 | return PYX_TRANSPORT_LU_COMM_FAILURE; | |
4025 | } | |
4026 | set_counts = 0; | |
4027 | } | |
4028 | /* | |
a1d8b49a | 4029 | * Setup the tasks and memory from cmd->t_mem_list |
c66ac9db NB |
4030 | * Note for BIDI transfers this will contain the WRITE payload |
4031 | */ | |
a1d8b49a AG |
4032 | task_cdbs = transport_allocate_tasks(cmd, |
4033 | cmd->t_task_lba, | |
4034 | transport_cmd_get_valid_sectors(cmd), | |
4035 | cmd->data_direction, &cmd->t_mem_list, | |
c66ac9db NB |
4036 | set_counts); |
4037 | if (!(task_cdbs)) { | |
4038 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
4039 | cmd->scsi_sense_reason = | |
4040 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | |
4041 | return PYX_TRANSPORT_LU_COMM_FAILURE; | |
4042 | } | |
a1d8b49a | 4043 | cmd->t_task_list_num = task_cdbs; |
c66ac9db NB |
4044 | |
4045 | #if 0 | |
4046 | printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:" | |
4047 | " %u, t_task_cdbs: %u\n", cmd->data_length, | |
a1d8b49a AG |
4048 | cmd->t_task_lba, cmd->t_tasks_sectors, |
4049 | cmd->t_task_cdbs); | |
c66ac9db NB |
4050 | #endif |
4051 | } | |
4052 | ||
a1d8b49a AG |
4053 | atomic_set(&cmd->t_task_cdbs_left, task_cdbs); |
4054 | atomic_set(&cmd->t_task_cdbs_ex_left, task_cdbs); | |
4055 | atomic_set(&cmd->t_task_cdbs_timeout_left, task_cdbs); | |
c66ac9db NB |
4056 | return 0; |
4057 | } | |
4058 | ||
c66ac9db | 4059 | static int |
a1d8b49a | 4060 | transport_generic_get_mem(struct se_cmd *cmd, u32 length) |
c66ac9db | 4061 | { |
c66ac9db NB |
4062 | struct se_mem *se_mem; |
4063 | ||
c66ac9db NB |
4064 | /* |
4065 | * If the device uses memory mapping this is enough. | |
4066 | */ | |
4067 | if (cmd->se_dev->transport->do_se_mem_map) | |
4068 | return 0; | |
4069 | ||
c66ac9db NB |
4070 | while (length) { |
4071 | se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); | |
4072 | if (!(se_mem)) { | |
4073 | printk(KERN_ERR "Unable to allocate struct se_mem\n"); | |
4074 | goto out; | |
4075 | } | |
c66ac9db NB |
4076 | |
4077 | /* #warning FIXME Allocate contiguous pages for struct se_mem elements */ | |
a1d8b49a | 4078 | se_mem->se_page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0); |
c66ac9db NB |
4079 | if (!(se_mem->se_page)) { |
4080 | printk(KERN_ERR "alloc_pages() failed\n"); | |
4081 | goto out; | |
4082 | } | |
4083 | ||
87210568 | 4084 | INIT_LIST_HEAD(&se_mem->se_list); |
a1d8b49a AG |
4085 | se_mem->se_len = min_t(u32, length, PAGE_SIZE); |
4086 | list_add_tail(&se_mem->se_list, &cmd->t_mem_list); | |
4087 | cmd->t_tasks_se_num++; | |
c66ac9db NB |
4088 | |
4089 | DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)" | |
4090 | " Offset(%u)\n", se_mem->se_page, se_mem->se_len, | |
4091 | se_mem->se_off); | |
4092 | ||
4093 | length -= se_mem->se_len; | |
4094 | } | |
4095 | ||
4096 | DEBUG_MEM("Allocated total struct se_mem elements(%u)\n", | |
a1d8b49a | 4097 | cmd->t_tasks_se_num); |
c66ac9db NB |
4098 | |
4099 | return 0; | |
4100 | out: | |
87210568 JJ |
4101 | if (se_mem && se_mem->se_page) | |
4102 | __free_pages(se_mem->se_page, 0); | |
4103 | if (se_mem) kmem_cache_free(se_mem_cache, se_mem); | |
e3d6f909 | 4104 | return -ENOMEM; |
c66ac9db NB |
4105 | } |
4106 | ||
e3d6f909 | 4107 | int transport_init_task_sg( |
c66ac9db NB |
4108 | struct se_task *task, |
4109 | struct se_mem *in_se_mem, | |
4110 | u32 task_offset) | |
4111 | { | |
4112 | struct se_cmd *se_cmd = task->task_se_cmd; | |
5951146d | 4113 | struct se_device *se_dev = se_cmd->se_dev; |
c66ac9db | 4114 | struct se_mem *se_mem = in_se_mem; |
e3d6f909 | 4115 | struct target_core_fabric_ops *tfo = se_cmd->se_tfo; |
c66ac9db NB |
4116 | u32 sg_length, task_size = task->task_size, task_sg_num_padded; |
4117 | ||
4118 | while (task_size != 0) { | |
4119 | DEBUG_SC("se_mem->se_page(%p) se_mem->se_len(%u)" | |
4120 | " se_mem->se_off(%u) task_offset(%u)\n", | |
4121 | se_mem->se_page, se_mem->se_len, | |
4122 | se_mem->se_off, task_offset); | |
4123 | ||
4124 | if (task_offset == 0) { | |
4125 | if (task_size >= se_mem->se_len) { | |
4126 | sg_length = se_mem->se_len; | |
4127 | ||
4128 | if (!(list_is_last(&se_mem->se_list, | |
a1d8b49a | 4129 | &se_cmd->t_mem_list))) |
c66ac9db NB |
4130 | se_mem = list_entry(se_mem->se_list.next, |
4131 | struct se_mem, se_list); | |
4132 | } else { | |
4133 | sg_length = task_size; | |
4134 | task_size -= sg_length; | |
4135 | goto next; | |
4136 | } | |
4137 | ||
4138 | DEBUG_SC("sg_length(%u) task_size(%u)\n", | |
4139 | sg_length, task_size); | |
4140 | } else { | |
4141 | if ((se_mem->se_len - task_offset) > task_size) { | |
4142 | sg_length = task_size; | |
4143 | task_size -= sg_length; | |
4144 | goto next; | |
4145 | } else { | |
4146 | sg_length = (se_mem->se_len - task_offset); | |
4147 | ||
4148 | if (!(list_is_last(&se_mem->se_list, | |
a1d8b49a | 4149 | &se_cmd->t_mem_list))) |
c66ac9db NB |
4150 | se_mem = list_entry(se_mem->se_list.next, |
4151 | struct se_mem, se_list); | |
4152 | } | |
4153 | ||
4154 | DEBUG_SC("sg_length(%u) task_size(%u)\n", | |
4155 | sg_length, task_size); | |
4156 | ||
4157 | task_offset = 0; | |
4158 | } | |
4159 | task_size -= sg_length; | |
4160 | next: | |
4161 | DEBUG_SC("task[%u] - Reducing task_size to(%u)\n", | |
4162 | task->task_no, task_size); | |
4163 | ||
4164 | task->task_sg_num++; | |
4165 | } | |
4166 | /* | |
4167 | * Check if the fabric module driver is requesting that all | |
4168 | * struct se_task->task_sg[] be chained together. If so, | |
4169 | * then allocate an extra padding SG entry for linking and | |
4170 | * marking the end of the chained SGL. | |
4171 | */ | |
4172 | if (tfo->task_sg_chaining) { | |
4173 | task_sg_num_padded = (task->task_sg_num + 1); | |
4174 | task->task_padded_sg = 1; | |
4175 | } else | |
4176 | task_sg_num_padded = task->task_sg_num; | |
4177 | ||
4178 | task->task_sg = kzalloc(task_sg_num_padded * | |
4179 | sizeof(struct scatterlist), GFP_KERNEL); | |
4180 | if (!(task->task_sg)) { | |
4181 | printk(KERN_ERR "Unable to allocate memory for" | |
4182 | " task->task_sg\n"); | |
e3d6f909 | 4183 | return -ENOMEM; |
c66ac9db NB |
4184 | } |
4185 | sg_init_table(&task->task_sg[0], task_sg_num_padded); | |
4186 | /* | |
4187 | * Setup task->task_sg_bidi for SCSI READ payload for | |
4188 | * TCM/pSCSI passthrough if present for BIDI-COMMAND | |
4189 | */ | |
a1d8b49a | 4190 | if (!list_empty(&se_cmd->t_mem_bidi_list) && |
e3d6f909 | 4191 | (se_dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) { |
c66ac9db NB |
4192 | task->task_sg_bidi = kzalloc(task_sg_num_padded * |
4193 | sizeof(struct scatterlist), GFP_KERNEL); | |
4194 | if (!(task->task_sg_bidi)) { | |
e3d6f909 AG |
4195 | kfree(task->task_sg); |
4196 | task->task_sg = NULL; | |
c66ac9db NB |
4197 | printk(KERN_ERR "Unable to allocate memory for" |
4198 | " task->task_sg_bidi\n"); | |
e3d6f909 | 4199 | return -ENOMEM; |
c66ac9db NB |
4200 | } |
4201 | sg_init_table(&task->task_sg_bidi[0], task_sg_num_padded); | |
4202 | } | |
4203 | /* | |
4204 | * For the chaining case, setup the proper end of SGL for the | |
4205 | * initial submission struct se_task into struct se_subsystem_api. | |
4206 | * This will be cleared later by transport_do_task_sg_chain() | |
4207 | */ | |
4208 | if (task->task_padded_sg) { | |
4209 | sg_mark_end(&task->task_sg[task->task_sg_num - 1]); | |
4210 | /* | |
4211 | * Also mark the end of the bi-directional scatterlist, | |
4212 | * which only gets created for bi-directional (RD + WR) | |
4213 | * requests. | |
4214 | */ | |
4215 | if (task->task_sg_bidi) | |
4216 | sg_mark_end(&task->task_sg_bidi[task->task_sg_num - 1]); | |
4217 | } | |
4218 | ||
4219 | DEBUG_SC("Successfully allocated task->task_sg_num(%u)," | |
4220 | " task_sg_num_padded(%u)\n", task->task_sg_num, | |
4221 | task_sg_num_padded); | |
4222 | ||
4223 | return task->task_sg_num; | |
4224 | } | |
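/*
 * Editor's note on the padding above: sg_chain() consumes one entry of
 * the source table as the chain link, so a fabric that chains per-task
 * SGLs needs task_sg_num + 1 entries; transport_do_task_sg_chain()
 * later rewrites that padded terminator entry into the chain link.
 */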
4225 | ||
a1d8b49a AG |
4226 | /* Reduce the sector count if it is too large for the device */ | |
4227 | static inline sector_t transport_limit_task_sectors( | |
c66ac9db NB |
4228 | struct se_device *dev, |
4229 | unsigned long long lba, | |
a1d8b49a | 4230 | sector_t sectors) |
c66ac9db | 4231 | { |
a1d8b49a | 4232 | sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors); |
c66ac9db | 4233 | |
a1d8b49a AG |
4234 | if (dev->transport->get_device_type(dev) == TYPE_DISK) |
4235 | if ((lba + sectors) > transport_dev_end_lba(dev)) | |
4236 | sectors = ((transport_dev_end_lba(dev) - lba) + 1); | |
c66ac9db | 4237 | |
a1d8b49a | 4238 | return sectors; |
c66ac9db NB |
4239 | } |
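/*
 * Editor's worked example: with max_sectors = 128, a 200-sector
 * request is first clamped to 128; on a TYPE_DISK device whose
 * end LBA is 1000, a clamped request at lba = 900 is then cut to
 * (1000 - 900) + 1 = 101 sectors by the check above.
 */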
4240 | ||
5951146d AG |
4241 | /* |
4242 | * Convert a sgl into a linked list of se_mems. | |
4243 | */ | |
c66ac9db NB |
4244 | static int transport_map_sg_to_mem( |
4245 | struct se_cmd *cmd, | |
4246 | struct list_head *se_mem_list, | |
a1d8b49a | 4247 | struct scatterlist *sg) |
c66ac9db NB |
4248 | { |
4249 | struct se_mem *se_mem; | |
5951146d | 4250 | u32 cmd_size = cmd->data_length; |
a1d8b49a | 4251 | int sg_count = 0; |
c66ac9db | 4252 | |
5951146d | 4253 | WARN_ON(!sg); |
c66ac9db NB |
4254 | |
4255 | while (cmd_size) { | |
5951146d AG |
4256 | /* |
4257 | * NOTE: it is safe to return -ENOMEM at any time in creating this | |
4258 | * list because transport_free_pages() will eventually be called, and is | |
4259 | * smart enough to deallocate all list items for sg and sg_bidi lists. | |
4260 | */ | |
c66ac9db NB |
4261 | se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); |
4262 | if (!(se_mem)) { | |
4263 | printk(KERN_ERR "Unable to allocate struct se_mem\n"); | |
e3d6f909 | 4264 | return -ENOMEM; |
c66ac9db NB |
4265 | } |
4266 | INIT_LIST_HEAD(&se_mem->se_list); | |
4267 | DEBUG_MEM("sg_to_mem: Starting loop with cmd_size: %u" | |
4268 | " sg_page: %p offset: %d length: %d\n", cmd_size, | |
4269 | sg_page(sg), sg->offset, sg->length); | |
4270 | ||
4271 | se_mem->se_page = sg_page(sg); | |
4272 | se_mem->se_off = sg->offset; | |
4273 | ||
4274 | if (cmd_size > sg->length) { | |
4275 | se_mem->se_len = sg->length; | |
4276 | sg = sg_next(sg); | |
c66ac9db NB |
4277 | } else |
4278 | se_mem->se_len = cmd_size; | |
4279 | ||
4280 | cmd_size -= se_mem->se_len; | |
a1d8b49a | 4281 | sg_count++; |
c66ac9db | 4282 | |
5951146d AG |
4283 | DEBUG_MEM("sg_to_mem: sg_count: %u cmd_size: %u\n", |
4284 | sg_count, cmd_size); | |
c66ac9db NB |
4285 | DEBUG_MEM("sg_to_mem: Final se_page: %p se_off: %d se_len: %d\n", |
4286 | se_mem->se_page, se_mem->se_off, se_mem->se_len); | |
4287 | ||
4288 | list_add_tail(&se_mem->se_list, se_mem_list); | |
c66ac9db NB |
4289 | } |
4290 | ||
5951146d | 4291 | DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments\n", sg_count); |
c66ac9db | 4292 | |
a1d8b49a | 4293 | return sg_count; |
c66ac9db NB |
4294 | } |
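/*
 * Editor's worked example: a 10240-byte command backed by three
 * 4096-byte SGL entries yields se_mems of 4096, 4096 and 2048 bytes
 * (the last entry truncated to the remaining cmd_size), and the
 * function returns sg_count = 3.
 */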
4295 | ||
4296 | /* transport_map_mem_to_sg(): | |
4297 | * | |
4298 | * | |
4299 | */ | |
4300 | int transport_map_mem_to_sg( | |
4301 | struct se_task *task, | |
4302 | struct list_head *se_mem_list, | |
a1d8b49a | 4303 | struct scatterlist *sg, |
c66ac9db NB |
4304 | struct se_mem *in_se_mem, |
4305 | struct se_mem **out_se_mem, | |
4306 | u32 *se_mem_cnt, | |
4307 | u32 *task_offset) | |
4308 | { | |
4309 | struct se_cmd *se_cmd = task->task_se_cmd; | |
4310 | struct se_mem *se_mem = in_se_mem; | |
c66ac9db NB |
4311 | u32 task_size = task->task_size, sg_no = 0; |
4312 | ||
4313 | if (!sg) { | |
4314 | printk(KERN_ERR "Unable to locate valid struct" | |
4315 | " scatterlist pointer\n"); | |
e3d6f909 | 4316 | return -EINVAL; |
c66ac9db NB |
4317 | } |
4318 | ||
4319 | while (task_size != 0) { | |
4320 | /* | |
a1d8b49a | 4321 | * Setup the contiguous array of scatterlists for |
c66ac9db NB |
4322 | * this struct se_task. |
4323 | */ | |
4324 | sg_assign_page(sg, se_mem->se_page); | |
4325 | ||
4326 | if (*task_offset == 0) { | |
4327 | sg->offset = se_mem->se_off; | |
4328 | ||
4329 | if (task_size >= se_mem->se_len) { | |
4330 | sg->length = se_mem->se_len; | |
4331 | ||
4332 | if (!(list_is_last(&se_mem->se_list, | |
a1d8b49a | 4333 | &se_cmd->t_mem_list))) { |
c66ac9db NB |
4334 | se_mem = list_entry(se_mem->se_list.next, |
4335 | struct se_mem, se_list); | |
4336 | (*se_mem_cnt)++; | |
4337 | } | |
4338 | } else { | |
4339 | sg->length = task_size; | |
4340 | /* | |
4341 | * Determine if we need to calculate an offset | |
4342 | * into the struct se_mem on the next go-around. | |
4343 | */ | |
4344 | task_size -= sg->length; | |
4345 | if (!(task_size)) | |
4346 | *task_offset = sg->length; | |
4347 | ||
4348 | goto next; | |
4349 | } | |
4350 | ||
4351 | } else { | |
4352 | sg->offset = (*task_offset + se_mem->se_off); | |
4353 | ||
4354 | if ((se_mem->se_len - *task_offset) > task_size) { | |
4355 | sg->length = task_size; | |
4356 | /* | |
4357 | * Determine if we need to calculate an offset | |
4358 | * into the struct se_mem on the next go-around. | |
4359 | */ | |
4360 | task_size -= sg->length; | |
4361 | if (!(task_size)) | |
4362 | *task_offset += sg->length; | |
4363 | ||
4364 | goto next; | |
4365 | } else { | |
4366 | sg->length = (se_mem->se_len - *task_offset); | |
4367 | ||
4368 | if (!(list_is_last(&se_mem->se_list, | |
a1d8b49a | 4369 | &se_cmd->t_mem_list))) { |
c66ac9db NB |
4370 | se_mem = list_entry(se_mem->se_list.next, |
4371 | struct se_mem, se_list); | |
4372 | (*se_mem_cnt)++; | |
4373 | } | |
4374 | } | |
4375 | ||
4376 | *task_offset = 0; | |
4377 | } | |
4378 | task_size -= sg->length; | |
4379 | next: | |
4380 | DEBUG_MEM("task[%u] mem_to_sg - sg[%u](%p)(%u)(%u) - Reducing" | |
4381 | " task_size to(%u), task_offset: %u\n", task->task_no, sg_no, | |
4382 | sg_page(sg), sg->length, sg->offset, task_size, *task_offset); | |
4383 | ||
4384 | sg_no++; | |
4385 | if (!(task_size)) | |
4386 | break; | |
4387 | ||
4388 | sg = sg_next(sg); | |
4389 | ||
4390 | BUG_ON(task_size > se_cmd->data_length); | |
4392 | } | |
4393 | *out_se_mem = se_mem; | |
4394 | ||
4395 | DEBUG_MEM("task[%u] - Mapped(%u) struct se_mem segments to total(%u)" | |
4396 | " SGs\n", task->task_no, *se_mem_cnt, sg_no); | |
4397 | ||
4398 | return 0; | |
4399 | } | |
4400 | ||
4401 | /* | |
4402 | * This function can be used by HW target mode drivers to create a linked | |
4403 | * scatterlist from all contiguously allocated struct se_task->task_sg[]. | |
4404 | * This is intended to be called during the completion path by TCM Core | |
4405 | * when struct target_core_fabric_ops->check_task_sg_chaining is enabled. | |
4406 | */ | |
4407 | void transport_do_task_sg_chain(struct se_cmd *cmd) | |
4408 | { | |
4409 | struct scatterlist *sg_head = NULL, *sg_link = NULL, *sg_first = NULL; | |
4410 | struct scatterlist *sg_head_cur = NULL, *sg_link_cur = NULL; | |
4411 | struct scatterlist *sg, *sg_end = NULL, *sg_end_cur = NULL; | |
4412 | struct se_task *task; | |
e3d6f909 | 4413 | struct target_core_fabric_ops *tfo = cmd->se_tfo; |
c66ac9db NB |
4414 | u32 task_sg_num = 0, sg_count = 0; |
4415 | int i; | |
4416 | ||
4417 | if (tfo->task_sg_chaining == 0) { | |
4418 | printk(KERN_ERR "task_sg_chaining is diabled for fabric module:" | |
4419 | " %s\n", tfo->get_fabric_name()); | |
4420 | dump_stack(); | |
4421 | return; | |
4422 | } | |
4423 | /* | |
4424 | * Walk the struct se_task list and setup scatterlist chains | |
a1d8b49a | 4425 | * for each contiguously allocated struct se_task->task_sg[]. |
c66ac9db | 4426 | */ |
a1d8b49a | 4427 | list_for_each_entry(task, &cmd->t_task_list, t_list) { |
c66ac9db NB |
4428 | if (!(task->task_sg) || !(task->task_padded_sg)) |
4429 | continue; | |
4430 | ||
4431 | if (sg_head && sg_link) { | |
4432 | sg_head_cur = &task->task_sg[0]; | |
4433 | sg_link_cur = &task->task_sg[task->task_sg_num]; | |
4434 | /* | |
4435 | * Either add chain or mark end of scatterlist | |
4436 | */ | |
4437 | if (!(list_is_last(&task->t_list, | |
a1d8b49a | 4438 | &cmd->t_task_list))) { |
c66ac9db NB |
4439 | /* |
4440 | * Clear existing SGL termination bit set in | |
e3d6f909 | 4441 | * transport_init_task_sg(), see sg_mark_end() |
c66ac9db NB |
4442 | */ |
4443 | sg_end_cur = &task->task_sg[task->task_sg_num - 1]; | |
4444 | sg_end_cur->page_link &= ~0x02; | |
4445 | ||
4446 | sg_chain(sg_head, task_sg_num, sg_head_cur); | |
c66ac9db | 4447 | sg_count += task->task_sg_num; |
97868c89 NB |
4448 | task_sg_num = (task->task_sg_num + 1); |
4449 | } else { | |
4450 | sg_chain(sg_head, task_sg_num, sg_head_cur); | |
4451 | sg_count += task->task_sg_num; | |
4452 | task_sg_num = task->task_sg_num; | |
4453 | } | |
c66ac9db NB |
4454 | |
4455 | sg_head = sg_head_cur; | |
4456 | sg_link = sg_link_cur; | |
c66ac9db NB |
4457 | continue; |
4458 | } | |
4459 | sg_head = sg_first = &task->task_sg[0]; | |
4460 | sg_link = &task->task_sg[task->task_sg_num]; | |
c66ac9db NB |
4461 | /* |
4462 | * Check for single task.. | |
4463 | */ | |
a1d8b49a | 4464 | if (!(list_is_last(&task->t_list, &cmd->t_task_list))) { |
c66ac9db NB |
4465 | /* |
4466 | * Clear existing SGL termination bit set in | |
e3d6f909 | 4467 | * transport_init_task_sg(), see sg_mark_end() |
c66ac9db NB |
4468 | */ |
4469 | sg_end = &task->task_sg[task->task_sg_num - 1]; | |
4470 | sg_end->page_link &= ~0x02; | |
c66ac9db | 4471 | sg_count += task->task_sg_num; |
97868c89 NB |
4472 | task_sg_num = (task->task_sg_num + 1); |
4473 | } else { | |
4474 | sg_count += task->task_sg_num; | |
4475 | task_sg_num = task->task_sg_num; | |
4476 | } | |
c66ac9db NB |
4477 | } |
4478 | /* | |
4479 | * Setup the starting pointer and total t_tasks_sg_linked_no including | |
4480 | * padding SGs for linking and to mark the end. | |
4481 | */ | |
a1d8b49a AG |
4482 | cmd->t_tasks_sg_chained = sg_first; |
4483 | cmd->t_tasks_sg_chained_no = sg_count; | |
c66ac9db | 4484 | |
a1d8b49a AG |
4485 | DEBUG_CMD_M("Setup cmd: %p cmd->t_tasks_sg_chained: %p and" |
4486 | " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained, | |
4487 | cmd->t_tasks_sg_chained_no); | |
c66ac9db | 4488 | |
a1d8b49a AG |
4489 | for_each_sg(cmd->t_tasks_sg_chained, sg, |
4490 | cmd->t_tasks_sg_chained_no, i) { | |
c66ac9db | 4491 | |
5951146d AG |
4492 | DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d\n", |
4493 | i, sg, sg_page(sg), sg->length, sg->offset); | |
c66ac9db NB |
4494 | if (sg_is_chain(sg)) |
4495 | DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg); | |
4496 | if (sg_is_last(sg)) | |
4497 | DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg); | |
4498 | } | |
c66ac9db NB |
4499 | } |
4500 | EXPORT_SYMBOL(transport_do_task_sg_chain); | |
4501 | ||
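/*
 * Illustrative sketch (not part of the original source): the chaining done
 * by transport_do_task_sg_chain() above rests on two lib/scatterlist.c
 * primitives.  Chaining two independently allocated tables requires the
 * first table to carry one extra (padding) entry for the chain link, which
 * is why task->task_padded_sg is checked above.
 */
#include <linux/scatterlist.h>

static void chain_two_sg_tables(struct scatterlist *first,
				unsigned int first_nents,
				struct scatterlist *second,
				unsigned int second_nents)
{
	/* 'first' was allocated with first_nents + 1 entries; the extra
	 * entry becomes the chain pointer into 'second'. */
	sg_chain(first, first_nents + 1, second);

	/* Terminate the combined list at the tail of 'second' */
	sg_mark_end(&second[second_nents - 1]);
}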
4502 | static int transport_do_se_mem_map( | |
4503 | struct se_device *dev, | |
4504 | struct se_task *task, | |
4505 | struct list_head *se_mem_list, | |
4506 | void *in_mem, | |
4507 | struct se_mem *in_se_mem, | |
4508 | struct se_mem **out_se_mem, | |
4509 | u32 *se_mem_cnt, | |
4510 | u32 *task_offset_in) | |
4511 | { | |
4512 | u32 task_offset = *task_offset_in; | |
4513 | int ret = 0; | |
4514 | /* | |
4515 | * se_subsystem_api_t->do_se_mem_map is used when internal allocation | |
4516 | * has been done by the transport plugin. | |
4517 | */ | |
e3d6f909 AG |
4518 | if (dev->transport->do_se_mem_map) { |
4519 | ret = dev->transport->do_se_mem_map(task, se_mem_list, | |
c66ac9db NB |
4520 | in_mem, in_se_mem, out_se_mem, se_mem_cnt, |
4521 | task_offset_in); | |
4522 | if (ret == 0) | |
a1d8b49a | 4523 | task->task_se_cmd->t_tasks_se_num += *se_mem_cnt; |
c66ac9db NB |
4524 | |
4525 | return ret; | |
4526 | } | |
e63af958 NB |
4527 | |
4528 | BUG_ON(list_empty(se_mem_list)); | |
c66ac9db NB |
4529 | /* |
4530 | * This is the normal path for non-BIDI and BIDI-COMMAND |
4531 | * WRITE payloads. If we need to do BIDI READ passthrough for |
4532 | * TCM/pSCSI, the first call to transport_do_se_mem_map -> |
e3d6f909 | 4533 | * transport_init_task_sg() -> transport_map_mem_to_sg() will do the |
c66ac9db NB |
4534 | * allocation for task->task_sg_bidi, and the subsequent call to |
4535 | * transport_do_se_mem_map() from transport_generic_get_cdb_count() maps it. |
4536 | */ |
4537 | if (!(task->task_sg_bidi)) { | |
4538 | /* | |
4539 | * Assume default that transport plugin speaks preallocated | |
4540 | * scatterlists. | |
4541 | */ | |
e3d6f909 AG |
4542 | ret = transport_init_task_sg(task, in_se_mem, task_offset); |
4543 | if (ret <= 0) | |
4544 | return ret; | |
c66ac9db NB |
4545 | /* |
4546 | * struct se_task->task_sg now contains the struct scatterlist array. | |
4547 | */ | |
4548 | return transport_map_mem_to_sg(task, se_mem_list, task->task_sg, | |
4549 | in_se_mem, out_se_mem, se_mem_cnt, | |
4550 | task_offset_in); | |
4551 | } | |
4552 | /* | |
4553 | * Handle the se_mem_list -> struct task->task_sg_bidi | |
4554 | * memory map for the extra BIDI READ payload | |
4555 | */ | |
4556 | return transport_map_mem_to_sg(task, se_mem_list, task->task_sg_bidi, | |
4557 | in_se_mem, out_se_mem, se_mem_cnt, | |
4558 | task_offset_in); | |
4559 | } | |
4560 | ||
a1d8b49a AG |
4561 | /* |
4562 | * Break up cmd into chunks transport can handle | |
4563 | */ | |
4564 | static u32 transport_allocate_tasks( | |
c66ac9db NB |
4565 | struct se_cmd *cmd, |
4566 | unsigned long long lba, | |
4567 | u32 sectors, | |
4568 | enum dma_data_direction data_direction, | |
4569 | struct list_head *mem_list, | |
4570 | int set_counts) | |
4571 | { | |
4572 | unsigned char *cdb = NULL; | |
4573 | struct se_task *task; | |
a1d8b49a AG |
4574 | struct se_mem *se_mem = NULL; |
4575 | struct se_mem *se_mem_lout = NULL; | |
4576 | struct se_mem *se_mem_bidi = NULL; | |
4577 | struct se_mem *se_mem_bidi_lout = NULL; | |
5951146d | 4578 | struct se_device *dev = cmd->se_dev; |
a1d8b49a AG |
4579 | int ret; |
4580 | u32 task_offset_in = 0; | |
4581 | u32 se_mem_cnt = 0; | |
4582 | u32 se_mem_bidi_cnt = 0; | |
4583 | u32 task_cdbs = 0; | |
c66ac9db | 4584 | |
a1d8b49a | 4585 | BUG_ON(!mem_list); |
c66ac9db NB |
4586 | /* |
4587 | * Using RAMDISK_DR backstores is the only case where |
4588 | * mem_list can be empty at this point. |
4589 | */ | |
4590 | if (!(list_empty(mem_list))) | |
5951146d | 4591 | se_mem = list_first_entry(mem_list, struct se_mem, se_list); |
c66ac9db NB |
4592 | /* |
4593 | * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to | |
4594 | * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation | |
4595 | */ | |
a1d8b49a | 4596 | if (!list_empty(&cmd->t_mem_bidi_list) && |
e3d6f909 | 4597 | (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) |
a1d8b49a | 4598 | se_mem_bidi = list_first_entry(&cmd->t_mem_bidi_list, |
c66ac9db NB |
4599 | struct se_mem, se_list); |
4600 | ||
4601 | while (sectors) { | |
a1d8b49a AG |
4602 | sector_t limited_sectors; |
4603 | ||
c66ac9db | 4604 | DEBUG_VOL("ITT[0x%08x] LBA(%llu) SectorsLeft(%u) EOBJ(%llu)\n", |
e3d6f909 | 4605 | cmd->se_tfo->get_task_tag(cmd), lba, sectors, |
c66ac9db NB |
4606 | transport_dev_end_lba(dev)); |
4607 | ||
a1d8b49a AG |
4608 | limited_sectors = transport_limit_task_sectors(dev, lba, sectors); |
4609 | if (!limited_sectors) | |
4610 | break; | |
4611 | ||
c66ac9db | 4612 | task = transport_generic_get_task(cmd, data_direction); |
a1d8b49a | 4613 | if (!task) |
c66ac9db NB |
4614 | goto out; |
4615 | ||
c66ac9db | 4616 | task->task_lba = lba; |
a1d8b49a | 4617 | task->task_sectors = limited_sectors; |
c66ac9db NB |
4618 | lba += task->task_sectors; |
4619 | sectors -= task->task_sectors; | |
4620 | task->task_size = (task->task_sectors * | |
e3d6f909 | 4621 | dev->se_sub_dev->se_dev_attrib.block_size); |
c66ac9db | 4622 | |
e3d6f909 | 4623 | cdb = dev->transport->get_cdb(task); |
a1d8b49a AG |
4624 | /* Should be part of task, can't fail */ |
4625 | BUG_ON(!cdb); | |
4626 | ||
4627 | memcpy(cdb, cmd->t_task_cdb, | |
4628 | scsi_command_size(cmd->t_task_cdb)); | |
4629 | ||
4630 | /* Update new cdb with updated lba/sectors */ | |
4631 | cmd->transport_split_cdb(task->task_lba, | |
4632 | &task->task_sectors, cdb); | |
c66ac9db NB |
4633 | |
4634 | /* | |
4635 | * Perform the SE OBJ plugin and/or Transport plugin specific |
a1d8b49a | 4636 | * mapping for cmd->t_mem_list, and set up |
c66ac9db NB |
4637 | * task->task_sg and, if necessary, task->task_sg_bidi. |
4638 | */ | |
4639 | ret = transport_do_se_mem_map(dev, task, mem_list, | |
4640 | NULL, se_mem, &se_mem_lout, &se_mem_cnt, | |
4641 | &task_offset_in); | |
4642 | if (ret < 0) | |
4643 | goto out; | |
4644 | ||
4645 | se_mem = se_mem_lout; | |
4646 | /* | |
a1d8b49a | 4647 | * Setup the cmd->t_mem_bidi_list -> task->task_sg_bidi |
c66ac9db NB |
4648 | * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI |
4649 | * | |
4650 | * Note that the first call to transport_do_se_mem_map() above will | |
4651 | * allocate struct se_task->task_sg_bidi in transport_do_se_mem_map() | |
e3d6f909 | 4652 | * -> transport_init_task_sg(), and the second here will do the |
c66ac9db NB |
4653 | * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI. |
4654 | */ | |
4655 | if (task->task_sg_bidi != NULL) { | |
4656 | ret = transport_do_se_mem_map(dev, task, | |
a1d8b49a | 4657 | &cmd->t_mem_bidi_list, NULL, |
c66ac9db NB |
4658 | se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt, |
4659 | &task_offset_in); | |
4660 | if (ret < 0) | |
4661 | goto out; | |
4662 | ||
4663 | se_mem_bidi = se_mem_bidi_lout; | |
4664 | } | |
4665 | task_cdbs++; | |
4666 | ||
4667 | DEBUG_VOL("Incremented task_cdbs(%u) task->task_sg_num(%u)\n", | |
4668 | task_cdbs, task->task_sg_num); | |
c66ac9db NB |
4669 | } |
4670 | ||
4671 | if (set_counts) { | |
a1d8b49a AG |
4672 | atomic_inc(&cmd->t_fe_count); |
4673 | atomic_inc(&cmd->t_se_count); | |
c66ac9db NB |
4674 | } |
4675 | ||
4676 | DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n", | |
e3d6f909 | 4677 | cmd->se_tfo->get_task_tag(cmd), (data_direction == DMA_TO_DEVICE) |
c66ac9db NB |
4678 | ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE", task_cdbs); |
4679 | ||
4680 | return task_cdbs; | |
4681 | out: | |
4682 | return 0; | |
4683 | } | |
4684 | ||
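/*
 * Illustrative sketch (not part of the original source): stripped of the
 * se_mem mapping, the loop in transport_allocate_tasks() above reduces to
 * splitting an (lba, sectors) range into per-task chunks.  "max_sectors"
 * stands in for whatever limit transport_limit_task_sectors() enforces.
 */
#include <linux/kernel.h>

static u32 split_into_task_ranges(unsigned long long lba, u32 sectors,
				  u32 max_sectors)
{
	u32 task_count = 0;

	while (sectors) {
		u32 chunk = min_t(u32, sectors, max_sectors);

		/* A real implementation would allocate a struct se_task
		 * covering [lba, lba + chunk) here. */
		lba += chunk;
		sectors -= chunk;
		task_count++;
	}
	return task_count;
}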
4685 | static int | |
4686 | transport_map_control_cmd_to_task(struct se_cmd *cmd) | |
4687 | { | |
5951146d | 4688 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
4689 | unsigned char *cdb; |
4690 | struct se_task *task; | |
4691 | int ret; | |
4692 | ||
4693 | task = transport_generic_get_task(cmd, cmd->data_direction); | |
4694 | if (!task) | |
4695 | return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; | |
4696 | ||
e3d6f909 | 4697 | cdb = dev->transport->get_cdb(task); |
a1d8b49a AG |
4698 | BUG_ON(!cdb); |
4699 | memcpy(cdb, cmd->t_task_cdb, | |
4700 | scsi_command_size(cmd->t_task_cdb)); | |
c66ac9db NB |
4701 | |
4702 | task->task_size = cmd->data_length; | |
4703 | task->task_sg_num = | |
4704 | (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) ? 1 : 0; | |
4705 | ||
a1d8b49a AG |
4706 | atomic_inc(&cmd->t_fe_count); |
4707 | atomic_inc(&cmd->t_se_count); | |
c66ac9db NB |
4708 | |
4709 | if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) { | |
4710 | struct se_mem *se_mem = NULL, *se_mem_lout = NULL; | |
4711 | u32 se_mem_cnt = 0, task_offset = 0; | |
4712 | ||
a1d8b49a AG |
4713 | if (!list_empty(&cmd->t_mem_list)) |
4714 | se_mem = list_first_entry(&cmd->t_mem_list, | |
e63af958 | 4715 | struct se_mem, se_list); |
c66ac9db NB |
4716 | |
4717 | ret = transport_do_se_mem_map(dev, task, | |
a1d8b49a | 4718 | &cmd->t_mem_list, NULL, se_mem, |
c66ac9db NB |
4719 | &se_mem_lout, &se_mem_cnt, &task_offset); |
4720 | if (ret < 0) | |
4721 | return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; | |
4722 | ||
4723 | if (dev->transport->map_task_SG) | |
4724 | return dev->transport->map_task_SG(task); | |
4725 | return 0; | |
4726 | } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) { | |
4727 | if (dev->transport->map_task_non_SG) | |
4728 | return dev->transport->map_task_non_SG(task); | |
4729 | return 0; | |
4730 | } else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { | |
4731 | if (dev->transport->cdb_none) | |
4732 | return dev->transport->cdb_none(task); | |
4733 | return 0; | |
4734 | } else { | |
4735 | BUG(); | |
4736 | return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; | |
4737 | } | |
4738 | } | |
4739 | ||
4740 | /* transport_generic_new_cmd(): Called from transport_processing_thread() | |
4741 | * | |
4742 | * Allocate storage transport resources from a set of values predefined | |
4743 | * by transport_generic_cmd_sequencer() from the iSCSI Target RX process. | |
4744 | * Any non-zero return here is treated as an "out of resource" failure. |
4745 | */ | |
4746 | /* | |
4747 | * Generate struct se_task(s) and/or their payloads for this CDB. | |
4748 | */ | |
a1d8b49a | 4749 | int transport_generic_new_cmd(struct se_cmd *cmd) |
c66ac9db | 4750 | { |
c66ac9db | 4751 | struct se_task *task; |
5951146d | 4752 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
4753 | int ret = 0; |
4754 | ||
4755 | /* | |
4756 | * Determine if the TCM fabric module has already allocated physical |
4757 | * memory, and is directly calling transport_generic_map_mem_to_cmd() |
4758 | * to set up beforehand the linked list of physical memory at |
a1d8b49a | 4759 | * cmd->t_mem_list of struct se_mem->se_page |
c66ac9db NB |
4760 | */ |
4761 | if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) { | |
4762 | ret = transport_allocate_resources(cmd); | |
4763 | if (ret < 0) | |
4764 | return ret; | |
4765 | } | |
4766 | ||
c66ac9db NB |
4767 | ret = transport_new_cmd_obj(cmd); |
4768 | if (ret < 0) | |
4769 | return ret; | |
4770 | ||
c66ac9db | 4771 | if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { |
a1d8b49a | 4772 | list_for_each_entry(task, &cmd->t_task_list, t_list) { |
c66ac9db NB |
4773 | if (atomic_read(&task->task_sent)) |
4774 | continue; | |
4775 | if (!dev->transport->map_task_SG) | |
4776 | continue; | |
4777 | ||
4778 | ret = dev->transport->map_task_SG(task); | |
4779 | if (ret < 0) | |
4780 | return ret; | |
4781 | } | |
4782 | } else { | |
4783 | ret = transport_map_control_cmd_to_task(cmd); | |
4784 | if (ret < 0) | |
4785 | return ret; | |
4786 | } | |
4787 | ||
4788 | /* | |
a1d8b49a | 4789 | * For WRITEs, let the fabric know its buffer is ready.. |
c66ac9db NB |
4790 | * This WRITE struct se_cmd (and all of its associated struct se_task's) |
4791 | * will be added to the struct se_device execution queue after its WRITE | |
4792 | * data has arrived. (ie: It gets handled by the transport processing | |
4793 | * thread a second time) | |
4794 | */ | |
4795 | if (cmd->data_direction == DMA_TO_DEVICE) { | |
4796 | transport_add_tasks_to_state_queue(cmd); | |
4797 | return transport_generic_write_pending(cmd); | |
4798 | } | |
4799 | /* | |
4800 | * Everything else but a WRITE, add the struct se_cmd's struct se_task's | |
4801 | * to the execution queue. | |
4802 | */ | |
4803 | transport_execute_tasks(cmd); | |
4804 | return 0; | |
4805 | } | |
a1d8b49a | 4806 | EXPORT_SYMBOL(transport_generic_new_cmd); |
c66ac9db NB |
4807 | |
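/*
 * Illustrative sketch (not part of the original source): the tail of
 * transport_generic_new_cmd() above implements a two-phase WRITE; the
 * command is parked until the fabric delivers its data, while READs and
 * control CDBs execute immediately.  Schematically:
 */
#include <linux/dma-mapping.h>

static int dispatch_new_cmd(enum dma_data_direction dir,
			    int (*write_pending)(void),
			    int (*execute)(void))
{
	if (dir == DMA_TO_DEVICE)
		return write_pending();	/* wait for WRITE data first */
	return execute();		/* everything else runs now */
}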
4808 | /* transport_generic_process_write(): | |
4809 | * | |
4810 | * | |
4811 | */ | |
4812 | void transport_generic_process_write(struct se_cmd *cmd) | |
4813 | { | |
4814 | #if 0 | |
4815 | /* | |
4816 | * Copy SCSI Presented DTL sector(s) from received buffers allocated to | |
4817 | * original EDTL | |
4818 | */ | |
4819 | if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { | |
a1d8b49a | 4820 | if (!cmd->t_tasks_se_num) { |
c66ac9db | 4821 | unsigned char *dst, *buf = |
a1d8b49a | 4822 | (unsigned char *)cmd->t_task_buf; |
c66ac9db NB |
4823 | |
4824 | dst = kzalloc(cmd->cmd_spdtl, GFP_KERNEL); |
4825 | if (!(dst)) { | |
4826 | printk(KERN_ERR "Unable to allocate memory for" | |
4827 | " WRITE underflow\n"); | |
4828 | transport_generic_request_failure(cmd, NULL, | |
4829 | PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1); | |
4830 | return; | |
4831 | } | |
4832 | memcpy(dst, buf, cmd->cmd_spdtl); | |
4833 | ||
a1d8b49a AG |
4834 | kfree(cmd->t_task_buf); |
4835 | cmd->t_task_buf = dst; | |
c66ac9db NB |
4836 | } else { |
4837 | struct scatterlist *sg = | |
a1d8b49a | 4838 | (struct scatterlist *)cmd->t_task_buf; |
c66ac9db NB |
4839 | struct scatterlist *orig_sg; |
4840 | ||
4841 | orig_sg = kzalloc(sizeof(struct scatterlist) * | |
a1d8b49a | 4842 | cmd->t_tasks_se_num, |
c66ac9db NB |
4843 | GFP_KERNEL); |
4844 | if (!(orig_sg)) { | |
4845 | printk(KERN_ERR "Unable to allocate memory" | |
4846 | " for WRITE underflow\n"); | |
4847 | transport_generic_request_failure(cmd, NULL, | |
4848 | PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1); | |
4849 | return; | |
4850 | } | |
4851 | ||
a1d8b49a | 4852 | memcpy(orig_sg, cmd->t_task_buf, |
c66ac9db | 4853 | sizeof(struct scatterlist) * |
a1d8b49a | 4854 | cmd->t_tasks_se_num); |
c66ac9db NB |
4855 | |
4856 | cmd->data_length = cmd->cmd_spdtl; | |
4857 | /* | |
4858 | * FIXME, clear out original struct se_task and state | |
4859 | * information. | |
4860 | */ | |
4861 | if (transport_generic_new_cmd(cmd) < 0) { | |
4862 | transport_generic_request_failure(cmd, NULL, | |
4863 | PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1); | |
4864 | kfree(orig_sg); | |
4865 | return; | |
4866 | } | |
4867 | ||
4868 | transport_memcpy_write_sg(cmd, orig_sg); | |
4869 | } | |
4870 | } | |
4871 | #endif | |
4872 | transport_execute_tasks(cmd); | |
4873 | } | |
4874 | EXPORT_SYMBOL(transport_generic_process_write); | |
4875 | ||
4876 | /* transport_generic_write_pending(): | |
4877 | * | |
4878 | * | |
4879 | */ | |
4880 | static int transport_generic_write_pending(struct se_cmd *cmd) | |
4881 | { | |
4882 | unsigned long flags; | |
4883 | int ret; | |
4884 | ||
a1d8b49a | 4885 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 4886 | cmd->t_state = TRANSPORT_WRITE_PENDING; |
a1d8b49a | 4887 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
4888 | /* |
4889 | * For the TCM control CDBs using a contiguous buffer, do the memcpy | |
4890 | * from the passed Linux/SCSI struct scatterlist located at | |
a1d8b49a AG |
4891 | * se_cmd->t_task_pt_sgl to the contiguous buffer at |
4892 | * se_cmd->t_task_buf. | |
c66ac9db NB |
4893 | */ |
4894 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG) | |
a1d8b49a AG |
4895 | sg_copy_to_buffer(cmd->t_task_pt_sgl, |
4896 | cmd->t_task_pt_sgl_num, | |
4897 | cmd->t_task_buf, | |
4898 | cmd->data_length); | |
c66ac9db NB |
4899 | /* |
4900 | * Clear the se_cmd for WRITE_PENDING status in order to set | |
a1d8b49a | 4901 | * cmd->t_transport_active=0 so that transport_generic_handle_data |
c66ac9db | 4902 | * can be called from HW target mode interrupt code. This is safe |
e3d6f909 | 4903 | * to be called with transport_off=1 before the cmd->se_tfo->write_pending |
c66ac9db NB |
4904 | * because the se_cmd->se_lun pointer is not being cleared. |
4905 | */ | |
4906 | transport_cmd_check_stop(cmd, 1, 0); | |
4907 | ||
4908 | /* | |
4909 | * Call the fabric write_pending function here to let the | |
4910 | * frontend know that WRITE buffers are ready. | |
4911 | */ | |
e3d6f909 | 4912 | ret = cmd->se_tfo->write_pending(cmd); |
c66ac9db NB |
4913 | if (ret < 0) |
4914 | return ret; | |
4915 | ||
4916 | return PYX_TRANSPORT_WRITE_PENDING; | |
4917 | } | |
4918 | ||
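/*
 * Illustrative sketch (not part of the original source): the
 * SCF_PASSTHROUGH_CONTIG_TO_SG copy above uses the stock scatterlist
 * helper, which handles highmem pages and partial-entry offsets itself:
 */
#include <linux/scatterlist.h>

static size_t copy_sgl_to_linear(struct scatterlist *sgl, unsigned int nents,
				 void *buf, size_t buflen)
{
	/* Returns the number of bytes actually copied, which may be
	 * less than buflen if the scatterlist is shorter. */
	return sg_copy_to_buffer(sgl, nents, buf, buflen);
}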
4919 | /* transport_release_cmd_to_pool(): | |
4920 | * | |
4921 | * | |
4922 | */ | |
4923 | void transport_release_cmd_to_pool(struct se_cmd *cmd) | |
4924 | { | |
e3d6f909 | 4925 | BUG_ON(!cmd->se_tfo); |
c66ac9db NB |
4926 | |
4927 | transport_free_se_cmd(cmd); | |
e3d6f909 | 4928 | cmd->se_tfo->release_cmd_to_pool(cmd); |
c66ac9db NB |
4929 | } |
4930 | EXPORT_SYMBOL(transport_release_cmd_to_pool); | |
4931 | ||
4932 | /* transport_generic_free_cmd(): | |
4933 | * | |
4934 | * Called from processing frontend to release storage engine resources | |
4935 | */ | |
4936 | void transport_generic_free_cmd( | |
4937 | struct se_cmd *cmd, | |
4938 | int wait_for_tasks, | |
4939 | int release_to_pool, | |
4940 | int session_reinstatement) | |
4941 | { | |
5951146d | 4942 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) |
c66ac9db NB |
4943 | transport_release_cmd_to_pool(cmd); |
4944 | else { | |
4945 | core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd); | |
4946 | ||
e3d6f909 | 4947 | if (cmd->se_lun) { |
c66ac9db NB |
4948 | #if 0 |
4949 | printk(KERN_INFO "cmd: %p ITT: 0x%08x contains" | |
e3d6f909 AG |
4950 | " cmd->se_lun\n", cmd, |
4951 | cmd->se_tfo->get_task_tag(cmd)); | |
c66ac9db NB |
4952 | #endif |
4953 | transport_lun_remove_cmd(cmd); | |
4954 | } | |
4955 | ||
4956 | if (wait_for_tasks && cmd->transport_wait_for_tasks) | |
4957 | cmd->transport_wait_for_tasks(cmd, 0, 0); | |
4958 | ||
f4366772 NB |
4959 | transport_free_dev_tasks(cmd); |
4960 | ||
c66ac9db NB |
4961 | transport_generic_remove(cmd, release_to_pool, |
4962 | session_reinstatement); | |
4963 | } | |
4964 | } | |
4965 | EXPORT_SYMBOL(transport_generic_free_cmd); | |
4966 | ||
4967 | static void transport_nop_wait_for_tasks( | |
4968 | struct se_cmd *cmd, | |
4969 | int remove_cmd, | |
4970 | int session_reinstatement) | |
4971 | { | |
4972 | return; | |
4973 | } | |
4974 | ||
4975 | /* transport_lun_wait_for_tasks(): | |
4976 | * | |
4977 | * Called from ConfigFS context to stop the passed struct se_cmd to allow | |
4978 | * a struct se_lun to be successfully shut down. |
4979 | */ | |
4980 | static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) | |
4981 | { | |
4982 | unsigned long flags; | |
4983 | int ret; | |
4984 | /* | |
4985 | * If the frontend has already requested this struct se_cmd to | |
4986 | * be stopped, we can safely ignore this struct se_cmd. | |
4987 | */ | |
a1d8b49a AG |
4988 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
4989 | if (atomic_read(&cmd->t_transport_stop)) { | |
4990 | atomic_set(&cmd->transport_lun_stop, 0); | |
c66ac9db | 4991 | DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop ==" |
e3d6f909 | 4992 | " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd)); |
a1d8b49a | 4993 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 4994 | transport_cmd_check_stop(cmd, 1, 0); |
e3d6f909 | 4995 | return -EPERM; |
c66ac9db | 4996 | } |
a1d8b49a AG |
4997 | atomic_set(&cmd->transport_lun_fe_stop, 1); |
4998 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
c66ac9db | 4999 | |
5951146d | 5000 | wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq); |
c66ac9db NB |
5001 | |
5002 | ret = transport_stop_tasks_for_cmd(cmd); | |
5003 | ||
5004 | DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:" | |
a1d8b49a | 5005 | " %d\n", cmd, cmd->t_task_cdbs, ret); |
c66ac9db NB |
5006 | if (!ret) { |
5007 | DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n", | |
e3d6f909 | 5008 | cmd->se_tfo->get_task_tag(cmd)); |
a1d8b49a | 5009 | wait_for_completion(&cmd->transport_lun_stop_comp); |
c66ac9db | 5010 | DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n", |
e3d6f909 | 5011 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db | 5012 | } |
5951146d | 5013 | transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); |
c66ac9db NB |
5014 | |
5015 | return 0; | |
5016 | } | |
5017 | ||
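/*
 * Illustrative sketch (not part of the original source): the stop handshake
 * in transport_lun_wait_for_tasks() above is the common "flag plus
 * completion" pattern.  One side requests the stop and sleeps; the side
 * that owns the work observes the flag and completes:
 */
#include <linux/completion.h>
#include <linux/atomic.h>

struct stoppable_work {
	atomic_t		stop_requested;
	struct completion	stopped;
};

static void request_stop(struct stoppable_work *w)
{
	atomic_set(&w->stop_requested, 1);
	wait_for_completion(&w->stopped);	/* sleep until acknowledged */
}

static void worker_checkpoint(struct stoppable_work *w)
{
	if (atomic_read(&w->stop_requested))
		complete(&w->stopped);		/* wake the requester */
}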
5018 | /* #define DEBUG_CLEAR_LUN */ | |
5019 | #ifdef DEBUG_CLEAR_LUN | |
5020 | #define DEBUG_CLEAR_L(x...) printk(KERN_INFO x) | |
5021 | #else | |
5022 | #define DEBUG_CLEAR_L(x...) | |
5023 | #endif | |
5024 | ||
5025 | static void __transport_clear_lun_from_sessions(struct se_lun *lun) | |
5026 | { | |
5027 | struct se_cmd *cmd = NULL; | |
5028 | unsigned long lun_flags, cmd_flags; | |
5029 | /* | |
5030 | * Do exception processing and return CHECK_CONDITION status to the | |
5031 | * Initiator Port. | |
5032 | */ | |
5033 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); | |
5951146d AG |
5034 | while (!list_empty(&lun->lun_cmd_list)) { |
5035 | cmd = list_first_entry(&lun->lun_cmd_list, | |
5036 | struct se_cmd, se_lun_node); | |
5037 | list_del(&cmd->se_lun_node); | |
5038 | ||
a1d8b49a | 5039 | atomic_set(&cmd->transport_lun_active, 0); |
c66ac9db NB |
5040 | /* |
5041 | * This will notify iscsi_target_transport.c: | |
5042 | * transport_cmd_check_stop() that a LUN shutdown is in | |
5043 | * progress for the iscsi_cmd_t. | |
5044 | */ | |
a1d8b49a AG |
5045 | spin_lock(&cmd->t_state_lock); |
5046 | DEBUG_CLEAR_L("SE_LUN[%d] - Setting cmd->transport" | |
c66ac9db | 5047 | "_lun_stop for ITT: 0x%08x\n", |
e3d6f909 AG |
5048 | cmd->se_lun->unpacked_lun, |
5049 | cmd->se_tfo->get_task_tag(cmd)); | |
a1d8b49a AG |
5050 | atomic_set(&cmd->transport_lun_stop, 1); |
5051 | spin_unlock(&cmd->t_state_lock); | |
c66ac9db NB |
5052 | |
5053 | spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); | |
5054 | ||
e3d6f909 | 5055 | if (!(cmd->se_lun)) { |
c66ac9db | 5056 | printk(KERN_ERR "ITT: 0x%08x, [i,t]_state: %u/%u\n", |
e3d6f909 AG |
5057 | cmd->se_tfo->get_task_tag(cmd), |
5058 | cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); | |
c66ac9db NB |
5059 | BUG(); |
5060 | } | |
5061 | /* | |
5062 | * If the Storage engine still owns the iscsi_cmd_t, determine | |
5063 | * and/or stop its context. | |
5064 | */ | |
5065 | DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x before transport" | |
e3d6f909 AG |
5066 | "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun, |
5067 | cmd->se_tfo->get_task_tag(cmd)); | |
c66ac9db | 5068 | |
e3d6f909 | 5069 | if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) { |
c66ac9db NB |
5070 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); |
5071 | continue; | |
5072 | } | |
5073 | ||
5074 | DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x after transport_lun" | |
5075 | "_wait_for_tasks(): SUCCESS\n", | |
e3d6f909 AG |
5076 | cmd->se_lun->unpacked_lun, |
5077 | cmd->se_tfo->get_task_tag(cmd)); | |
c66ac9db | 5078 | |
a1d8b49a AG |
5079 | spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); |
5080 | if (!(atomic_read(&cmd->transport_dev_active))) { | |
5081 | spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); | |
c66ac9db NB |
5082 | goto check_cond; |
5083 | } | |
a1d8b49a | 5084 | atomic_set(&cmd->transport_dev_active, 0); |
c66ac9db | 5085 | transport_all_task_dev_remove_state(cmd); |
a1d8b49a | 5086 | spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); |
c66ac9db NB |
5087 | |
5088 | transport_free_dev_tasks(cmd); | |
5089 | /* | |
5090 | * The Storage engine stopped this struct se_cmd before it was | |
5091 | * sent to the fabric frontend for delivery back to the |
5092 | * Initiator Node. Return this SCSI CDB back with a |
5093 | * CHECK_CONDITION status. | |
5094 | */ | |
5095 | check_cond: | |
5096 | transport_send_check_condition_and_sense(cmd, | |
5097 | TCM_NON_EXISTENT_LUN, 0); | |
5098 | /* | |
5099 | * If the fabric frontend is waiting for this iscsi_cmd_t to | |
5100 | * be released, notify the waiting thread now that LU has | |
5101 | * finished accessing it. | |
5102 | */ | |
a1d8b49a AG |
5103 | spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); |
5104 | if (atomic_read(&cmd->transport_lun_fe_stop)) { | |
c66ac9db NB |
5105 | DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for" |
5106 | " struct se_cmd: %p ITT: 0x%08x\n", | |
5107 | lun->unpacked_lun, | |
e3d6f909 | 5108 | cmd, cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db | 5109 | |
a1d8b49a | 5110 | spin_unlock_irqrestore(&cmd->t_state_lock, |
c66ac9db NB |
5111 | cmd_flags); |
5112 | transport_cmd_check_stop(cmd, 1, 0); | |
a1d8b49a | 5113 | complete(&cmd->transport_lun_fe_stop_comp); |
c66ac9db NB |
5114 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); |
5115 | continue; | |
5116 | } | |
5117 | DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n", | |
e3d6f909 | 5118 | lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db | 5119 | |
a1d8b49a | 5120 | spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); |
c66ac9db NB |
5121 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); |
5122 | } | |
5123 | spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); | |
5124 | } | |
5125 | ||
5126 | static int transport_clear_lun_thread(void *p) | |
5127 | { | |
5128 | struct se_lun *lun = (struct se_lun *)p; | |
5129 | ||
5130 | __transport_clear_lun_from_sessions(lun); | |
5131 | complete(&lun->lun_shutdown_comp); | |
5132 | ||
5133 | return 0; | |
5134 | } | |
5135 | ||
5136 | int transport_clear_lun_from_sessions(struct se_lun *lun) | |
5137 | { | |
5138 | struct task_struct *kt; | |
5139 | ||
5951146d | 5140 | kt = kthread_run(transport_clear_lun_thread, lun, |
c66ac9db NB |
5141 | "tcm_cl_%u", lun->unpacked_lun); |
5142 | if (IS_ERR(kt)) { | |
5143 | printk(KERN_ERR "Unable to start clear_lun thread\n"); | |
e3d6f909 | 5144 | return PTR_ERR(kt); |
c66ac9db NB |
5145 | } |
5146 | wait_for_completion(&lun->lun_shutdown_comp); | |
5147 | ||
5148 | return 0; | |
5149 | } | |
5150 | ||
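/*
 * Illustrative sketch (not part of the original source): the synchronous
 * kthread pattern used by transport_clear_lun_from_sessions() above -
 * spawn a helper thread, have it signal a completion, wait for it - in
 * minimal form:
 */
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/completion.h>

static DECLARE_COMPLETION(example_done);

static int example_worker(void *data)
{
	/* ... perform the shutdown work ... */
	complete(&example_done);
	return 0;
}

static int run_worker_and_wait(void)
{
	struct task_struct *kt;

	kt = kthread_run(example_worker, NULL, "example_worker");
	if (IS_ERR(kt))
		return PTR_ERR(kt);
	wait_for_completion(&example_done);
	return 0;
}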
5151 | /* transport_generic_wait_for_tasks(): | |
5152 | * | |
5153 | * Called from frontend or passthrough context to wait for storage engine | |
5154 | * to pause and/or release frontend generated struct se_cmd. | |
5155 | */ | |
5156 | static void transport_generic_wait_for_tasks( | |
5157 | struct se_cmd *cmd, | |
5158 | int remove_cmd, | |
5159 | int session_reinstatement) | |
5160 | { | |
5161 | unsigned long flags; | |
5162 | ||
5163 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) | |
5164 | return; | |
5165 | ||
a1d8b49a | 5166 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db NB |
5167 | /* |
5168 | * If we are already stopped due to an external event (ie: LUN shutdown) | |
5169 | * sleep until the connection can have the passed struct se_cmd back. | |
a1d8b49a | 5170 | * The cmd->transport_lun_fe_stop_comp will be completed by |
c66ac9db NB |
5171 | * transport_clear_lun_from_sessions() once the ConfigFS context caller |
5172 | * has completed its operation on the struct se_cmd. | |
5173 | */ | |
a1d8b49a | 5174 | if (atomic_read(&cmd->transport_lun_stop)) { |
c66ac9db NB |
5175 | |
5176 | DEBUG_TRANSPORT_S("wait_for_tasks: Stopping" | |
e3d6f909 | 5177 | " wait_for_completion(&cmd->t_tasktransport_lun_fe" |
c66ac9db | 5178 | "_stop_comp); for ITT: 0x%08x\n", |
e3d6f909 | 5179 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db NB |
5180 | /* |
5181 | * There is a special case for WRITES where a FE exception + | |
5182 | * LUN shutdown means ConfigFS context is still sleeping on | |
5183 | * transport_lun_stop_comp in transport_lun_wait_for_tasks(). | |
5184 | * We go ahead and up transport_lun_stop_comp just to be sure | |
5185 | * here. | |
5186 | */ | |
a1d8b49a AG |
5187 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
5188 | complete(&cmd->transport_lun_stop_comp); | |
5189 | wait_for_completion(&cmd->transport_lun_fe_stop_comp); | |
5190 | spin_lock_irqsave(&cmd->t_state_lock, flags); | |
c66ac9db NB |
5191 | |
5192 | transport_all_task_dev_remove_state(cmd); | |
5193 | /* | |
5194 | * At this point, the frontend who was the originator of this | |
5195 | * struct se_cmd, now owns the structure and can be released through | |
5196 | * normal means below. | |
5197 | */ | |
5198 | DEBUG_TRANSPORT_S("wait_for_tasks: Stopped" | |
e3d6f909 | 5199 | " wait_for_completion(&cmd->t_tasktransport_lun_fe_" |
c66ac9db | 5200 | "stop_comp); for ITT: 0x%08x\n", |
e3d6f909 | 5201 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db | 5202 | |
a1d8b49a | 5203 | atomic_set(&cmd->transport_lun_stop, 0); |
c66ac9db | 5204 | } |
a1d8b49a AG |
5205 | if (!atomic_read(&cmd->t_transport_active) || |
5206 | atomic_read(&cmd->t_transport_aborted)) | |
c66ac9db NB |
5207 | goto remove; |
5208 | ||
a1d8b49a | 5209 | atomic_set(&cmd->t_transport_stop, 1); |
c66ac9db NB |
5210 | |
5211 | DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x" | |
5212 | " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop" | |
e3d6f909 AG |
5213 | " = TRUE\n", cmd, cmd->se_tfo->get_task_tag(cmd), |
5214 | cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, | |
c66ac9db NB |
5215 | cmd->deferred_t_state); |
5216 | ||
a1d8b49a | 5217 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 5218 | |
5951146d | 5219 | wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq); |
c66ac9db | 5220 | |
a1d8b49a | 5221 | wait_for_completion(&cmd->t_transport_stop_comp); |
c66ac9db | 5222 | |
a1d8b49a AG |
5223 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
5224 | atomic_set(&cmd->t_transport_active, 0); | |
5225 | atomic_set(&cmd->t_transport_stop, 0); | |
c66ac9db NB |
5226 | |
5227 | DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_compltion(" | |
a1d8b49a | 5228 | "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n", |
e3d6f909 | 5229 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db | 5230 | remove: |
a1d8b49a | 5231 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
5232 | if (!remove_cmd) |
5233 | return; | |
5234 | ||
5235 | transport_generic_free_cmd(cmd, 0, 0, session_reinstatement); | |
5236 | } | |
5237 | ||
5238 | static int transport_get_sense_codes( | |
5239 | struct se_cmd *cmd, | |
5240 | u8 *asc, | |
5241 | u8 *ascq) | |
5242 | { | |
5243 | *asc = cmd->scsi_asc; | |
5244 | *ascq = cmd->scsi_ascq; | |
5245 | ||
5246 | return 0; | |
5247 | } | |
5248 | ||
5249 | static int transport_set_sense_codes( | |
5250 | struct se_cmd *cmd, | |
5251 | u8 asc, | |
5252 | u8 ascq) | |
5253 | { | |
5254 | cmd->scsi_asc = asc; | |
5255 | cmd->scsi_ascq = ascq; | |
5256 | ||
5257 | return 0; | |
5258 | } | |
5259 | ||
5260 | int transport_send_check_condition_and_sense( | |
5261 | struct se_cmd *cmd, | |
5262 | u8 reason, | |
5263 | int from_transport) | |
5264 | { | |
5265 | unsigned char *buffer = cmd->sense_buffer; | |
5266 | unsigned long flags; | |
5267 | int offset; | |
5268 | u8 asc = 0, ascq = 0; | |
5269 | ||
a1d8b49a | 5270 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 5271 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { |
a1d8b49a | 5272 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
5273 | return 0; |
5274 | } | |
5275 | cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; | |
a1d8b49a | 5276 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
5277 | |
5278 | if (!reason && from_transport) | |
5279 | goto after_reason; | |
5280 | ||
5281 | if (!from_transport) | |
5282 | cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; | |
5283 | /* | |
5284 | * Data Segment and SenseLength of the fabric response PDU. | |
5285 | * | |
5286 | * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE | |
5287 | * from include/scsi/scsi_cmnd.h | |
5288 | */ | |
e3d6f909 | 5289 | offset = cmd->se_tfo->set_fabric_sense_len(cmd, |
c66ac9db NB |
5290 | TRANSPORT_SENSE_BUFFER); |
5291 | /* | |
5292 | * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses | |
5293 | * SENSE KEY values from include/scsi/scsi.h | |
5294 | */ | |
5295 | switch (reason) { | |
5296 | case TCM_NON_EXISTENT_LUN: | |
5297 | case TCM_UNSUPPORTED_SCSI_OPCODE: | |
5298 | case TCM_SECTOR_COUNT_TOO_MANY: | |
5299 | /* CURRENT ERROR */ | |
5300 | buffer[offset] = 0x70; | |
5301 | /* ILLEGAL REQUEST */ | |
5302 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | |
5303 | /* INVALID COMMAND OPERATION CODE */ | |
5304 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20; | |
5305 | break; | |
5306 | case TCM_UNKNOWN_MODE_PAGE: | |
5307 | /* CURRENT ERROR */ | |
5308 | buffer[offset] = 0x70; | |
5309 | /* ILLEGAL REQUEST */ | |
5310 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | |
5311 | /* INVALID FIELD IN CDB */ | |
5312 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24; | |
5313 | break; | |
5314 | case TCM_CHECK_CONDITION_ABORT_CMD: | |
5315 | /* CURRENT ERROR */ | |
5316 | buffer[offset] = 0x70; | |
5317 | /* ABORTED COMMAND */ | |
5318 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
5319 | /* BUS DEVICE RESET FUNCTION OCCURRED */ | |
5320 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29; | |
5321 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03; | |
5322 | break; | |
5323 | case TCM_INCORRECT_AMOUNT_OF_DATA: | |
5324 | /* CURRENT ERROR */ | |
5325 | buffer[offset] = 0x70; | |
5326 | /* ABORTED COMMAND */ | |
5327 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
5328 | /* WRITE ERROR */ | |
5329 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c; | |
5330 | /* NOT ENOUGH UNSOLICITED DATA */ | |
5331 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d; | |
5332 | break; | |
5333 | case TCM_INVALID_CDB_FIELD: | |
5334 | /* CURRENT ERROR */ | |
5335 | buffer[offset] = 0x70; | |
5336 | /* ABORTED COMMAND */ | |
5337 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
5338 | /* INVALID FIELD IN CDB */ | |
5339 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24; | |
5340 | break; | |
5341 | case TCM_INVALID_PARAMETER_LIST: | |
5342 | /* CURRENT ERROR */ | |
5343 | buffer[offset] = 0x70; | |
5344 | /* ABORTED COMMAND */ | |
5345 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
5346 | /* INVALID FIELD IN PARAMETER LIST */ | |
5347 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26; | |
5348 | break; | |
5349 | case TCM_UNEXPECTED_UNSOLICITED_DATA: | |
5350 | /* CURRENT ERROR */ | |
5351 | buffer[offset] = 0x70; | |
5352 | /* ABORTED COMMAND */ | |
5353 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
5354 | /* WRITE ERROR */ | |
5355 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c; | |
5356 | /* UNEXPECTED_UNSOLICITED_DATA */ | |
5357 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c; | |
5358 | break; | |
5359 | case TCM_SERVICE_CRC_ERROR: | |
5360 | /* CURRENT ERROR */ | |
5361 | buffer[offset] = 0x70; | |
5362 | /* ABORTED COMMAND */ | |
5363 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
5364 | /* PROTOCOL SERVICE CRC ERROR */ | |
5365 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47; | |
5366 | /* N/A */ | |
5367 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05; | |
5368 | break; | |
5369 | case TCM_SNACK_REJECTED: | |
5370 | /* CURRENT ERROR */ | |
5371 | buffer[offset] = 0x70; | |
5372 | /* ABORTED COMMAND */ | |
5373 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
5374 | /* READ ERROR */ | |
5375 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11; | |
5376 | /* FAILED RETRANSMISSION REQUEST */ | |
5377 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13; | |
5378 | break; | |
5379 | case TCM_WRITE_PROTECTED: | |
5380 | /* CURRENT ERROR */ | |
5381 | buffer[offset] = 0x70; | |
5382 | /* DATA PROTECT */ | |
5383 | buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT; | |
5384 | /* WRITE PROTECTED */ | |
5385 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27; | |
5386 | break; | |
5387 | case TCM_CHECK_CONDITION_UNIT_ATTENTION: | |
5388 | /* CURRENT ERROR */ | |
5389 | buffer[offset] = 0x70; | |
5390 | /* UNIT ATTENTION */ | |
5391 | buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION; | |
5392 | core_scsi3_ua_for_check_condition(cmd, &asc, &ascq); | |
5393 | buffer[offset+SPC_ASC_KEY_OFFSET] = asc; | |
5394 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq; | |
5395 | break; | |
5396 | case TCM_CHECK_CONDITION_NOT_READY: | |
5397 | /* CURRENT ERROR */ | |
5398 | buffer[offset] = 0x70; | |
5399 | /* Not Ready */ | |
5400 | buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY; | |
5401 | transport_get_sense_codes(cmd, &asc, &ascq); | |
5402 | buffer[offset+SPC_ASC_KEY_OFFSET] = asc; | |
5403 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq; | |
5404 | break; | |
5405 | case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: | |
5406 | default: | |
5407 | /* CURRENT ERROR */ | |
5408 | buffer[offset] = 0x70; | |
5409 | /* ILLEGAL REQUEST */ | |
5410 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | |
5411 | /* LOGICAL UNIT COMMUNICATION FAILURE */ | |
5412 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80; | |
5413 | break; | |
5414 | } | |
5415 | /* | |
5416 | * This code uses linux/include/scsi/scsi.h SAM status codes! | |
5417 | */ | |
5418 | cmd->scsi_status = SAM_STAT_CHECK_CONDITION; | |
5419 | /* | |
5420 | * Automatically padded, this value is encoded in the fabric's | |
5421 | * data_length response PDU containing the SCSI defined sense data. | |
5422 | */ | |
5423 | cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset; | |
5424 | ||
5425 | after_reason: | |
e3d6f909 | 5426 | cmd->se_tfo->queue_status(cmd); |
c66ac9db NB |
5427 | return 0; |
5428 | } | |
5429 | EXPORT_SYMBOL(transport_send_check_condition_and_sense); | |
5430 | ||
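/*
 * Illustrative sketch (not part of the original source): every case in
 * transport_send_check_condition_and_sense() above writes the same SPC
 * fixed-format sense layout (response code 0x70, sense key at byte 2,
 * ASC/ASCQ at bytes 12/13), which a single helper could capture:
 */
#include <linux/types.h>

static void fill_fixed_format_sense(unsigned char *buf, u8 key,
				    u8 asc, u8 ascq)
{
	buf[0]  = 0x70;		/* current error, fixed format */
	buf[2]  = key;		/* SENSE KEY */
	buf[7]  = 10;		/* ADDITIONAL SENSE LENGTH (bytes 8..17) */
	buf[12] = asc;		/* ADDITIONAL SENSE CODE */
	buf[13] = ascq;		/* ADDITIONAL SENSE CODE QUALIFIER */
}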
5431 | int transport_check_aborted_status(struct se_cmd *cmd, int send_status) | |
5432 | { | |
5433 | int ret = 0; | |
5434 | ||
a1d8b49a | 5435 | if (atomic_read(&cmd->t_transport_aborted) != 0) { |
c66ac9db NB |
5436 | if (!(send_status) || |
5437 | (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) | |
5438 | return 1; | |
5439 | #if 0 | |
5440 | printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED" | |
5441 | " status for CDB: 0x%02x ITT: 0x%08x\n", | |
a1d8b49a | 5442 | cmd->t_task_cdb[0], |
e3d6f909 | 5443 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db NB |
5444 | #endif |
5445 | cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; | |
e3d6f909 | 5446 | cmd->se_tfo->queue_status(cmd); |
c66ac9db NB |
5447 | ret = 1; |
5448 | } | |
5449 | return ret; | |
5450 | } | |
5451 | EXPORT_SYMBOL(transport_check_aborted_status); | |
5452 | ||
5453 | void transport_send_task_abort(struct se_cmd *cmd) | |
5454 | { | |
5455 | /* | |
5456 | * If there are still expected incoming fabric WRITEs, we wait | |
5457 | * until they have completed before sending a TASK_ABORTED |
5458 | * response. This response with TASK_ABORTED status will be | |
5459 | * queued back to fabric module by transport_check_aborted_status(). | |
5460 | */ | |
5461 | if (cmd->data_direction == DMA_TO_DEVICE) { | |
e3d6f909 | 5462 | if (cmd->se_tfo->write_pending_status(cmd) != 0) { |
a1d8b49a | 5463 | atomic_inc(&cmd->t_transport_aborted); |
c66ac9db NB |
5464 | smp_mb__after_atomic_inc(); |
5465 | cmd->scsi_status = SAM_STAT_TASK_ABORTED; | |
5466 | transport_new_cmd_failure(cmd); | |
5467 | return; | |
5468 | } | |
5469 | } | |
5470 | cmd->scsi_status = SAM_STAT_TASK_ABORTED; | |
5471 | #if 0 | |
5472 | printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x," | |
a1d8b49a | 5473 | " ITT: 0x%08x\n", cmd->t_task_cdb[0], |
e3d6f909 | 5474 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db | 5475 | #endif |
e3d6f909 | 5476 | cmd->se_tfo->queue_status(cmd); |
c66ac9db NB |
5477 | } |
5478 | ||
5479 | /* transport_generic_do_tmr(): | |
5480 | * | |
5481 | * | |
5482 | */ | |
5483 | int transport_generic_do_tmr(struct se_cmd *cmd) | |
5484 | { | |
5951146d | 5485 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
5486 | struct se_tmr_req *tmr = cmd->se_tmr_req; |
5487 | int ret; | |
5488 | ||
5489 | switch (tmr->function) { | |
5c6cd613 | 5490 | case TMR_ABORT_TASK: |
c66ac9db NB |
5491 | tmr->response = TMR_FUNCTION_REJECTED; |
5492 | break; | |
5c6cd613 NB |
5493 | case TMR_ABORT_TASK_SET: |
5494 | case TMR_CLEAR_ACA: | |
5495 | case TMR_CLEAR_TASK_SET: | |
c66ac9db NB |
5496 | tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; |
5497 | break; | |
5c6cd613 | 5498 | case TMR_LUN_RESET: |
c66ac9db NB |
5499 | ret = core_tmr_lun_reset(dev, tmr, NULL, NULL); |
5500 | tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE : | |
5501 | TMR_FUNCTION_REJECTED; | |
5502 | break; | |
5c6cd613 | 5503 | case TMR_TARGET_WARM_RESET: |
c66ac9db NB |
5504 | tmr->response = TMR_FUNCTION_REJECTED; |
5505 | break; | |
5c6cd613 | 5506 | case TMR_TARGET_COLD_RESET: |
c66ac9db NB |
5507 | tmr->response = TMR_FUNCTION_REJECTED; |
5508 | break; | |
c66ac9db NB |
5509 | default: |
5510 | printk(KERN_ERR "Uknown TMR function: 0x%02x.\n", | |
5511 | tmr->function); | |
5512 | tmr->response = TMR_FUNCTION_REJECTED; | |
5513 | break; | |
5514 | } | |
5515 | ||
5516 | cmd->t_state = TRANSPORT_ISTATE_PROCESSING; | |
e3d6f909 | 5517 | cmd->se_tfo->queue_tm_rsp(cmd); |
c66ac9db NB |
5518 | |
5519 | transport_cmd_check_stop(cmd, 2, 0); | |
5520 | return 0; | |
5521 | } | |
5522 | ||
5523 | /* | |
5524 | * Called with spin_lock_irq(&dev->execute_task_lock); held | |
5525 | * | |
5526 | */ | |
5527 | static struct se_task * | |
5528 | transport_get_task_from_state_list(struct se_device *dev) | |
5529 | { | |
5530 | struct se_task *task; | |
5531 | ||
5532 | if (list_empty(&dev->state_task_list)) | |
5533 | return NULL; | |
5534 | ||
5535 | task = list_first_entry(&dev->state_task_list, |
5536 | struct se_task, t_state_list); |
5537 | ||
5538 | list_del(&task->t_state_list); | |
5539 | atomic_set(&task->task_state_active, 0); | |
5540 | ||
5541 | return task; | |
5542 | } | |
5543 | ||
5544 | static void transport_processing_shutdown(struct se_device *dev) | |
5545 | { | |
5546 | struct se_cmd *cmd; | |
c66ac9db | 5547 | struct se_task *task; |
c66ac9db NB |
5548 | unsigned long flags; |
5549 | /* | |
5550 | * Empty the struct se_device's struct se_task state list. | |
5551 | */ | |
5552 | spin_lock_irqsave(&dev->execute_task_lock, flags); | |
5553 | while ((task = transport_get_task_from_state_list(dev))) { | |
e3d6f909 AG |
5554 | if (!task->task_se_cmd) { |
5555 | printk(KERN_ERR "task->task_se_cmd is NULL!\n"); | |
c66ac9db NB |
5556 | continue; |
5557 | } | |
e3d6f909 | 5558 | cmd = task->task_se_cmd; |
c66ac9db | 5559 | |
c66ac9db NB |
5560 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); |
5561 | ||
a1d8b49a | 5562 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db NB |
5563 | |
5564 | DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x," | |
5565 | " i_state/def_i_state: %d/%d, t_state/def_t_state:" | |
5566 | " %d/%d cdb: 0x%02x\n", cmd, task, | |
e3d6f909 AG |
5567 | cmd->se_tfo->get_task_tag(cmd), cmd->cmd_sn, |
5568 | cmd->se_tfo->get_cmd_state(cmd), cmd->deferred_i_state, | |
c66ac9db | 5569 | cmd->t_state, cmd->deferred_t_state, |
a1d8b49a | 5570 | cmd->t_task_cdb[0]); |
c66ac9db NB |
5571 | DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:" |
5572 | " %d t_task_cdbs_sent: %d -- t_transport_active: %d" | |
5573 | " t_transport_stop: %d t_transport_sent: %d\n", | |
e3d6f909 | 5574 | cmd->se_tfo->get_task_tag(cmd), |
a1d8b49a AG |
5575 | cmd->t_task_cdbs, |
5576 | atomic_read(&cmd->t_task_cdbs_left), | |
5577 | atomic_read(&cmd->t_task_cdbs_sent), | |
5578 | atomic_read(&cmd->t_transport_active), | |
5579 | atomic_read(&cmd->t_transport_stop), | |
5580 | atomic_read(&cmd->t_transport_sent)); | |
c66ac9db NB |
5581 | |
5582 | if (atomic_read(&task->task_active)) { | |
5583 | atomic_set(&task->task_stop, 1); | |
5584 | spin_unlock_irqrestore( | |
a1d8b49a | 5585 | &cmd->t_state_lock, flags); |
c66ac9db NB |
5586 | |
5587 | DEBUG_DO("Waiting for task: %p to shutdown for dev:" | |
5588 | " %p\n", task, dev); | |
5589 | wait_for_completion(&task->task_stop_comp); | |
5590 | DEBUG_DO("Completed task: %p shutdown for dev: %p\n", | |
5591 | task, dev); | |
5592 | ||
a1d8b49a AG |
5593 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
5594 | atomic_dec(&cmd->t_task_cdbs_left); | |
c66ac9db NB |
5595 | |
5596 | atomic_set(&task->task_active, 0); | |
5597 | atomic_set(&task->task_stop, 0); | |
52208ae3 NB |
5598 | } else { |
5599 | if (atomic_read(&task->task_execute_queue) != 0) | |
5600 | transport_remove_task_from_execute_queue(task, dev); | |
c66ac9db NB |
5601 | } |
5602 | __transport_stop_task_timer(task, &flags); | |
5603 | ||
a1d8b49a | 5604 | if (!(atomic_dec_and_test(&cmd->t_task_cdbs_ex_left))) { |
c66ac9db | 5605 | spin_unlock_irqrestore( |
a1d8b49a | 5606 | &cmd->t_state_lock, flags); |
c66ac9db NB |
5607 | |
5608 | DEBUG_DO("Skipping task: %p, dev: %p for" | |
5609 | " t_task_cdbs_ex_left: %d\n", task, dev, | |
a1d8b49a | 5610 | atomic_read(&cmd->t_task_cdbs_ex_left)); |
c66ac9db NB |
5611 | |
5612 | spin_lock_irqsave(&dev->execute_task_lock, flags); | |
5613 | continue; | |
5614 | } | |
5615 | ||
a1d8b49a | 5616 | if (atomic_read(&cmd->t_transport_active)) { |
c66ac9db NB |
5617 | DEBUG_DO("got t_transport_active = 1 for task: %p, dev:" |
5618 | " %p\n", task, dev); | |
5619 | ||
a1d8b49a | 5620 | if (atomic_read(&cmd->t_fe_count)) { |
c66ac9db | 5621 | spin_unlock_irqrestore( |
a1d8b49a | 5622 | &cmd->t_state_lock, flags); |
c66ac9db NB |
5623 | transport_send_check_condition_and_sense( |
5624 | cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, | |
5625 | 0); | |
5626 | transport_remove_cmd_from_queue(cmd, | |
5951146d | 5627 | &cmd->se_dev->dev_queue_obj); |
c66ac9db NB |
5628 | |
5629 | transport_lun_remove_cmd(cmd); | |
5630 | transport_cmd_check_stop(cmd, 1, 0); | |
5631 | } else { | |
5632 | spin_unlock_irqrestore( | |
a1d8b49a | 5633 | &cmd->t_state_lock, flags); |
c66ac9db NB |
5634 | |
5635 | transport_remove_cmd_from_queue(cmd, | |
5951146d | 5636 | &cmd->se_dev->dev_queue_obj); |
c66ac9db NB |
5637 | |
5638 | transport_lun_remove_cmd(cmd); | |
5639 | ||
5640 | if (transport_cmd_check_stop(cmd, 1, 0)) | |
5641 | transport_generic_remove(cmd, 0, 0); | |
5642 | } | |
5643 | ||
5644 | spin_lock_irqsave(&dev->execute_task_lock, flags); | |
5645 | continue; | |
5646 | } | |
5647 | DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n", | |
5648 | task, dev); | |
5649 | ||
a1d8b49a | 5650 | if (atomic_read(&cmd->t_fe_count)) { |
c66ac9db | 5651 | spin_unlock_irqrestore( |
a1d8b49a | 5652 | &cmd->t_state_lock, flags); |
c66ac9db NB |
5653 | transport_send_check_condition_and_sense(cmd, |
5654 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); | |
5655 | transport_remove_cmd_from_queue(cmd, | |
5951146d | 5656 | &cmd->se_dev->dev_queue_obj); |
c66ac9db NB |
5657 | |
5658 | transport_lun_remove_cmd(cmd); | |
5659 | transport_cmd_check_stop(cmd, 1, 0); | |
5660 | } else { | |
5661 | spin_unlock_irqrestore( | |
a1d8b49a | 5662 | &cmd->t_state_lock, flags); |
c66ac9db NB |
5663 | |
5664 | transport_remove_cmd_from_queue(cmd, | |
5951146d | 5665 | &cmd->se_dev->dev_queue_obj); |
c66ac9db NB |
5666 | transport_lun_remove_cmd(cmd); |
5667 | ||
5668 | if (transport_cmd_check_stop(cmd, 1, 0)) | |
5669 | transport_generic_remove(cmd, 0, 0); | |
5670 | } | |
5671 | ||
5672 | spin_lock_irqsave(&dev->execute_task_lock, flags); | |
5673 | } | |
5674 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | |
5675 | /* | |
5676 | * Empty the struct se_device's struct se_cmd list. | |
5677 | */ | |
5951146d | 5678 | while ((cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj))) { |
c66ac9db NB |
5679 | |
5680 | DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n", | |
5951146d | 5681 | cmd, cmd->t_state); |
c66ac9db | 5682 | |
a1d8b49a | 5683 | if (atomic_read(&cmd->t_fe_count)) { |
c66ac9db NB |
5684 | transport_send_check_condition_and_sense(cmd, |
5685 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); | |
5686 | ||
5687 | transport_lun_remove_cmd(cmd); | |
5688 | transport_cmd_check_stop(cmd, 1, 0); | |
5689 | } else { | |
5690 | transport_lun_remove_cmd(cmd); | |
5691 | if (transport_cmd_check_stop(cmd, 1, 0)) | |
5692 | transport_generic_remove(cmd, 0, 0); | |
5693 | } | |
c66ac9db | 5694 | } |
c66ac9db NB |
5695 | } |
5696 | ||
5697 | /* transport_processing_thread(): | |
5698 | * | |
5699 | * | |
5700 | */ | |
5701 | static int transport_processing_thread(void *param) | |
5702 | { | |
5951146d | 5703 | int ret; |
c66ac9db NB |
5704 | struct se_cmd *cmd; |
5705 | struct se_device *dev = (struct se_device *) param; | |
c66ac9db NB |
5706 | |
5707 | set_user_nice(current, -20); | |
5708 | ||
5709 | while (!kthread_should_stop()) { | |
e3d6f909 AG |
5710 | ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq, |
5711 | atomic_read(&dev->dev_queue_obj.queue_cnt) || | |
c66ac9db NB |
5712 | kthread_should_stop()); |
5713 | if (ret < 0) | |
5714 | goto out; | |
5715 | ||
5716 | spin_lock_irq(&dev->dev_status_lock); | |
5717 | if (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) { | |
5718 | spin_unlock_irq(&dev->dev_status_lock); | |
5719 | transport_processing_shutdown(dev); | |
5720 | continue; | |
5721 | } | |
5722 | spin_unlock_irq(&dev->dev_status_lock); | |
5723 | ||
5724 | get_cmd: | |
5725 | __transport_execute_tasks(dev); | |
5726 | ||
5951146d AG |
5727 | cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj); |
5728 | if (!cmd) | |
c66ac9db NB |
5729 | continue; |
5730 | ||
5951146d | 5731 | switch (cmd->t_state) { |
c66ac9db | 5732 | case TRANSPORT_NEW_CMD_MAP: |
e3d6f909 AG |
5733 | if (!(cmd->se_tfo->new_cmd_map)) { |
5734 | printk(KERN_ERR "cmd->se_tfo->new_cmd_map is" | |
c66ac9db NB |
5735 | " NULL for TRANSPORT_NEW_CMD_MAP\n"); |
5736 | BUG(); | |
5737 | } | |
e3d6f909 | 5738 | ret = cmd->se_tfo->new_cmd_map(cmd); |
c66ac9db NB |
5739 | if (ret < 0) { |
5740 | cmd->transport_error_status = ret; | |
5741 | transport_generic_request_failure(cmd, NULL, | |
5742 | 0, (cmd->data_direction != | |
5743 | DMA_TO_DEVICE)); | |
5744 | break; | |
5745 | } | |
5746 | /* Fall through */ | |
5747 | case TRANSPORT_NEW_CMD: | |
5748 | ret = transport_generic_new_cmd(cmd); | |
5749 | if (ret < 0) { | |
5750 | cmd->transport_error_status = ret; | |
5751 | transport_generic_request_failure(cmd, NULL, | |
5752 | 0, (cmd->data_direction != | |
5753 | DMA_TO_DEVICE)); | |
5754 | } | |
5755 | break; | |
5756 | case TRANSPORT_PROCESS_WRITE: | |
5757 | transport_generic_process_write(cmd); | |
5758 | break; | |
5759 | case TRANSPORT_COMPLETE_OK: | |
5760 | transport_stop_all_task_timers(cmd); | |
5761 | transport_generic_complete_ok(cmd); | |
5762 | break; | |
5763 | case TRANSPORT_REMOVE: | |
5764 | transport_generic_remove(cmd, 1, 0); | |
5765 | break; | |
f4366772 NB |
5766 | case TRANSPORT_FREE_CMD_INTR: |
5767 | transport_generic_free_cmd(cmd, 0, 1, 0); | |
5768 | break; | |
c66ac9db NB |
5769 | case TRANSPORT_PROCESS_TMR: |
5770 | transport_generic_do_tmr(cmd); | |
5771 | break; | |
5772 | case TRANSPORT_COMPLETE_FAILURE: | |
5773 | transport_generic_request_failure(cmd, NULL, 1, 1); | |
5774 | break; | |
5775 | case TRANSPORT_COMPLETE_TIMEOUT: | |
5776 | transport_stop_all_task_timers(cmd); | |
5777 | transport_generic_request_timeout(cmd); | |
5778 | break; | |
5779 | default: | |
5780 | printk(KERN_ERR "Unknown t_state: %d deferred_t_state:" | |
5781 | " %d for ITT: 0x%08x i_state: %d on SE LUN:" | |
5951146d | 5782 | " %u\n", cmd->t_state, cmd->deferred_t_state, |
e3d6f909 AG |
5783 | cmd->se_tfo->get_task_tag(cmd), |
5784 | cmd->se_tfo->get_cmd_state(cmd), | |
5785 | cmd->se_lun->unpacked_lun); | |
c66ac9db NB |
5786 | BUG(); |
5787 | } | |
5788 | ||
5789 | goto get_cmd; | |
5790 | } | |
5791 | ||
5792 | out: | |
5793 | transport_release_all_cmds(dev); | |
5794 | dev->process_thread = NULL; | |
5795 | return 0; | |
5796 | } |
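/*
 * Illustrative sketch (not part of the original source): the processing
 * thread above is a standard kthread event loop - sleep until there is
 * queued work or a stop request, then drain the queue.  "struct work_queue"
 * is a hypothetical stand-in for the se_queue_obj used above:
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/atomic.h>

struct work_queue {
	wait_queue_head_t	wq;
	atomic_t		queue_cnt;
};

static int queue_event_loop(void *param)
{
	struct work_queue *q = param;

	while (!kthread_should_stop()) {
		int ret = wait_event_interruptible(q->wq,
				atomic_read(&q->queue_cnt) ||
				kthread_should_stop());
		if (ret < 0)
			break;
		/* ... dequeue and dispatch work items here ... */
	}
	return 0;
}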