Commit | Line | Data |
---|---|---|
c66ac9db NB |
1 | /******************************************************************************* |
2 | * Filename: target_core_transport.c | |
3 | * | |
4 | * This file contains the Generic Target Engine Core. | |
5 | * | |
6 | * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc. | |
7 | * Copyright (c) 2005, 2006, 2007 SBE, Inc. | |
8 | * Copyright (c) 2007-2010 Rising Tide Systems | |
9 | * Copyright (c) 2008-2010 Linux-iSCSI.org | |
10 | * | |
11 | * Nicholas A. Bellinger <nab@kernel.org> | |
12 | * | |
13 | * This program is free software; you can redistribute it and/or modify | |
14 | * it under the terms of the GNU General Public License as published by | |
15 | * the Free Software Foundation; either version 2 of the License, or | |
16 | * (at your option) any later version. | |
17 | * | |
18 | * This program is distributed in the hope that it will be useful, | |
19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
21 | * GNU General Public License for more details. | |
22 | * | |
23 | * You should have received a copy of the GNU General Public License | |
24 | * along with this program; if not, write to the Free Software | |
25 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
26 | * | |
27 | ******************************************************************************/ | |
28 | ||
c66ac9db NB |
29 | #include <linux/net.h> |
30 | #include <linux/delay.h> | |
31 | #include <linux/string.h> | |
32 | #include <linux/timer.h> | |
33 | #include <linux/slab.h> | |
34 | #include <linux/blkdev.h> | |
35 | #include <linux/spinlock.h> | |
c66ac9db NB |
36 | #include <linux/kthread.h> |
37 | #include <linux/in.h> | |
38 | #include <linux/cdrom.h> | |
39 | #include <asm/unaligned.h> | |
40 | #include <net/sock.h> | |
41 | #include <net/tcp.h> | |
42 | #include <scsi/scsi.h> | |
43 | #include <scsi/scsi_cmnd.h> | |
e66ecd50 | 44 | #include <scsi/scsi_tcq.h> |
c66ac9db NB |
45 | |
46 | #include <target/target_core_base.h> | |
47 | #include <target/target_core_device.h> | |
48 | #include <target/target_core_tmr.h> | |
49 | #include <target/target_core_tpg.h> | |
50 | #include <target/target_core_transport.h> | |
51 | #include <target/target_core_fabric_ops.h> | |
52 | #include <target/target_core_configfs.h> | |
53 | ||
54 | #include "target_core_alua.h" | |
55 | #include "target_core_hba.h" | |
56 | #include "target_core_pr.h" | |
c66ac9db NB |
57 | #include "target_core_ua.h" |
58 | ||
e3d6f909 | 59 | static int sub_api_initialized; |
c66ac9db | 60 | |
35e0e757 | 61 | static struct workqueue_struct *target_completion_wq; |
c66ac9db NB |
62 | static struct kmem_cache *se_cmd_cache; |
63 | static struct kmem_cache *se_sess_cache; | |
64 | struct kmem_cache *se_tmr_req_cache; | |
65 | struct kmem_cache *se_ua_cache; | |
c66ac9db NB |
66 | struct kmem_cache *t10_pr_reg_cache; |
67 | struct kmem_cache *t10_alua_lu_gp_cache; | |
68 | struct kmem_cache *t10_alua_lu_gp_mem_cache; | |
69 | struct kmem_cache *t10_alua_tg_pt_gp_cache; | |
70 | struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; | |
71 | ||
c66ac9db | 72 | static int transport_generic_write_pending(struct se_cmd *); |
5951146d | 73 | static int transport_processing_thread(void *param); |
c66ac9db NB |
74 | static int __transport_execute_tasks(struct se_device *dev); |
75 | static void transport_complete_task_attr(struct se_cmd *cmd); | |
07bde79a | 76 | static void transport_handle_queue_full(struct se_cmd *cmd, |
e057f533 | 77 | struct se_device *dev); |
c66ac9db | 78 | static void transport_free_dev_tasks(struct se_cmd *cmd); |
05d1c7c0 | 79 | static int transport_generic_get_mem(struct se_cmd *cmd); |
39c05f32 | 80 | static void transport_put_cmd(struct se_cmd *cmd); |
3df8d40b | 81 | static void transport_remove_cmd_from_queue(struct se_cmd *cmd); |
c66ac9db | 82 | static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); |
35e0e757 CH |
83 | static void transport_generic_request_failure(struct se_cmd *, int, int); |
84 | static void target_complete_ok_work(struct work_struct *work); | |
c66ac9db | 85 | |
e3d6f909 | 86 | int init_se_kmem_caches(void) |
c66ac9db | 87 | { |
c66ac9db NB |
88 | se_cmd_cache = kmem_cache_create("se_cmd_cache", |
89 | sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL); | |
6708bb27 AG |
90 | if (!se_cmd_cache) { |
91 | pr_err("kmem_cache_create for struct se_cmd failed\n"); | |
c66ac9db NB |
92 | goto out; |
93 | } | |
94 | se_tmr_req_cache = kmem_cache_create("se_tmr_cache", | |
95 | sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req), | |
96 | 0, NULL); | |
6708bb27 AG |
97 | if (!se_tmr_req_cache) { |
98 | pr_err("kmem_cache_create() for struct se_tmr_req" | |
c66ac9db | 99 | " failed\n"); |
35e0e757 | 100 | goto out_free_cmd_cache; |
c66ac9db NB |
101 | } |
102 | se_sess_cache = kmem_cache_create("se_sess_cache", | |
103 | sizeof(struct se_session), __alignof__(struct se_session), | |
104 | 0, NULL); | |
6708bb27 AG |
105 | if (!se_sess_cache) { |
106 | pr_err("kmem_cache_create() for struct se_session" | |
c66ac9db | 107 | " failed\n"); |
35e0e757 | 108 | goto out_free_tmr_req_cache; |
c66ac9db NB |
109 | } |
110 | se_ua_cache = kmem_cache_create("se_ua_cache", | |
111 | sizeof(struct se_ua), __alignof__(struct se_ua), | |
112 | 0, NULL); | |
6708bb27 AG |
113 | if (!se_ua_cache) { |
114 | pr_err("kmem_cache_create() for struct se_ua failed\n"); | |
35e0e757 | 115 | goto out_free_sess_cache; |
c66ac9db | 116 | } |
c66ac9db NB |
117 | t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", |
118 | sizeof(struct t10_pr_registration), | |
119 | __alignof__(struct t10_pr_registration), 0, NULL); | |
6708bb27 AG |
120 | if (!t10_pr_reg_cache) { |
121 | pr_err("kmem_cache_create() for struct t10_pr_registration" | |
c66ac9db | 122 | " failed\n"); |
35e0e757 | 123 | goto out_free_ua_cache; |
c66ac9db NB |
124 | } |
125 | t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", | |
126 | sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp), | |
127 | 0, NULL); | |
6708bb27 AG |
128 | if (!t10_alua_lu_gp_cache) { |
129 | pr_err("kmem_cache_create() for t10_alua_lu_gp_cache" | |
c66ac9db | 130 | " failed\n"); |
35e0e757 | 131 | goto out_free_pr_reg_cache; |
c66ac9db NB |
132 | } |
133 | t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", | |
134 | sizeof(struct t10_alua_lu_gp_member), | |
135 | __alignof__(struct t10_alua_lu_gp_member), 0, NULL); | |
6708bb27 AG |
136 | if (!t10_alua_lu_gp_mem_cache) { |
137 | pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_" | |
c66ac9db | 138 | "cache failed\n"); |
35e0e757 | 139 | goto out_free_lu_gp_cache; |
c66ac9db NB |
140 | } |
141 | t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", | |
142 | sizeof(struct t10_alua_tg_pt_gp), | |
143 | __alignof__(struct t10_alua_tg_pt_gp), 0, NULL); | |
6708bb27 AG |
144 | if (!t10_alua_tg_pt_gp_cache) { |
145 | pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" | |
c66ac9db | 146 | "cache failed\n"); |
35e0e757 | 147 | goto out_free_lu_gp_mem_cache; |
c66ac9db NB |
148 | } |
149 | t10_alua_tg_pt_gp_mem_cache = kmem_cache_create( | |
150 | "t10_alua_tg_pt_gp_mem_cache", | |
151 | sizeof(struct t10_alua_tg_pt_gp_member), | |
152 | __alignof__(struct t10_alua_tg_pt_gp_member), | |
153 | 0, NULL); | |
6708bb27 AG |
154 | if (!t10_alua_tg_pt_gp_mem_cache) { |
155 | pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" | |
c66ac9db | 156 | "mem_t failed\n"); |
35e0e757 | 157 | goto out_free_tg_pt_gp_cache; |
c66ac9db NB |
158 | } |
159 | ||
35e0e757 CH |
160 | target_completion_wq = alloc_workqueue("target_completion", |
161 | WQ_MEM_RECLAIM, 0); | |
162 | if (!target_completion_wq) | |
163 | goto out_free_tg_pt_gp_mem_cache; | |
164 | ||
c66ac9db | 165 | return 0; |
35e0e757 CH |
166 | |
167 | out_free_tg_pt_gp_mem_cache: | |
168 | kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); | |
169 | out_free_tg_pt_gp_cache: | |
170 | kmem_cache_destroy(t10_alua_tg_pt_gp_cache); | |
171 | out_free_lu_gp_mem_cache: | |
172 | kmem_cache_destroy(t10_alua_lu_gp_mem_cache); | |
173 | out_free_lu_gp_cache: | |
174 | kmem_cache_destroy(t10_alua_lu_gp_cache); | |
175 | out_free_pr_reg_cache: | |
176 | kmem_cache_destroy(t10_pr_reg_cache); | |
177 | out_free_ua_cache: | |
178 | kmem_cache_destroy(se_ua_cache); | |
179 | out_free_sess_cache: | |
180 | kmem_cache_destroy(se_sess_cache); | |
181 | out_free_tmr_req_cache: | |
182 | kmem_cache_destroy(se_tmr_req_cache); | |
183 | out_free_cmd_cache: | |
184 | kmem_cache_destroy(se_cmd_cache); | |
c66ac9db | 185 | out: |
e3d6f909 | 186 | return -ENOMEM; |
c66ac9db NB |
187 | } |
188 | ||
e3d6f909 | 189 | void release_se_kmem_caches(void) |
c66ac9db | 190 | { |
35e0e757 | 191 | destroy_workqueue(target_completion_wq); |
c66ac9db NB |
192 | kmem_cache_destroy(se_cmd_cache); |
193 | kmem_cache_destroy(se_tmr_req_cache); | |
194 | kmem_cache_destroy(se_sess_cache); | |
195 | kmem_cache_destroy(se_ua_cache); | |
c66ac9db NB |
196 | kmem_cache_destroy(t10_pr_reg_cache); |
197 | kmem_cache_destroy(t10_alua_lu_gp_cache); | |
198 | kmem_cache_destroy(t10_alua_lu_gp_mem_cache); | |
199 | kmem_cache_destroy(t10_alua_tg_pt_gp_cache); | |
200 | kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); | |
c66ac9db NB |
201 | } |
202 | ||
e3d6f909 AG |
203 | /* This code ensures unique mib indexes are handed out. */ |
204 | static DEFINE_SPINLOCK(scsi_mib_index_lock); | |
205 | static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; | |
e89d15ee NB |
206 | |
207 | /* | |
208 | * Allocate a new row index for the entry type specified | |
209 | */ | |
210 | u32 scsi_get_new_index(scsi_index_t type) | |
211 | { | |
212 | u32 new_index; | |
213 | ||
e3d6f909 | 214 | BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)); |
e89d15ee | 215 | |
e3d6f909 AG |
216 | spin_lock(&scsi_mib_index_lock); |
217 | new_index = ++scsi_mib_index[type]; | |
218 | spin_unlock(&scsi_mib_index_lock); | |
e89d15ee NB |
219 | |
220 | return new_index; | |
221 | } | |
222 | ||
c66ac9db NB |
223 | void transport_init_queue_obj(struct se_queue_obj *qobj) |
224 | { | |
225 | atomic_set(&qobj->queue_cnt, 0); | |
226 | INIT_LIST_HEAD(&qobj->qobj_list); | |
227 | init_waitqueue_head(&qobj->thread_wq); | |
228 | spin_lock_init(&qobj->cmd_queue_lock); | |
229 | } | |
230 | EXPORT_SYMBOL(transport_init_queue_obj); | |
231 | ||
dbc5623e | 232 | void transport_subsystem_check_init(void) |
c66ac9db NB |
233 | { |
234 | int ret; | |
235 | ||
dbc5623e NB |
236 | if (sub_api_initialized) |
237 | return; | |
238 | ||
c66ac9db NB |
239 | ret = request_module("target_core_iblock"); |
240 | if (ret != 0) | |
6708bb27 | 241 | pr_err("Unable to load target_core_iblock\n"); |
c66ac9db NB |
242 | |
243 | ret = request_module("target_core_file"); | |
244 | if (ret != 0) | |
6708bb27 | 245 | pr_err("Unable to load target_core_file\n"); |
c66ac9db NB |
246 | |
247 | ret = request_module("target_core_pscsi"); | |
248 | if (ret != 0) | |
6708bb27 | 249 | pr_err("Unable to load target_core_pscsi\n"); |
c66ac9db NB |
250 | |
251 | ret = request_module("target_core_stgt"); | |
252 | if (ret != 0) | |
6708bb27 | 253 | pr_err("Unable to load target_core_stgt\n"); |
c66ac9db | 254 | |
e3d6f909 | 255 | sub_api_initialized = 1; |
dbc5623e | 256 | return; |
c66ac9db NB |
257 | } |
258 | ||
259 | struct se_session *transport_init_session(void) | |
260 | { | |
261 | struct se_session *se_sess; | |
262 | ||
263 | se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); | |
6708bb27 AG |
264 | if (!se_sess) { |
265 | pr_err("Unable to allocate struct se_session from" | |
c66ac9db NB |
266 | " se_sess_cache\n"); |
267 | return ERR_PTR(-ENOMEM); | |
268 | } | |
269 | INIT_LIST_HEAD(&se_sess->sess_list); | |
270 | INIT_LIST_HEAD(&se_sess->sess_acl_list); | |
a17f091d NB |
271 | INIT_LIST_HEAD(&se_sess->sess_cmd_list); |
272 | INIT_LIST_HEAD(&se_sess->sess_wait_list); | |
273 | spin_lock_init(&se_sess->sess_cmd_lock); | |
c66ac9db NB |
274 | |
275 | return se_sess; | |
276 | } | |
277 | EXPORT_SYMBOL(transport_init_session); | |
278 | ||
279 | /* | |
280 | * Called with spin_lock_bh(&struct se_portal_group->session_lock called. | |
281 | */ | |
282 | void __transport_register_session( | |
283 | struct se_portal_group *se_tpg, | |
284 | struct se_node_acl *se_nacl, | |
285 | struct se_session *se_sess, | |
286 | void *fabric_sess_ptr) | |
287 | { | |
288 | unsigned char buf[PR_REG_ISID_LEN]; | |
289 | ||
290 | se_sess->se_tpg = se_tpg; | |
291 | se_sess->fabric_sess_ptr = fabric_sess_ptr; | |
292 | /* | |
293 | * Used by struct se_node_acl's under ConfigFS to locate active se_session-t | |
294 | * | |
295 | * Only set for struct se_session's that will actually be moving I/O. | |
296 | * eg: *NOT* discovery sessions. | |
297 | */ | |
298 | if (se_nacl) { | |
299 | /* | |
300 | * If the fabric module supports an ISID based TransportID, | |
301 | * save this value in binary from the fabric I_T Nexus now. | |
302 | */ | |
e3d6f909 | 303 | if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { |
c66ac9db | 304 | memset(&buf[0], 0, PR_REG_ISID_LEN); |
e3d6f909 | 305 | se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, |
c66ac9db NB |
306 | &buf[0], PR_REG_ISID_LEN); |
307 | se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); | |
308 | } | |
309 | spin_lock_irq(&se_nacl->nacl_sess_lock); | |
310 | /* | |
311 | * The se_nacl->nacl_sess pointer will be set to the | |
312 | * last active I_T Nexus for each struct se_node_acl. | |
313 | */ | |
314 | se_nacl->nacl_sess = se_sess; | |
315 | ||
316 | list_add_tail(&se_sess->sess_acl_list, | |
317 | &se_nacl->acl_sess_list); | |
318 | spin_unlock_irq(&se_nacl->nacl_sess_lock); | |
319 | } | |
320 | list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); | |
321 | ||
6708bb27 | 322 | pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", |
e3d6f909 | 323 | se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr); |
c66ac9db NB |
324 | } |
325 | EXPORT_SYMBOL(__transport_register_session); | |
326 | ||
327 | void transport_register_session( | |
328 | struct se_portal_group *se_tpg, | |
329 | struct se_node_acl *se_nacl, | |
330 | struct se_session *se_sess, | |
331 | void *fabric_sess_ptr) | |
332 | { | |
333 | spin_lock_bh(&se_tpg->session_lock); | |
334 | __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr); | |
335 | spin_unlock_bh(&se_tpg->session_lock); | |
336 | } | |
337 | EXPORT_SYMBOL(transport_register_session); | |
338 | ||
339 | void transport_deregister_session_configfs(struct se_session *se_sess) | |
340 | { | |
341 | struct se_node_acl *se_nacl; | |
23388864 | 342 | unsigned long flags; |
c66ac9db NB |
343 | /* |
344 | * Used by struct se_node_acl's under ConfigFS to locate active struct se_session | |
345 | */ | |
346 | se_nacl = se_sess->se_node_acl; | |
6708bb27 | 347 | if (se_nacl) { |
23388864 | 348 | spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); |
c66ac9db NB |
349 | list_del(&se_sess->sess_acl_list); |
350 | /* | |
351 | * If the session list is empty, then clear the pointer. | |
352 | * Otherwise, set the struct se_session pointer from the tail | |
353 | * element of the per struct se_node_acl active session list. | |
354 | */ | |
355 | if (list_empty(&se_nacl->acl_sess_list)) | |
356 | se_nacl->nacl_sess = NULL; | |
357 | else { | |
358 | se_nacl->nacl_sess = container_of( | |
359 | se_nacl->acl_sess_list.prev, | |
360 | struct se_session, sess_acl_list); | |
361 | } | |
23388864 | 362 | spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); |
c66ac9db NB |
363 | } |
364 | } | |
365 | EXPORT_SYMBOL(transport_deregister_session_configfs); | |
366 | ||
367 | void transport_free_session(struct se_session *se_sess) | |
368 | { | |
369 | kmem_cache_free(se_sess_cache, se_sess); | |
370 | } | |
371 | EXPORT_SYMBOL(transport_free_session); | |
372 | ||
373 | void transport_deregister_session(struct se_session *se_sess) | |
374 | { | |
375 | struct se_portal_group *se_tpg = se_sess->se_tpg; | |
376 | struct se_node_acl *se_nacl; | |
e63a8e19 | 377 | unsigned long flags; |
c66ac9db | 378 | |
6708bb27 | 379 | if (!se_tpg) { |
c66ac9db NB |
380 | transport_free_session(se_sess); |
381 | return; | |
382 | } | |
c66ac9db | 383 | |
e63a8e19 | 384 | spin_lock_irqsave(&se_tpg->session_lock, flags); |
c66ac9db NB |
385 | list_del(&se_sess->sess_list); |
386 | se_sess->se_tpg = NULL; | |
387 | se_sess->fabric_sess_ptr = NULL; | |
e63a8e19 | 388 | spin_unlock_irqrestore(&se_tpg->session_lock, flags); |
c66ac9db NB |
389 | |
390 | /* | |
391 | * Determine if we need to do extra work for this initiator node's | |
392 | * struct se_node_acl if it had been previously dynamically generated. | |
393 | */ | |
394 | se_nacl = se_sess->se_node_acl; | |
6708bb27 | 395 | if (se_nacl) { |
e63a8e19 | 396 | spin_lock_irqsave(&se_tpg->acl_node_lock, flags); |
c66ac9db | 397 | if (se_nacl->dynamic_node_acl) { |
6708bb27 AG |
398 | if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache( |
399 | se_tpg)) { | |
c66ac9db NB |
400 | list_del(&se_nacl->acl_list); |
401 | se_tpg->num_node_acls--; | |
e63a8e19 | 402 | spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); |
c66ac9db NB |
403 | |
404 | core_tpg_wait_for_nacl_pr_ref(se_nacl); | |
c66ac9db | 405 | core_free_device_list_for_node(se_nacl, se_tpg); |
e3d6f909 | 406 | se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, |
c66ac9db | 407 | se_nacl); |
e63a8e19 | 408 | spin_lock_irqsave(&se_tpg->acl_node_lock, flags); |
c66ac9db NB |
409 | } |
410 | } | |
e63a8e19 | 411 | spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); |
c66ac9db NB |
412 | } |
413 | ||
414 | transport_free_session(se_sess); | |
415 | ||
6708bb27 | 416 | pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", |
e3d6f909 | 417 | se_tpg->se_tpg_tfo->get_fabric_name()); |
c66ac9db NB |
418 | } |
419 | EXPORT_SYMBOL(transport_deregister_session); | |
420 | ||
421 | /* | |
a1d8b49a | 422 | * Called with cmd->t_state_lock held. |
c66ac9db NB |
423 | */ |
424 | static void transport_all_task_dev_remove_state(struct se_cmd *cmd) | |
425 | { | |
42bf829e | 426 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
427 | struct se_task *task; |
428 | unsigned long flags; | |
429 | ||
42bf829e CH |
430 | if (!dev) |
431 | return; | |
c66ac9db | 432 | |
42bf829e | 433 | list_for_each_entry(task, &cmd->t_task_list, t_list) { |
6c76bf95 | 434 | if (task->task_flags & TF_ACTIVE) |
c66ac9db NB |
435 | continue; |
436 | ||
6708bb27 | 437 | if (!atomic_read(&task->task_state_active)) |
c66ac9db NB |
438 | continue; |
439 | ||
440 | spin_lock_irqsave(&dev->execute_task_lock, flags); | |
441 | list_del(&task->t_state_list); | |
6708bb27 AG |
442 | pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n", |
443 | cmd->se_tfo->get_task_tag(cmd), dev, task); | |
c66ac9db NB |
444 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); |
445 | ||
446 | atomic_set(&task->task_state_active, 0); | |
a1d8b49a | 447 | atomic_dec(&cmd->t_task_cdbs_ex_left); |
c66ac9db NB |
448 | } |
449 | } | |
450 | ||
451 | /* transport_cmd_check_stop(): | |
452 | * | |
453 | * 'transport_off = 1' determines if t_transport_active should be cleared. | |
454 | * 'transport_off = 2' determines if task_dev_state should be removed. | |
455 | * | |
456 | * A non-zero u8 t_state sets cmd->t_state. | |
457 | * Returns 1 when command is stopped, else 0. | |
458 | */ | |
459 | static int transport_cmd_check_stop( | |
460 | struct se_cmd *cmd, | |
461 | int transport_off, | |
462 | u8 t_state) | |
463 | { | |
464 | unsigned long flags; | |
465 | ||
a1d8b49a | 466 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db NB |
467 | /* |
468 | * Determine if IOCTL context caller in requesting the stopping of this | |
469 | * command for LUN shutdown purposes. | |
470 | */ | |
a1d8b49a | 471 | if (atomic_read(&cmd->transport_lun_stop)) { |
6708bb27 | 472 | pr_debug("%s:%d atomic_read(&cmd->transport_lun_stop)" |
c66ac9db | 473 | " == TRUE for ITT: 0x%08x\n", __func__, __LINE__, |
e3d6f909 | 474 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db | 475 | |
a1d8b49a | 476 | atomic_set(&cmd->t_transport_active, 0); |
c66ac9db NB |
477 | if (transport_off == 2) |
478 | transport_all_task_dev_remove_state(cmd); | |
a1d8b49a | 479 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 480 | |
a1d8b49a | 481 | complete(&cmd->transport_lun_stop_comp); |
c66ac9db NB |
482 | return 1; |
483 | } | |
484 | /* | |
485 | * Determine if frontend context caller is requesting the stopping of | |
e3d6f909 | 486 | * this command for frontend exceptions. |
c66ac9db | 487 | */ |
a1d8b49a | 488 | if (atomic_read(&cmd->t_transport_stop)) { |
6708bb27 | 489 | pr_debug("%s:%d atomic_read(&cmd->t_transport_stop) ==" |
c66ac9db | 490 | " TRUE for ITT: 0x%08x\n", __func__, __LINE__, |
e3d6f909 | 491 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db | 492 | |
c66ac9db NB |
493 | if (transport_off == 2) |
494 | transport_all_task_dev_remove_state(cmd); | |
495 | ||
496 | /* | |
497 | * Clear struct se_cmd->se_lun before the transport_off == 2 handoff | |
498 | * to FE. | |
499 | */ | |
500 | if (transport_off == 2) | |
501 | cmd->se_lun = NULL; | |
a1d8b49a | 502 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 503 | |
a1d8b49a | 504 | complete(&cmd->t_transport_stop_comp); |
c66ac9db NB |
505 | return 1; |
506 | } | |
507 | if (transport_off) { | |
a1d8b49a | 508 | atomic_set(&cmd->t_transport_active, 0); |
c66ac9db NB |
509 | if (transport_off == 2) { |
510 | transport_all_task_dev_remove_state(cmd); | |
511 | /* | |
512 | * Clear struct se_cmd->se_lun before the transport_off == 2 | |
513 | * handoff to fabric module. | |
514 | */ | |
515 | cmd->se_lun = NULL; | |
516 | /* | |
517 | * Some fabric modules like tcm_loop can release | |
25985edc | 518 | * their internally allocated I/O reference now and |
c66ac9db | 519 | * struct se_cmd now. |
88dd9e26 NB |
520 | * |
521 | * Fabric modules are expected to return '1' here if the | |
522 | * se_cmd being passed is released at this point, | |
523 | * or zero if not being released. | |
c66ac9db | 524 | */ |
e3d6f909 | 525 | if (cmd->se_tfo->check_stop_free != NULL) { |
c66ac9db | 526 | spin_unlock_irqrestore( |
a1d8b49a | 527 | &cmd->t_state_lock, flags); |
c66ac9db | 528 | |
88dd9e26 | 529 | return cmd->se_tfo->check_stop_free(cmd); |
c66ac9db NB |
530 | } |
531 | } | |
a1d8b49a | 532 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
533 | |
534 | return 0; | |
535 | } else if (t_state) | |
536 | cmd->t_state = t_state; | |
a1d8b49a | 537 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
538 | |
539 | return 0; | |
540 | } | |
541 | ||
542 | static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) | |
543 | { | |
544 | return transport_cmd_check_stop(cmd, 2, 0); | |
545 | } | |
546 | ||
547 | static void transport_lun_remove_cmd(struct se_cmd *cmd) | |
548 | { | |
e3d6f909 | 549 | struct se_lun *lun = cmd->se_lun; |
c66ac9db NB |
550 | unsigned long flags; |
551 | ||
552 | if (!lun) | |
553 | return; | |
554 | ||
a1d8b49a | 555 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
6708bb27 | 556 | if (!atomic_read(&cmd->transport_dev_active)) { |
a1d8b49a | 557 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
558 | goto check_lun; |
559 | } | |
a1d8b49a | 560 | atomic_set(&cmd->transport_dev_active, 0); |
c66ac9db | 561 | transport_all_task_dev_remove_state(cmd); |
a1d8b49a | 562 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 563 | |
c66ac9db NB |
564 | |
565 | check_lun: | |
566 | spin_lock_irqsave(&lun->lun_cmd_lock, flags); | |
a1d8b49a | 567 | if (atomic_read(&cmd->transport_lun_active)) { |
5951146d | 568 | list_del(&cmd->se_lun_node); |
a1d8b49a | 569 | atomic_set(&cmd->transport_lun_active, 0); |
c66ac9db | 570 | #if 0 |
6708bb27 | 571 | pr_debug("Removed ITT: 0x%08x from LUN LIST[%d]\n" |
e3d6f909 | 572 | cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun); |
c66ac9db NB |
573 | #endif |
574 | } | |
575 | spin_unlock_irqrestore(&lun->lun_cmd_lock, flags); | |
576 | } | |
577 | ||
578 | void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) | |
579 | { | |
8dc52b54 NB |
580 | if (!cmd->se_tmr_req) |
581 | transport_lun_remove_cmd(cmd); | |
c66ac9db NB |
582 | |
583 | if (transport_cmd_check_stop_to_fabric(cmd)) | |
584 | return; | |
77039d1e | 585 | if (remove) { |
3df8d40b | 586 | transport_remove_cmd_from_queue(cmd); |
e6a2573f | 587 | transport_put_cmd(cmd); |
77039d1e | 588 | } |
c66ac9db NB |
589 | } |
590 | ||
f7a5cc0b CH |
591 | static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state, |
592 | bool at_head) | |
c66ac9db NB |
593 | { |
594 | struct se_device *dev = cmd->se_dev; | |
e3d6f909 | 595 | struct se_queue_obj *qobj = &dev->dev_queue_obj; |
c66ac9db NB |
596 | unsigned long flags; |
597 | ||
c66ac9db | 598 | if (t_state) { |
a1d8b49a | 599 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 600 | cmd->t_state = t_state; |
a1d8b49a AG |
601 | atomic_set(&cmd->t_transport_active, 1); |
602 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
c66ac9db NB |
603 | } |
604 | ||
605 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | |
79a7fef2 RD |
606 | |
607 | /* If the cmd is already on the list, remove it before we add it */ | |
608 | if (!list_empty(&cmd->se_queue_node)) | |
609 | list_del(&cmd->se_queue_node); | |
610 | else | |
611 | atomic_inc(&qobj->queue_cnt); | |
612 | ||
f7a5cc0b | 613 | if (at_head) |
07bde79a | 614 | list_add(&cmd->se_queue_node, &qobj->qobj_list); |
f7a5cc0b | 615 | else |
07bde79a | 616 | list_add_tail(&cmd->se_queue_node, &qobj->qobj_list); |
79a7fef2 | 617 | atomic_set(&cmd->t_transport_queue_active, 1); |
c66ac9db NB |
618 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); |
619 | ||
c66ac9db | 620 | wake_up_interruptible(&qobj->thread_wq); |
c66ac9db NB |
621 | } |
622 | ||
5951146d AG |
623 | static struct se_cmd * |
624 | transport_get_cmd_from_queue(struct se_queue_obj *qobj) | |
c66ac9db | 625 | { |
5951146d | 626 | struct se_cmd *cmd; |
c66ac9db NB |
627 | unsigned long flags; |
628 | ||
629 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | |
630 | if (list_empty(&qobj->qobj_list)) { | |
631 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | |
632 | return NULL; | |
633 | } | |
5951146d | 634 | cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node); |
c66ac9db | 635 | |
79a7fef2 | 636 | atomic_set(&cmd->t_transport_queue_active, 0); |
c66ac9db | 637 | |
79a7fef2 | 638 | list_del_init(&cmd->se_queue_node); |
c66ac9db NB |
639 | atomic_dec(&qobj->queue_cnt); |
640 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); | |
641 | ||
5951146d | 642 | return cmd; |
c66ac9db NB |
643 | } |
644 | ||
3df8d40b | 645 | static void transport_remove_cmd_from_queue(struct se_cmd *cmd) |
c66ac9db | 646 | { |
3df8d40b | 647 | struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj; |
c66ac9db NB |
648 | unsigned long flags; |
649 | ||
650 | spin_lock_irqsave(&qobj->cmd_queue_lock, flags); | |
6708bb27 | 651 | if (!atomic_read(&cmd->t_transport_queue_active)) { |
c66ac9db NB |
652 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); |
653 | return; | |
654 | } | |
79a7fef2 RD |
655 | atomic_set(&cmd->t_transport_queue_active, 0); |
656 | atomic_dec(&qobj->queue_cnt); | |
657 | list_del_init(&cmd->se_queue_node); | |
c66ac9db NB |
658 | spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); |
659 | ||
a1d8b49a | 660 | if (atomic_read(&cmd->t_transport_queue_active)) { |
6708bb27 | 661 | pr_err("ITT: 0x%08x t_transport_queue_active: %d\n", |
e3d6f909 | 662 | cmd->se_tfo->get_task_tag(cmd), |
a1d8b49a | 663 | atomic_read(&cmd->t_transport_queue_active)); |
c66ac9db NB |
664 | } |
665 | } | |
666 | ||
667 | /* | |
668 | * Completion function used by TCM subsystem plugins (such as FILEIO) | |
669 | * for queueing up response from struct se_subsystem_api->do_task() | |
670 | */ | |
671 | void transport_complete_sync_cache(struct se_cmd *cmd, int good) | |
672 | { | |
a1d8b49a | 673 | struct se_task *task = list_entry(cmd->t_task_list.next, |
c66ac9db NB |
674 | struct se_task, t_list); |
675 | ||
676 | if (good) { | |
677 | cmd->scsi_status = SAM_STAT_GOOD; | |
678 | task->task_scsi_status = GOOD; | |
679 | } else { | |
680 | task->task_scsi_status = SAM_STAT_CHECK_CONDITION; | |
681 | task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST; | |
e3d6f909 | 682 | task->task_se_cmd->transport_error_status = |
c66ac9db NB |
683 | PYX_TRANSPORT_ILLEGAL_REQUEST; |
684 | } | |
685 | ||
686 | transport_complete_task(task, good); | |
687 | } | |
688 | EXPORT_SYMBOL(transport_complete_sync_cache); | |
689 | ||
35e0e757 CH |
690 | static void target_complete_failure_work(struct work_struct *work) |
691 | { | |
692 | struct se_cmd *cmd = container_of(work, struct se_cmd, work); | |
693 | ||
694 | transport_generic_request_failure(cmd, 1, 1); | |
695 | } | |
696 | ||
c66ac9db NB |
697 | /* transport_complete_task(): |
698 | * | |
699 | * Called from interrupt and non interrupt context depending | |
700 | * on the transport plugin. | |
701 | */ | |
702 | void transport_complete_task(struct se_task *task, int success) | |
703 | { | |
e3d6f909 | 704 | struct se_cmd *cmd = task->task_se_cmd; |
42bf829e | 705 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
706 | unsigned long flags; |
707 | #if 0 | |
6708bb27 | 708 | pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task, |
a1d8b49a | 709 | cmd->t_task_cdb[0], dev); |
c66ac9db | 710 | #endif |
e3d6f909 | 711 | if (dev) |
c66ac9db | 712 | atomic_inc(&dev->depth_left); |
c66ac9db | 713 | |
a1d8b49a | 714 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
6c76bf95 | 715 | task->task_flags &= ~TF_ACTIVE; |
c66ac9db NB |
716 | |
717 | /* | |
718 | * See if any sense data exists, if so set the TASK_SENSE flag. | |
719 | * Also check for any other post completion work that needs to be | |
720 | * done by the plugins. | |
721 | */ | |
722 | if (dev && dev->transport->transport_complete) { | |
723 | if (dev->transport->transport_complete(task) != 0) { | |
724 | cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE; | |
725 | task->task_sense = 1; | |
726 | success = 1; | |
727 | } | |
728 | } | |
729 | ||
730 | /* | |
731 | * See if we are waiting for outstanding struct se_task | |
732 | * to complete for an exception condition | |
733 | */ | |
6c76bf95 | 734 | if (task->task_flags & TF_REQUEST_STOP) { |
a1d8b49a | 735 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
736 | complete(&task->task_stop_comp); |
737 | return; | |
738 | } | |
2235007c CH |
739 | |
740 | if (!success) | |
741 | cmd->t_tasks_failed = 1; | |
742 | ||
c66ac9db NB |
743 | /* |
744 | * Decrement the outstanding t_task_cdbs_left count. The last | |
745 | * struct se_task from struct se_cmd will complete itself into the | |
746 | * device queue depending upon int success. | |
747 | */ | |
6708bb27 | 748 | if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) { |
a1d8b49a | 749 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
750 | return; |
751 | } | |
752 | ||
2235007c | 753 | if (cmd->t_tasks_failed) { |
c66ac9db NB |
754 | if (!task->task_error_status) { |
755 | task->task_error_status = | |
756 | PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | |
757 | cmd->transport_error_status = | |
758 | PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | |
759 | } | |
35e0e757 | 760 | INIT_WORK(&cmd->work, target_complete_failure_work); |
c66ac9db | 761 | } else { |
a1d8b49a | 762 | atomic_set(&cmd->t_transport_complete, 1); |
35e0e757 | 763 | INIT_WORK(&cmd->work, target_complete_ok_work); |
c66ac9db | 764 | } |
35e0e757 | 765 | |
35e0e757 CH |
766 | cmd->t_state = TRANSPORT_COMPLETE; |
767 | atomic_set(&cmd->t_transport_active, 1); | |
a1d8b49a | 768 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 769 | |
35e0e757 | 770 | queue_work(target_completion_wq, &cmd->work); |
c66ac9db NB |
771 | } |
772 | EXPORT_SYMBOL(transport_complete_task); | |
773 | ||
774 | /* | |
775 | * Called by transport_add_tasks_from_cmd() once a struct se_cmd's | |
776 | * struct se_task list are ready to be added to the active execution list | |
777 | * struct se_device | |
778 | ||
779 | * Called with se_dev_t->execute_task_lock called. | |
780 | */ | |
781 | static inline int transport_add_task_check_sam_attr( | |
782 | struct se_task *task, | |
783 | struct se_task *task_prev, | |
784 | struct se_device *dev) | |
785 | { | |
786 | /* | |
787 | * No SAM Task attribute emulation enabled, add to tail of | |
788 | * execution queue | |
789 | */ | |
790 | if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) { | |
791 | list_add_tail(&task->t_execute_list, &dev->execute_task_list); | |
792 | return 0; | |
793 | } | |
794 | /* | |
795 | * HEAD_OF_QUEUE attribute for received CDB, which means | |
796 | * the first task that is associated with a struct se_cmd goes to | |
797 | * head of the struct se_device->execute_task_list, and task_prev | |
798 | * after that for each subsequent task | |
799 | */ | |
e66ecd50 | 800 | if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) { |
c66ac9db NB |
801 | list_add(&task->t_execute_list, |
802 | (task_prev != NULL) ? | |
803 | &task_prev->t_execute_list : | |
804 | &dev->execute_task_list); | |
805 | ||
6708bb27 | 806 | pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x" |
c66ac9db | 807 | " in execution queue\n", |
6708bb27 | 808 | task->task_se_cmd->t_task_cdb[0]); |
c66ac9db NB |
809 | return 1; |
810 | } | |
811 | /* | |
812 | * For ORDERED, SIMPLE or UNTAGGED attribute tasks once they have been | |
813 | * transitioned from Dermant -> Active state, and are added to the end | |
814 | * of the struct se_device->execute_task_list | |
815 | */ | |
816 | list_add_tail(&task->t_execute_list, &dev->execute_task_list); | |
817 | return 0; | |
818 | } | |
819 | ||
820 | /* __transport_add_task_to_execute_queue(): | |
821 | * | |
822 | * Called with se_dev_t->execute_task_lock called. | |
823 | */ | |
824 | static void __transport_add_task_to_execute_queue( | |
825 | struct se_task *task, | |
826 | struct se_task *task_prev, | |
827 | struct se_device *dev) | |
828 | { | |
829 | int head_of_queue; | |
830 | ||
831 | head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev); | |
832 | atomic_inc(&dev->execute_tasks); | |
833 | ||
834 | if (atomic_read(&task->task_state_active)) | |
835 | return; | |
836 | /* | |
837 | * Determine if this task needs to go to HEAD_OF_QUEUE for the | |
838 | * state list as well. Running with SAM Task Attribute emulation | |
839 | * will always return head_of_queue == 0 here | |
840 | */ | |
841 | if (head_of_queue) | |
842 | list_add(&task->t_state_list, (task_prev) ? | |
843 | &task_prev->t_state_list : | |
844 | &dev->state_task_list); | |
845 | else | |
846 | list_add_tail(&task->t_state_list, &dev->state_task_list); | |
847 | ||
848 | atomic_set(&task->task_state_active, 1); | |
849 | ||
6708bb27 | 850 | pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n", |
e3d6f909 | 851 | task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd), |
c66ac9db NB |
852 | task, dev); |
853 | } | |
854 | ||
855 | static void transport_add_tasks_to_state_queue(struct se_cmd *cmd) | |
856 | { | |
42bf829e | 857 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
858 | struct se_task *task; |
859 | unsigned long flags; | |
860 | ||
a1d8b49a AG |
861 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
862 | list_for_each_entry(task, &cmd->t_task_list, t_list) { | |
c66ac9db NB |
863 | if (atomic_read(&task->task_state_active)) |
864 | continue; | |
865 | ||
866 | spin_lock(&dev->execute_task_lock); | |
867 | list_add_tail(&task->t_state_list, &dev->state_task_list); | |
868 | atomic_set(&task->task_state_active, 1); | |
869 | ||
6708bb27 AG |
870 | pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n", |
871 | task->task_se_cmd->se_tfo->get_task_tag( | |
c66ac9db NB |
872 | task->task_se_cmd), task, dev); |
873 | ||
874 | spin_unlock(&dev->execute_task_lock); | |
875 | } | |
a1d8b49a | 876 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
877 | } |
878 | ||
879 | static void transport_add_tasks_from_cmd(struct se_cmd *cmd) | |
880 | { | |
5951146d | 881 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
882 | struct se_task *task, *task_prev = NULL; |
883 | unsigned long flags; | |
884 | ||
885 | spin_lock_irqsave(&dev->execute_task_lock, flags); | |
a1d8b49a | 886 | list_for_each_entry(task, &cmd->t_task_list, t_list) { |
04629b7b | 887 | if (!list_empty(&task->t_execute_list)) |
c66ac9db NB |
888 | continue; |
889 | /* | |
890 | * __transport_add_task_to_execute_queue() handles the | |
891 | * SAM Task Attribute emulation if enabled | |
892 | */ | |
893 | __transport_add_task_to_execute_queue(task, task_prev, dev); | |
c66ac9db NB |
894 | task_prev = task; |
895 | } | |
896 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | |
c66ac9db NB |
897 | } |
898 | ||
04629b7b CH |
899 | void __transport_remove_task_from_execute_queue(struct se_task *task, |
900 | struct se_device *dev) | |
901 | { | |
902 | list_del_init(&task->t_execute_list); | |
903 | atomic_dec(&dev->execute_tasks); | |
904 | } | |
905 | ||
52208ae3 | 906 | void transport_remove_task_from_execute_queue( |
c66ac9db NB |
907 | struct se_task *task, |
908 | struct se_device *dev) | |
909 | { | |
910 | unsigned long flags; | |
911 | ||
04629b7b | 912 | if (WARN_ON(list_empty(&task->t_execute_list))) |
af57c3ac | 913 | return; |
af57c3ac | 914 | |
c66ac9db | 915 | spin_lock_irqsave(&dev->execute_task_lock, flags); |
04629b7b | 916 | __transport_remove_task_from_execute_queue(task, dev); |
c66ac9db NB |
917 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); |
918 | } | |
919 | ||
07bde79a | 920 | /* |
f147abb4 | 921 | * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status |
07bde79a NB |
922 | */ |
923 | ||
924 | static void target_qf_do_work(struct work_struct *work) | |
925 | { | |
926 | struct se_device *dev = container_of(work, struct se_device, | |
927 | qf_work_queue); | |
bcac364a | 928 | LIST_HEAD(qf_cmd_list); |
07bde79a NB |
929 | struct se_cmd *cmd, *cmd_tmp; |
930 | ||
931 | spin_lock_irq(&dev->qf_cmd_lock); | |
bcac364a RD |
932 | list_splice_init(&dev->qf_cmd_list, &qf_cmd_list); |
933 | spin_unlock_irq(&dev->qf_cmd_lock); | |
07bde79a | 934 | |
bcac364a | 935 | list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { |
07bde79a NB |
936 | list_del(&cmd->se_qf_node); |
937 | atomic_dec(&dev->dev_qf_count); | |
938 | smp_mb__after_atomic_dec(); | |
07bde79a | 939 | |
6708bb27 | 940 | pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" |
07bde79a | 941 | " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, |
e057f533 | 942 | (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" : |
07bde79a NB |
943 | (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING" |
944 | : "UNKNOWN"); | |
f7a5cc0b CH |
945 | |
946 | transport_add_cmd_to_queue(cmd, cmd->t_state, true); | |
07bde79a | 947 | } |
07bde79a NB |
948 | } |
949 | ||
c66ac9db NB |
950 | unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) |
951 | { | |
952 | switch (cmd->data_direction) { | |
953 | case DMA_NONE: | |
954 | return "NONE"; | |
955 | case DMA_FROM_DEVICE: | |
956 | return "READ"; | |
957 | case DMA_TO_DEVICE: | |
958 | return "WRITE"; | |
959 | case DMA_BIDIRECTIONAL: | |
960 | return "BIDI"; | |
961 | default: | |
962 | break; | |
963 | } | |
964 | ||
965 | return "UNKNOWN"; | |
966 | } | |
967 | ||
968 | void transport_dump_dev_state( | |
969 | struct se_device *dev, | |
970 | char *b, | |
971 | int *bl) | |
972 | { | |
973 | *bl += sprintf(b + *bl, "Status: "); | |
974 | switch (dev->dev_status) { | |
975 | case TRANSPORT_DEVICE_ACTIVATED: | |
976 | *bl += sprintf(b + *bl, "ACTIVATED"); | |
977 | break; | |
978 | case TRANSPORT_DEVICE_DEACTIVATED: | |
979 | *bl += sprintf(b + *bl, "DEACTIVATED"); | |
980 | break; | |
981 | case TRANSPORT_DEVICE_SHUTDOWN: | |
982 | *bl += sprintf(b + *bl, "SHUTDOWN"); | |
983 | break; | |
984 | case TRANSPORT_DEVICE_OFFLINE_ACTIVATED: | |
985 | case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED: | |
986 | *bl += sprintf(b + *bl, "OFFLINE"); | |
987 | break; | |
988 | default: | |
989 | *bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status); | |
990 | break; | |
991 | } | |
992 | ||
993 | *bl += sprintf(b + *bl, " Execute/Left/Max Queue Depth: %d/%d/%d", | |
994 | atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left), | |
995 | dev->queue_depth); | |
996 | *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n", | |
e3d6f909 | 997 | dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors); |
c66ac9db NB |
998 | *bl += sprintf(b + *bl, " "); |
999 | } | |
1000 | ||
c66ac9db NB |
1001 | void transport_dump_vpd_proto_id( |
1002 | struct t10_vpd *vpd, | |
1003 | unsigned char *p_buf, | |
1004 | int p_buf_len) | |
1005 | { | |
1006 | unsigned char buf[VPD_TMP_BUF_SIZE]; | |
1007 | int len; | |
1008 | ||
1009 | memset(buf, 0, VPD_TMP_BUF_SIZE); | |
1010 | len = sprintf(buf, "T10 VPD Protocol Identifier: "); | |
1011 | ||
1012 | switch (vpd->protocol_identifier) { | |
1013 | case 0x00: | |
1014 | sprintf(buf+len, "Fibre Channel\n"); | |
1015 | break; | |
1016 | case 0x10: | |
1017 | sprintf(buf+len, "Parallel SCSI\n"); | |
1018 | break; | |
1019 | case 0x20: | |
1020 | sprintf(buf+len, "SSA\n"); | |
1021 | break; | |
1022 | case 0x30: | |
1023 | sprintf(buf+len, "IEEE 1394\n"); | |
1024 | break; | |
1025 | case 0x40: | |
1026 | sprintf(buf+len, "SCSI Remote Direct Memory Access" | |
1027 | " Protocol\n"); | |
1028 | break; | |
1029 | case 0x50: | |
1030 | sprintf(buf+len, "Internet SCSI (iSCSI)\n"); | |
1031 | break; | |
1032 | case 0x60: | |
1033 | sprintf(buf+len, "SAS Serial SCSI Protocol\n"); | |
1034 | break; | |
1035 | case 0x70: | |
1036 | sprintf(buf+len, "Automation/Drive Interface Transport" | |
1037 | " Protocol\n"); | |
1038 | break; | |
1039 | case 0x80: | |
1040 | sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n"); | |
1041 | break; | |
1042 | default: | |
1043 | sprintf(buf+len, "Unknown 0x%02x\n", | |
1044 | vpd->protocol_identifier); | |
1045 | break; | |
1046 | } | |
1047 | ||
1048 | if (p_buf) | |
1049 | strncpy(p_buf, buf, p_buf_len); | |
1050 | else | |
6708bb27 | 1051 | pr_debug("%s", buf); |
c66ac9db NB |
1052 | } |
1053 | ||
1054 | void | |
1055 | transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83) | |
1056 | { | |
1057 | /* | |
1058 | * Check if the Protocol Identifier Valid (PIV) bit is set.. | |
1059 | * | |
1060 | * from spc3r23.pdf section 7.5.1 | |
1061 | */ | |
1062 | if (page_83[1] & 0x80) { | |
1063 | vpd->protocol_identifier = (page_83[0] & 0xf0); | |
1064 | vpd->protocol_identifier_set = 1; | |
1065 | transport_dump_vpd_proto_id(vpd, NULL, 0); | |
1066 | } | |
1067 | } | |
1068 | EXPORT_SYMBOL(transport_set_vpd_proto_id); | |
1069 | ||
1070 | int transport_dump_vpd_assoc( | |
1071 | struct t10_vpd *vpd, | |
1072 | unsigned char *p_buf, | |
1073 | int p_buf_len) | |
1074 | { | |
1075 | unsigned char buf[VPD_TMP_BUF_SIZE]; | |
e3d6f909 AG |
1076 | int ret = 0; |
1077 | int len; | |
c66ac9db NB |
1078 | |
1079 | memset(buf, 0, VPD_TMP_BUF_SIZE); | |
1080 | len = sprintf(buf, "T10 VPD Identifier Association: "); | |
1081 | ||
1082 | switch (vpd->association) { | |
1083 | case 0x00: | |
1084 | sprintf(buf+len, "addressed logical unit\n"); | |
1085 | break; | |
1086 | case 0x10: | |
1087 | sprintf(buf+len, "target port\n"); | |
1088 | break; | |
1089 | case 0x20: | |
1090 | sprintf(buf+len, "SCSI target device\n"); | |
1091 | break; | |
1092 | default: | |
1093 | sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); | |
e3d6f909 | 1094 | ret = -EINVAL; |
c66ac9db NB |
1095 | break; |
1096 | } | |
1097 | ||
1098 | if (p_buf) | |
1099 | strncpy(p_buf, buf, p_buf_len); | |
1100 | else | |
6708bb27 | 1101 | pr_debug("%s", buf); |
c66ac9db NB |
1102 | |
1103 | return ret; | |
1104 | } | |
1105 | ||
1106 | int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83) | |
1107 | { | |
1108 | /* | |
1109 | * The VPD identification association.. | |
1110 | * | |
1111 | * from spc3r23.pdf Section 7.6.3.1 Table 297 | |
1112 | */ | |
1113 | vpd->association = (page_83[1] & 0x30); | |
1114 | return transport_dump_vpd_assoc(vpd, NULL, 0); | |
1115 | } | |
1116 | EXPORT_SYMBOL(transport_set_vpd_assoc); | |
1117 | ||
1118 | int transport_dump_vpd_ident_type( | |
1119 | struct t10_vpd *vpd, | |
1120 | unsigned char *p_buf, | |
1121 | int p_buf_len) | |
1122 | { | |
1123 | unsigned char buf[VPD_TMP_BUF_SIZE]; | |
e3d6f909 AG |
1124 | int ret = 0; |
1125 | int len; | |
c66ac9db NB |
1126 | |
1127 | memset(buf, 0, VPD_TMP_BUF_SIZE); | |
1128 | len = sprintf(buf, "T10 VPD Identifier Type: "); | |
1129 | ||
1130 | switch (vpd->device_identifier_type) { | |
1131 | case 0x00: | |
1132 | sprintf(buf+len, "Vendor specific\n"); | |
1133 | break; | |
1134 | case 0x01: | |
1135 | sprintf(buf+len, "T10 Vendor ID based\n"); | |
1136 | break; | |
1137 | case 0x02: | |
1138 | sprintf(buf+len, "EUI-64 based\n"); | |
1139 | break; | |
1140 | case 0x03: | |
1141 | sprintf(buf+len, "NAA\n"); | |
1142 | break; | |
1143 | case 0x04: | |
1144 | sprintf(buf+len, "Relative target port identifier\n"); | |
1145 | break; | |
1146 | case 0x08: | |
1147 | sprintf(buf+len, "SCSI name string\n"); | |
1148 | break; | |
1149 | default: | |
1150 | sprintf(buf+len, "Unsupported: 0x%02x\n", | |
1151 | vpd->device_identifier_type); | |
e3d6f909 | 1152 | ret = -EINVAL; |
c66ac9db NB |
1153 | break; |
1154 | } | |
1155 | ||
e3d6f909 AG |
1156 | if (p_buf) { |
1157 | if (p_buf_len < strlen(buf)+1) | |
1158 | return -EINVAL; | |
c66ac9db | 1159 | strncpy(p_buf, buf, p_buf_len); |
e3d6f909 | 1160 | } else { |
6708bb27 | 1161 | pr_debug("%s", buf); |
e3d6f909 | 1162 | } |
c66ac9db NB |
1163 | |
1164 | return ret; | |
1165 | } | |
1166 | ||
1167 | int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83) | |
1168 | { | |
1169 | /* | |
1170 | * The VPD identifier type.. | |
1171 | * | |
1172 | * from spc3r23.pdf Section 7.6.3.1 Table 298 | |
1173 | */ | |
1174 | vpd->device_identifier_type = (page_83[1] & 0x0f); | |
1175 | return transport_dump_vpd_ident_type(vpd, NULL, 0); | |
1176 | } | |
1177 | EXPORT_SYMBOL(transport_set_vpd_ident_type); | |
1178 | ||
1179 | int transport_dump_vpd_ident( | |
1180 | struct t10_vpd *vpd, | |
1181 | unsigned char *p_buf, | |
1182 | int p_buf_len) | |
1183 | { | |
1184 | unsigned char buf[VPD_TMP_BUF_SIZE]; | |
1185 | int ret = 0; | |
1186 | ||
1187 | memset(buf, 0, VPD_TMP_BUF_SIZE); | |
1188 | ||
1189 | switch (vpd->device_identifier_code_set) { | |
1190 | case 0x01: /* Binary */ | |
1191 | sprintf(buf, "T10 VPD Binary Device Identifier: %s\n", | |
1192 | &vpd->device_identifier[0]); | |
1193 | break; | |
1194 | case 0x02: /* ASCII */ | |
1195 | sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n", | |
1196 | &vpd->device_identifier[0]); | |
1197 | break; | |
1198 | case 0x03: /* UTF-8 */ | |
1199 | sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n", | |
1200 | &vpd->device_identifier[0]); | |
1201 | break; | |
1202 | default: | |
1203 | sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" | |
1204 | " 0x%02x", vpd->device_identifier_code_set); | |
e3d6f909 | 1205 | ret = -EINVAL; |
c66ac9db NB |
1206 | break; |
1207 | } | |
1208 | ||
1209 | if (p_buf) | |
1210 | strncpy(p_buf, buf, p_buf_len); | |
1211 | else | |
6708bb27 | 1212 | pr_debug("%s", buf); |
c66ac9db NB |
1213 | |
1214 | return ret; | |
1215 | } | |
1216 | ||
1217 | int | |
1218 | transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83) | |
1219 | { | |
1220 | static const char hex_str[] = "0123456789abcdef"; | |
1221 | int j = 0, i = 4; /* offset to start of the identifer */ | |
1222 | ||
1223 | /* | |
1224 | * The VPD Code Set (encoding) | |
1225 | * | |
1226 | * from spc3r23.pdf Section 7.6.3.1 Table 296 | |
1227 | */ | |
1228 | vpd->device_identifier_code_set = (page_83[0] & 0x0f); | |
1229 | switch (vpd->device_identifier_code_set) { | |
1230 | case 0x01: /* Binary */ | |
1231 | vpd->device_identifier[j++] = | |
1232 | hex_str[vpd->device_identifier_type]; | |
1233 | while (i < (4 + page_83[3])) { | |
1234 | vpd->device_identifier[j++] = | |
1235 | hex_str[(page_83[i] & 0xf0) >> 4]; | |
1236 | vpd->device_identifier[j++] = | |
1237 | hex_str[page_83[i] & 0x0f]; | |
1238 | i++; | |
1239 | } | |
1240 | break; | |
1241 | case 0x02: /* ASCII */ | |
1242 | case 0x03: /* UTF-8 */ | |
1243 | while (i < (4 + page_83[3])) | |
1244 | vpd->device_identifier[j++] = page_83[i++]; | |
1245 | break; | |
1246 | default: | |
1247 | break; | |
1248 | } | |
1249 | ||
1250 | return transport_dump_vpd_ident(vpd, NULL, 0); | |
1251 | } | |
1252 | EXPORT_SYMBOL(transport_set_vpd_ident); | |
1253 | ||
1254 | static void core_setup_task_attr_emulation(struct se_device *dev) | |
1255 | { | |
1256 | /* | |
1257 | * If this device is from Target_Core_Mod/pSCSI, disable the | |
1258 | * SAM Task Attribute emulation. | |
1259 | * | |
1260 | * This is currently not available in upsream Linux/SCSI Target | |
1261 | * mode code, and is assumed to be disabled while using TCM/pSCSI. | |
1262 | */ | |
e3d6f909 | 1263 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { |
c66ac9db NB |
1264 | dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH; |
1265 | return; | |
1266 | } | |
1267 | ||
1268 | dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED; | |
6708bb27 | 1269 | pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x" |
e3d6f909 AG |
1270 | " device\n", dev->transport->name, |
1271 | dev->transport->get_device_rev(dev)); | |
c66ac9db NB |
1272 | } |
1273 | ||
1274 | static void scsi_dump_inquiry(struct se_device *dev) | |
1275 | { | |
e3d6f909 | 1276 | struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn; |
c66ac9db NB |
1277 | int i, device_type; |
1278 | /* | |
1279 | * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer | |
1280 | */ | |
6708bb27 | 1281 | pr_debug(" Vendor: "); |
c66ac9db NB |
1282 | for (i = 0; i < 8; i++) |
1283 | if (wwn->vendor[i] >= 0x20) | |
6708bb27 | 1284 | pr_debug("%c", wwn->vendor[i]); |
c66ac9db | 1285 | else |
6708bb27 | 1286 | pr_debug(" "); |
c66ac9db | 1287 | |
6708bb27 | 1288 | pr_debug(" Model: "); |
c66ac9db NB |
1289 | for (i = 0; i < 16; i++) |
1290 | if (wwn->model[i] >= 0x20) | |
6708bb27 | 1291 | pr_debug("%c", wwn->model[i]); |
c66ac9db | 1292 | else |
6708bb27 | 1293 | pr_debug(" "); |
c66ac9db | 1294 | |
6708bb27 | 1295 | pr_debug(" Revision: "); |
c66ac9db NB |
1296 | for (i = 0; i < 4; i++) |
1297 | if (wwn->revision[i] >= 0x20) | |
6708bb27 | 1298 | pr_debug("%c", wwn->revision[i]); |
c66ac9db | 1299 | else |
6708bb27 | 1300 | pr_debug(" "); |
c66ac9db | 1301 | |
6708bb27 | 1302 | pr_debug("\n"); |
c66ac9db | 1303 | |
e3d6f909 | 1304 | device_type = dev->transport->get_device_type(dev); |
6708bb27 AG |
1305 | pr_debug(" Type: %s ", scsi_device_type(device_type)); |
1306 | pr_debug(" ANSI SCSI revision: %02x\n", | |
e3d6f909 | 1307 | dev->transport->get_device_rev(dev)); |
c66ac9db NB |
1308 | } |
1309 | ||
1310 | struct se_device *transport_add_device_to_core_hba( | |
1311 | struct se_hba *hba, | |
1312 | struct se_subsystem_api *transport, | |
1313 | struct se_subsystem_dev *se_dev, | |
1314 | u32 device_flags, | |
1315 | void *transport_dev, | |
1316 | struct se_dev_limits *dev_limits, | |
1317 | const char *inquiry_prod, | |
1318 | const char *inquiry_rev) | |
1319 | { | |
12a18bdc | 1320 | int force_pt; |
c66ac9db NB |
1321 | struct se_device *dev; |
1322 | ||
1323 | dev = kzalloc(sizeof(struct se_device), GFP_KERNEL); | |
6708bb27 AG |
1324 | if (!dev) { |
1325 | pr_err("Unable to allocate memory for se_dev_t\n"); | |
c66ac9db NB |
1326 | return NULL; |
1327 | } | |
c66ac9db | 1328 | |
e3d6f909 | 1329 | transport_init_queue_obj(&dev->dev_queue_obj); |
c66ac9db NB |
1330 | dev->dev_flags = device_flags; |
1331 | dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; | |
5951146d | 1332 | dev->dev_ptr = transport_dev; |
c66ac9db NB |
1333 | dev->se_hba = hba; |
1334 | dev->se_sub_dev = se_dev; | |
1335 | dev->transport = transport; | |
1336 | atomic_set(&dev->active_cmds, 0); | |
1337 | INIT_LIST_HEAD(&dev->dev_list); | |
1338 | INIT_LIST_HEAD(&dev->dev_sep_list); | |
1339 | INIT_LIST_HEAD(&dev->dev_tmr_list); | |
1340 | INIT_LIST_HEAD(&dev->execute_task_list); | |
1341 | INIT_LIST_HEAD(&dev->delayed_cmd_list); | |
1342 | INIT_LIST_HEAD(&dev->ordered_cmd_list); | |
1343 | INIT_LIST_HEAD(&dev->state_task_list); | |
07bde79a | 1344 | INIT_LIST_HEAD(&dev->qf_cmd_list); |
c66ac9db NB |
1345 | spin_lock_init(&dev->execute_task_lock); |
1346 | spin_lock_init(&dev->delayed_cmd_lock); | |
1347 | spin_lock_init(&dev->ordered_cmd_lock); | |
1348 | spin_lock_init(&dev->state_task_lock); | |
1349 | spin_lock_init(&dev->dev_alua_lock); | |
1350 | spin_lock_init(&dev->dev_reservation_lock); | |
1351 | spin_lock_init(&dev->dev_status_lock); | |
1352 | spin_lock_init(&dev->dev_status_thr_lock); | |
1353 | spin_lock_init(&dev->se_port_lock); | |
1354 | spin_lock_init(&dev->se_tmr_lock); | |
07bde79a | 1355 | spin_lock_init(&dev->qf_cmd_lock); |
c66ac9db NB |
1356 | |
1357 | dev->queue_depth = dev_limits->queue_depth; | |
1358 | atomic_set(&dev->depth_left, dev->queue_depth); | |
1359 | atomic_set(&dev->dev_ordered_id, 0); | |
1360 | ||
1361 | se_dev_set_default_attribs(dev, dev_limits); | |
1362 | ||
1363 | dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX); | |
1364 | dev->creation_time = get_jiffies_64(); | |
1365 | spin_lock_init(&dev->stats_lock); | |
1366 | ||
1367 | spin_lock(&hba->device_lock); | |
1368 | list_add_tail(&dev->dev_list, &hba->hba_dev_list); | |
1369 | hba->dev_count++; | |
1370 | spin_unlock(&hba->device_lock); | |
1371 | /* | |
1372 | * Setup the SAM Task Attribute emulation for struct se_device | |
1373 | */ | |
1374 | core_setup_task_attr_emulation(dev); | |
1375 | /* | |
1376 | * Force PR and ALUA passthrough emulation with internal object use. | |
1377 | */ | |
1378 | force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE); | |
1379 | /* | |
1380 | * Setup the Reservations infrastructure for struct se_device | |
1381 | */ | |
1382 | core_setup_reservations(dev, force_pt); | |
1383 | /* | |
1384 | * Setup the Asymmetric Logical Unit Assignment for struct se_device | |
1385 | */ | |
1386 | if (core_setup_alua(dev, force_pt) < 0) | |
1387 | goto out; | |
1388 | ||
1389 | /* | |
1390 | * Startup the struct se_device processing thread | |
1391 | */ | |
1392 | dev->process_thread = kthread_run(transport_processing_thread, dev, | |
e3d6f909 | 1393 | "LIO_%s", dev->transport->name); |
c66ac9db | 1394 | if (IS_ERR(dev->process_thread)) { |
6708bb27 | 1395 | pr_err("Unable to create kthread: LIO_%s\n", |
e3d6f909 | 1396 | dev->transport->name); |
c66ac9db NB |
1397 | goto out; |
1398 | } | |
07bde79a NB |
1399 | /* |
1400 | * Setup work_queue for QUEUE_FULL | |
1401 | */ | |
1402 | INIT_WORK(&dev->qf_work_queue, target_qf_do_work); | |
c66ac9db NB |
1403 | /* |
1404 | * Preload the initial INQUIRY const values if we are doing | |
1405 | * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI | |
1406 | * passthrough because this is being provided by the backend LLD. | |
1407 | * This is required so that transport_get_inquiry() copies these | |
1408 | * originals once back into DEV_T10_WWN(dev) for the virtual device | |
1409 | * setup. | |
1410 | */ | |
e3d6f909 | 1411 | if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { |
f22c1196 | 1412 | if (!inquiry_prod || !inquiry_rev) { |
6708bb27 | 1413 | pr_err("All non TCM/pSCSI plugins require" |
c66ac9db NB |
1414 | " INQUIRY consts\n"); |
1415 | goto out; | |
1416 | } | |
1417 | ||
e3d6f909 AG |
1418 | strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8); |
1419 | strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16); | |
1420 | strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4); | |
c66ac9db NB |
1421 | } |
1422 | scsi_dump_inquiry(dev); | |
1423 | ||
12a18bdc | 1424 | return dev; |
c66ac9db | 1425 | out: |
c66ac9db NB |
1426 | kthread_stop(dev->process_thread); |
1427 | ||
1428 | spin_lock(&hba->device_lock); | |
1429 | list_del(&dev->dev_list); | |
1430 | hba->dev_count--; | |
1431 | spin_unlock(&hba->device_lock); | |
1432 | ||
1433 | se_release_vpd_for_dev(dev); | |
1434 | ||
c66ac9db NB |
1435 | kfree(dev); |
1436 | ||
1437 | return NULL; | |
1438 | } | |
1439 | EXPORT_SYMBOL(transport_add_device_to_core_hba); | |
1440 | ||
1441 | /* transport_generic_prepare_cdb(): | |
1442 | * | |
1443 | * Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will | |
1444 | * contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2. | |
1445 | * The point of this is that since we are mapping iSCSI LUNs to
1446 | * SCSI Target IDs, a non-zero LUN left in the CDB would throw the
1447 | * devices and HBAs for a loop.
1448 | */ | |
1449 | static inline void transport_generic_prepare_cdb( | |
1450 | unsigned char *cdb) | |
1451 | { | |
1452 | switch (cdb[0]) { | |
1453 | case READ_10: /* SBC - RDProtect */ | |
1454 | case READ_12: /* SBC - RDProtect */ | |
1455 | case READ_16: /* SBC - RDProtect */ | |
1456 | case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */ | |
1457 | case VERIFY: /* SBC - VRProtect */ | |
1458 | case VERIFY_16: /* SBC - VRProtect */ | |
1459 | case WRITE_VERIFY: /* SBC - VRProtect */ | |
1460 | case WRITE_VERIFY_12: /* SBC - VRProtect */ | |
1461 | break; | |
1462 | default: | |
1463 | cdb[1] &= 0x1f; /* clear logical unit number */ | |
1464 | break; | |
1465 | } | |
1466 | } | |
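/*
 * Editor's illustrative sketch (not part of the driver): for an initiator
 * that still encodes LUN 2 in bits 7-5 of CDB byte 1 (i.e. 0x40), the
 * default case above clears those bits before the CDB reaches the backend.
 */
#if 0
static void example_prepare_cdb(void)
{
	unsigned char cdb[6] = { TEST_UNIT_READY, 0x40, 0, 0, 0, 0 };

	transport_generic_prepare_cdb(cdb);	/* cdb[1] is now 0x00 */
}
#endif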
1467 | ||
1468 | static struct se_task * | |
1469 | transport_generic_get_task(struct se_cmd *cmd, | |
1470 | enum dma_data_direction data_direction) | |
1471 | { | |
1472 | struct se_task *task; | |
5951146d | 1473 | struct se_device *dev = cmd->se_dev; |
c66ac9db | 1474 | |
6708bb27 | 1475 | task = dev->transport->alloc_task(cmd->t_task_cdb); |
c66ac9db | 1476 | if (!task) { |
6708bb27 | 1477 | pr_err("Unable to allocate struct se_task\n"); |
c66ac9db NB |
1478 | return NULL; |
1479 | } | |
1480 | ||
1481 | INIT_LIST_HEAD(&task->t_list); | |
1482 | INIT_LIST_HEAD(&task->t_execute_list); | |
1483 | INIT_LIST_HEAD(&task->t_state_list); | |
1484 | init_completion(&task->task_stop_comp); | |
c66ac9db | 1485 | task->task_se_cmd = cmd; |
c66ac9db NB |
1486 | task->task_data_direction = data_direction; |
1487 | ||
c66ac9db NB |
1488 | return task; |
1489 | } | |
1490 | ||
1491 | static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *); | |
1492 | ||
c66ac9db NB |
1493 | /* |
1494 | * Used by fabric modules containing a local struct se_cmd within their | |
1495 | * fabric dependent per I/O descriptor. | |
1496 | */ | |
1497 | void transport_init_se_cmd( | |
1498 | struct se_cmd *cmd, | |
1499 | struct target_core_fabric_ops *tfo, | |
1500 | struct se_session *se_sess, | |
1501 | u32 data_length, | |
1502 | int data_direction, | |
1503 | int task_attr, | |
1504 | unsigned char *sense_buffer) | |
1505 | { | |
5951146d AG |
1506 | INIT_LIST_HEAD(&cmd->se_lun_node); |
1507 | INIT_LIST_HEAD(&cmd->se_delayed_node); | |
1508 | INIT_LIST_HEAD(&cmd->se_ordered_node); | |
07bde79a | 1509 | INIT_LIST_HEAD(&cmd->se_qf_node); |
79a7fef2 | 1510 | INIT_LIST_HEAD(&cmd->se_queue_node); |
a17f091d | 1511 | INIT_LIST_HEAD(&cmd->se_cmd_list); |
a1d8b49a AG |
1512 | INIT_LIST_HEAD(&cmd->t_task_list); |
1513 | init_completion(&cmd->transport_lun_fe_stop_comp); | |
1514 | init_completion(&cmd->transport_lun_stop_comp); | |
1515 | init_completion(&cmd->t_transport_stop_comp); | |
a17f091d | 1516 | init_completion(&cmd->cmd_wait_comp); |
a1d8b49a AG |
1517 | spin_lock_init(&cmd->t_state_lock); |
1518 | atomic_set(&cmd->transport_dev_active, 1); | |
c66ac9db NB |
1519 | |
1520 | cmd->se_tfo = tfo; | |
1521 | cmd->se_sess = se_sess; | |
1522 | cmd->data_length = data_length; | |
1523 | cmd->data_direction = data_direction; | |
1524 | cmd->sam_task_attr = task_attr; | |
1525 | cmd->sense_buffer = sense_buffer; | |
1526 | } | |
1527 | EXPORT_SYMBOL(transport_init_se_cmd); | |
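/*
 * Minimal usage sketch (editor's illustration; the my_fabric_* names are
 * assumptions, not part of this driver): a fabric module typically embeds
 * struct se_cmd inside its own per-I/O descriptor and initializes it with
 * transport_init_se_cmd() before sequencing the received CDB.
 */
#if 0
extern struct target_core_fabric_ops my_fabric_ops;

struct my_fabric_cmd {
	struct se_cmd se_cmd;
	unsigned char sense_buf[TRANSPORT_SENSE_BUFFER];
};

static void my_fabric_init_cmd(struct my_fabric_cmd *fc,
		struct se_session *se_sess, u32 length, int dir)
{
	transport_init_se_cmd(&fc->se_cmd, &my_fabric_ops, se_sess, length,
			dir, MSG_SIMPLE_TAG, &fc->sense_buf[0]);
}
#endif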
1528 | ||
1529 | static int transport_check_alloc_task_attr(struct se_cmd *cmd) | |
1530 | { | |
1531 | /* | |
1532 | * Check if SAM Task Attribute emulation is enabled for this | |
1533 | * struct se_device storage object | |
1534 | */ | |
5951146d | 1535 | if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) |
c66ac9db NB |
1536 | return 0; |
1537 | ||
e66ecd50 | 1538 | if (cmd->sam_task_attr == MSG_ACA_TAG) { |
6708bb27 | 1539 | pr_debug("SAM Task Attribute ACA" |
c66ac9db | 1540 | " emulation is not supported\n"); |
e3d6f909 | 1541 | return -EINVAL; |
c66ac9db NB |
1542 | } |
1543 | /* | |
1544 | * Used to determine when ORDERED commands should go from | |
1545 | * Dormant to Active status. | |
1546 | */ | |
5951146d | 1547 | cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id); |
c66ac9db | 1548 | smp_mb__after_atomic_inc(); |
6708bb27 | 1549 | pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", |
c66ac9db | 1550 | cmd->se_ordered_id, cmd->sam_task_attr, |
6708bb27 | 1551 | cmd->se_dev->transport->name); |
c66ac9db NB |
1552 | return 0; |
1553 | } | |
1554 | ||
c66ac9db NB |
1555 | /* transport_generic_allocate_tasks(): |
1556 | * | |
1557 | * Called from fabric RX Thread. | |
1558 | */ | |
1559 | int transport_generic_allocate_tasks( | |
1560 | struct se_cmd *cmd, | |
1561 | unsigned char *cdb) | |
1562 | { | |
1563 | int ret; | |
1564 | ||
1565 | transport_generic_prepare_cdb(cdb); | |
c66ac9db NB |
1566 | /* |
1567 | * Ensure that the received CDB is less than the max (252 + 8) bytes | |
1568 | * for VARIABLE_LENGTH_CMD | |
1569 | */ | |
1570 | if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { | |
6708bb27 | 1571 | pr_err("Received SCSI CDB with command_size: %d that" |
c66ac9db NB |
1572 | " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", |
1573 | scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); | |
e3d6f909 | 1574 | return -EINVAL; |
c66ac9db NB |
1575 | } |
1576 | /* | |
1577 | * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, | |
1578 | * allocate the additional extended CDB buffer now. Otherwise
1579 | * setup the pointer from __t_task_cdb to t_task_cdb. | |
1580 | */ | |
a1d8b49a AG |
1581 | if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { |
1582 | cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), | |
c66ac9db | 1583 | GFP_KERNEL); |
6708bb27 AG |
1584 | if (!cmd->t_task_cdb) { |
1585 | pr_err("Unable to allocate cmd->t_task_cdb" | |
a1d8b49a | 1586 | " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", |
c66ac9db | 1587 | scsi_command_size(cdb), |
a1d8b49a | 1588 | (unsigned long)sizeof(cmd->__t_task_cdb)); |
e3d6f909 | 1589 | return -ENOMEM; |
c66ac9db NB |
1590 | } |
1591 | } else | |
a1d8b49a | 1592 | cmd->t_task_cdb = &cmd->__t_task_cdb[0]; |
c66ac9db | 1593 | /* |
a1d8b49a | 1594 | * Copy the original CDB into cmd->t_task_cdb.
c66ac9db | 1595 | */ |
a1d8b49a | 1596 | memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb)); |
c66ac9db NB |
1597 | /* |
1598 | * Setup the received CDB based on SCSI defined opcodes and | |
1599 | * perform unit attention, persistent reservations and ALUA | |
a1d8b49a | 1600 | * checks for virtual device backends. The cmd->t_task_cdb |
c66ac9db NB |
1601 | * pointer is expected to be setup before we reach this point. |
1602 | */ | |
1603 | ret = transport_generic_cmd_sequencer(cmd, cdb); | |
1604 | if (ret < 0) | |
1605 | return ret; | |
1606 | /* | |
1607 | * Check for SAM Task Attribute Emulation | |
1608 | */ | |
1609 | if (transport_check_alloc_task_attr(cmd) < 0) { | |
1610 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
1611 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | |
5951146d | 1612 | return -EINVAL; |
c66ac9db NB |
1613 | } |
1614 | spin_lock(&cmd->se_lun->lun_sep_lock); | |
1615 | if (cmd->se_lun->lun_sep) | |
1616 | cmd->se_lun->lun_sep->sep_stats.cmd_pdus++; | |
1617 | spin_unlock(&cmd->se_lun->lun_sep_lock); | |
1618 | return 0; | |
1619 | } | |
1620 | EXPORT_SYMBOL(transport_generic_allocate_tasks); | |
1621 | ||
695434e1 NB |
1622 | /* |
1623 | * Used by fabric module frontends to queue tasks directly. | |
1624 | * May only be used from process context.
1625 | */ | |
1626 | int transport_handle_cdb_direct( | |
1627 | struct se_cmd *cmd) | |
1628 | { | |
dd8ae59d NB |
1629 | int ret; |
1630 | ||
695434e1 NB |
1631 | if (!cmd->se_lun) { |
1632 | dump_stack(); | |
6708bb27 | 1633 | pr_err("cmd->se_lun is NULL\n"); |
695434e1 NB |
1634 | return -EINVAL; |
1635 | } | |
1636 | if (in_interrupt()) { | |
1637 | dump_stack(); | |
6708bb27 | 1638 | pr_err("transport_generic_handle_cdb cannot be called" |
695434e1 NB |
1639 | " from interrupt context\n"); |
1640 | return -EINVAL; | |
1641 | } | |
dd8ae59d NB |
1642 | /* |
1643 | * Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following | |
1644 | * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue() | |
1645 | * in existing usage to ensure that outstanding descriptors are handled | |
d14921d6 | 1646 | * correctly during shutdown via transport_wait_for_tasks() |
dd8ae59d NB |
1647 | * |
1648 | * Also, we don't take cmd->t_state_lock here as we only expect | |
1649 | * this to be called for initial descriptor submission. | |
1650 | */ | |
1651 | cmd->t_state = TRANSPORT_NEW_CMD; | |
1652 | atomic_set(&cmd->t_transport_active, 1); | |
1653 | /* | |
1654 | * transport_generic_new_cmd() is already handling QUEUE_FULL, | |
1655 | * so follow TRANSPORT_NEW_CMD processing thread context usage | |
1656 | * and call transport_generic_request_failure() if necessary.
1657 | */ | |
1658 | ret = transport_generic_new_cmd(cmd); | |
f147abb4 | 1659 | if (ret < 0) { |
dd8ae59d | 1660 | cmd->transport_error_status = ret; |
4499dda8 | 1661 | transport_generic_request_failure(cmd, 0, |
dd8ae59d NB |
1662 | (cmd->data_direction != DMA_TO_DEVICE)); |
1663 | } | |
1664 | return 0; | |
695434e1 NB |
1665 | } |
1666 | EXPORT_SYMBOL(transport_handle_cdb_direct); | |
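/*
 * Usage sketch (editor's illustration; my_fabric_submit is an assumed
 * helper, not part of this driver): from process context a fabric that has
 * already set up cmd->se_lun (e.g. via transport_lookup_cmd_lun()) can
 * sequence the CDB and hand the descriptor off directly, instead of going
 * through TFO->new_cmd_map() and the processing thread.
 */
#if 0
static int my_fabric_submit(struct se_cmd *se_cmd, unsigned char *cdb)
{
	int ret;

	ret = transport_generic_allocate_tasks(se_cmd, cdb);
	if (ret < 0)
		return ret;

	return transport_handle_cdb_direct(se_cmd);
}
#endif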
1667 | ||
c66ac9db NB |
1668 | /* |
1669 | * Used by fabric module frontends defining a TFO->new_cmd_map() caller | |
1670 | * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to | |
1671 | * complete setup in TCM process context w/ TFO->new_cmd_map(). | |
1672 | */ | |
1673 | int transport_generic_handle_cdb_map( | |
1674 | struct se_cmd *cmd) | |
1675 | { | |
e3d6f909 | 1676 | if (!cmd->se_lun) { |
c66ac9db | 1677 | dump_stack(); |
6708bb27 | 1678 | pr_err("cmd->se_lun is NULL\n"); |
e3d6f909 | 1679 | return -EINVAL; |
c66ac9db NB |
1680 | } |
1681 | ||
f7a5cc0b | 1682 | transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false); |
c66ac9db NB |
1683 | return 0; |
1684 | } | |
1685 | EXPORT_SYMBOL(transport_generic_handle_cdb_map); | |
1686 | ||
1687 | /* transport_generic_handle_data(): | |
1688 | * | |
1689 | * | |
1690 | */ | |
1691 | int transport_generic_handle_data( | |
1692 | struct se_cmd *cmd) | |
1693 | { | |
1694 | /* | |
1695 | * For the software fabric case, we assume the nexus is being
1696 | * failed/shutdown when signals are pending from the kthread context | |
1697 | * caller, so we return a failure. For the HW target mode case running | |
1698 | * in interrupt code, the signal_pending() check is skipped. | |
1699 | */ | |
1700 | if (!in_interrupt() && signal_pending(current)) | |
e3d6f909 | 1701 | return -EPERM; |
c66ac9db NB |
1702 | /* |
1703 | * If the received CDB has already been ABORTED by the generic
1704 | * target engine, we now call transport_check_aborted_status()
1705 | * to queue any delayed TASK_ABORTED status for the received CDB to the
25985edc | 1706 | * fabric module as we are expecting no further incoming DATA OUT |
c66ac9db NB |
1707 | * sequences at this point. |
1708 | */ | |
1709 | if (transport_check_aborted_status(cmd, 1) != 0) | |
1710 | return 0; | |
1711 | ||
f7a5cc0b | 1712 | transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false); |
c66ac9db NB |
1713 | return 0; |
1714 | } | |
1715 | EXPORT_SYMBOL(transport_generic_handle_data); | |
1716 | ||
1717 | /* transport_generic_handle_tmr(): | |
1718 | * | |
1719 | * | |
1720 | */ | |
1721 | int transport_generic_handle_tmr( | |
1722 | struct se_cmd *cmd) | |
1723 | { | |
f7a5cc0b | 1724 | transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false); |
c66ac9db NB |
1725 | return 0; |
1726 | } | |
1727 | EXPORT_SYMBOL(transport_generic_handle_tmr); | |
1728 | ||
cdbb70bb CH |
1729 | /* |
1730 | * If the task is active, request it to be stopped and sleep until it | |
1731 | * has completed. | |
1732 | */ | |
1733 | bool target_stop_task(struct se_task *task, unsigned long *flags) | |
1734 | { | |
1735 | struct se_cmd *cmd = task->task_se_cmd; | |
1736 | bool was_active = false; | |
1737 | ||
1738 | if (task->task_flags & TF_ACTIVE) { | |
1739 | task->task_flags |= TF_REQUEST_STOP; | |
1740 | spin_unlock_irqrestore(&cmd->t_state_lock, *flags); | |
1741 | ||
1742 | pr_debug("Task %p waiting to complete\n", task); | |
1743 | wait_for_completion(&task->task_stop_comp); | |
1744 | pr_debug("Task %p stopped successfully\n", task); | |
1745 | ||
1746 | spin_lock_irqsave(&cmd->t_state_lock, *flags); | |
1747 | atomic_dec(&cmd->t_task_cdbs_left); | |
1748 | task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP); | |
1749 | was_active = true; | |
1750 | } | |
1751 | ||
cdbb70bb CH |
1752 | return was_active; |
1753 | } | |
1754 | ||
c66ac9db NB |
1755 | static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) |
1756 | { | |
1757 | struct se_task *task, *task_tmp; | |
1758 | unsigned long flags; | |
1759 | int ret = 0; | |
1760 | ||
6708bb27 | 1761 | pr_debug("ITT[0x%08x] - Stopping tasks\n", |
e3d6f909 | 1762 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db NB |
1763 | |
1764 | /* | |
1765 | * No tasks remain in the execution queue | |
1766 | */ | |
a1d8b49a | 1767 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 1768 | list_for_each_entry_safe(task, task_tmp, |
a1d8b49a | 1769 | &cmd->t_task_list, t_list) { |
04629b7b | 1770 | pr_debug("Processing task %p\n", task); |
c66ac9db NB |
1771 | /* |
1772 | * If the struct se_task has not been sent and is not active, | |
1773 | * remove the struct se_task from the execution queue. | |
1774 | */ | |
6c76bf95 | 1775 | if (!(task->task_flags & (TF_ACTIVE | TF_SENT))) { |
a1d8b49a | 1776 | spin_unlock_irqrestore(&cmd->t_state_lock, |
c66ac9db NB |
1777 | flags); |
1778 | transport_remove_task_from_execute_queue(task, | |
42bf829e | 1779 | cmd->se_dev); |
c66ac9db | 1780 | |
04629b7b | 1781 | pr_debug("Task %p removed from execute queue\n", task); |
a1d8b49a | 1782 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db NB |
1783 | continue; |
1784 | } | |
1785 | ||
cdbb70bb | 1786 | if (!target_stop_task(task, &flags)) { |
04629b7b | 1787 | pr_debug("Task %p - did nothing\n", task); |
c66ac9db NB |
1788 | ret++; |
1789 | } | |
c66ac9db | 1790 | } |
a1d8b49a | 1791 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
1792 | |
1793 | return ret; | |
1794 | } | |
1795 | ||
c66ac9db NB |
1796 | /* |
1797 | * Handle SAM-esque emulation for generic transport request failures. | |
1798 | */ | |
1799 | static void transport_generic_request_failure( | |
1800 | struct se_cmd *cmd, | |
c66ac9db NB |
1801 | int complete, |
1802 | int sc) | |
1803 | { | |
07bde79a NB |
1804 | int ret = 0; |
1805 | ||
6708bb27 | 1806 | pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" |
e3d6f909 | 1807 | " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), |
a1d8b49a | 1808 | cmd->t_task_cdb[0]); |
f2da9dbd | 1809 | pr_debug("-----[ i_state: %d t_state: %d transport_error_status: %d\n", |
e3d6f909 | 1810 | cmd->se_tfo->get_cmd_state(cmd), |
f2da9dbd | 1811 | cmd->t_state, |
c66ac9db | 1812 | cmd->transport_error_status); |
6708bb27 | 1813 | pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d" |
c66ac9db NB |
1814 | " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" |
1815 | " t_transport_active: %d t_transport_stop: %d" | |
6708bb27 | 1816 | " t_transport_sent: %d\n", cmd->t_task_list_num, |
a1d8b49a AG |
1817 | atomic_read(&cmd->t_task_cdbs_left), |
1818 | atomic_read(&cmd->t_task_cdbs_sent), | |
1819 | atomic_read(&cmd->t_task_cdbs_ex_left), | |
1820 | atomic_read(&cmd->t_transport_active), | |
1821 | atomic_read(&cmd->t_transport_stop), | |
1822 | atomic_read(&cmd->t_transport_sent)); | |
c66ac9db | 1823 | |
c66ac9db NB |
1824 | /* |
1825 | * For SAM Task Attribute emulation for failed struct se_cmd | |
1826 | */ | |
1827 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) | |
1828 | transport_complete_task_attr(cmd); | |
1829 | ||
1830 | if (complete) { | |
c66ac9db NB |
1831 | cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE; |
1832 | } | |
1833 | ||
1834 | switch (cmd->transport_error_status) { | |
1835 | case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE: | |
1836 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | |
1837 | break; | |
1838 | case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS: | |
1839 | cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY; | |
1840 | break; | |
1841 | case PYX_TRANSPORT_INVALID_CDB_FIELD: | |
1842 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | |
1843 | break; | |
1844 | case PYX_TRANSPORT_INVALID_PARAMETER_LIST: | |
1845 | cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; | |
1846 | break; | |
1847 | case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES: | |
1848 | if (!sc) | |
1849 | transport_new_cmd_failure(cmd); | |
1850 | /* | |
1851 | * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES, | |
1852 | * we force this session to fall back to session | |
1853 | * recovery. | |
1854 | */ | |
e3d6f909 AG |
1855 | cmd->se_tfo->fall_back_to_erl0(cmd->se_sess); |
1856 | cmd->se_tfo->stop_session(cmd->se_sess, 0, 0); | |
c66ac9db NB |
1857 | |
1858 | goto check_stop; | |
1859 | case PYX_TRANSPORT_LU_COMM_FAILURE: | |
1860 | case PYX_TRANSPORT_ILLEGAL_REQUEST: | |
1861 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | |
1862 | break; | |
1863 | case PYX_TRANSPORT_UNKNOWN_MODE_PAGE: | |
1864 | cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE; | |
1865 | break; | |
1866 | case PYX_TRANSPORT_WRITE_PROTECTED: | |
1867 | cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; | |
1868 | break; | |
1869 | case PYX_TRANSPORT_RESERVATION_CONFLICT: | |
1870 | /* | |
1871 | * No SENSE Data payload for this case, set SCSI Status | |
1872 | * and queue the response to $FABRIC_MOD. | |
1873 | * | |
1874 | * Uses linux/include/scsi/scsi.h SAM status codes defs | |
1875 | */ | |
1876 | cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; | |
1877 | /* | |
1878 | * For UA Interlock Code 11b, a RESERVATION CONFLICT will | |
1879 | * establish a UNIT ATTENTION with PREVIOUS RESERVATION | |
1880 | * CONFLICT STATUS. | |
1881 | * | |
1882 | * See spc4r17, section 7.4.6 Control Mode Page, Table 349 | |
1883 | */ | |
e3d6f909 AG |
1884 | if (cmd->se_sess && |
1885 | cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) | |
1886 | core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, | |
c66ac9db NB |
1887 | cmd->orig_fe_lun, 0x2C, |
1888 | ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); | |
1889 | ||
07bde79a | 1890 | ret = cmd->se_tfo->queue_status(cmd); |
f147abb4 | 1891 | if (ret == -EAGAIN || ret == -ENOMEM) |
07bde79a | 1892 | goto queue_full; |
c66ac9db NB |
1893 | goto check_stop; |
1894 | case PYX_TRANSPORT_USE_SENSE_REASON: | |
1895 | /* | |
1896 | * struct se_cmd->scsi_sense_reason already set | |
1897 | */ | |
1898 | break; | |
1899 | default: | |
6708bb27 | 1900 | pr_err("Unknown transport error for CDB 0x%02x: %d\n", |
a1d8b49a | 1901 | cmd->t_task_cdb[0], |
c66ac9db NB |
1902 | cmd->transport_error_status); |
1903 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | |
1904 | break; | |
1905 | } | |
16ab8e60 NB |
1906 | /* |
1907 | * If a fabric does not define a cmd->se_tfo->new_cmd_map caller, | |
1908 | * make the call to transport_send_check_condition_and_sense() | |
1909 | * directly. Otherwise expect the fabric to make the call to | |
1910 | * transport_send_check_condition_and_sense() after handling | |
1911 | * possible unsolicited write data payloads.
1912 | */ | |
1913 | if (!sc && !cmd->se_tfo->new_cmd_map) | |
c66ac9db | 1914 | transport_new_cmd_failure(cmd); |
07bde79a NB |
1915 | else { |
1916 | ret = transport_send_check_condition_and_sense(cmd, | |
1917 | cmd->scsi_sense_reason, 0); | |
f147abb4 | 1918 | if (ret == -EAGAIN || ret == -ENOMEM) |
07bde79a NB |
1919 | goto queue_full; |
1920 | } | |
1921 | ||
c66ac9db NB |
1922 | check_stop: |
1923 | transport_lun_remove_cmd(cmd); | |
6708bb27 | 1924 | if (!transport_cmd_check_stop_to_fabric(cmd)) |
c66ac9db | 1925 | ; |
07bde79a NB |
1926 | return; |
1927 | ||
1928 | queue_full: | |
e057f533 CH |
1929 | cmd->t_state = TRANSPORT_COMPLETE_QF_OK; |
1930 | transport_handle_queue_full(cmd, cmd->se_dev); | |
c66ac9db NB |
1931 | } |
1932 | ||
c66ac9db NB |
1933 | static inline u32 transport_lba_21(unsigned char *cdb) |
1934 | { | |
1935 | return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3]; | |
1936 | } | |
1937 | ||
1938 | static inline u32 transport_lba_32(unsigned char *cdb) | |
1939 | { | |
1940 | return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; | |
1941 | } | |
1942 | ||
1943 | static inline unsigned long long transport_lba_64(unsigned char *cdb) | |
1944 | { | |
1945 | unsigned int __v1, __v2; | |
1946 | ||
1947 | __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; | |
1948 | __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; | |
1949 | ||
1950 | return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; | |
1951 | } | |
1952 | ||
1953 | /* | |
1954 | * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs | |
1955 | */ | |
1956 | static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) | |
1957 | { | |
1958 | unsigned int __v1, __v2; | |
1959 | ||
1960 | __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15]; | |
1961 | __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19]; | |
1962 | ||
1963 | return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; | |
1964 | } | |
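/*
 * Editor's note (illustrative only): the open-coded shifts above assemble
 * big-endian CDB fields, so with <asm/unaligned.h> already included the
 * same values could equally be read as shown below.
 */
#if 0
static unsigned long long example_read16_lba(unsigned char *cdb)
{
	return get_unaligned_be64(&cdb[2]);	/* matches transport_lba_64(cdb) */
}
#endif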
1965 | ||
1966 | static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd) | |
1967 | { | |
1968 | unsigned long flags; | |
1969 | ||
a1d8b49a | 1970 | spin_lock_irqsave(&se_cmd->t_state_lock, flags); |
c66ac9db | 1971 | se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; |
a1d8b49a | 1972 | spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); |
c66ac9db NB |
1973 | } |
1974 | ||
c66ac9db NB |
1975 | static inline int transport_tcq_window_closed(struct se_device *dev) |
1976 | { | |
1977 | if (dev->dev_tcq_window_closed++ < | |
1978 | PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) { | |
1979 | msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT); | |
1980 | } else | |
1981 | msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG); | |
1982 | ||
e3d6f909 | 1983 | wake_up_interruptible(&dev->dev_queue_obj.thread_wq); |
c66ac9db NB |
1984 | return 0; |
1985 | } | |
1986 | ||
1987 | /* | |
1988 | * Called from Fabric Module context from transport_execute_tasks() | |
1989 | * | |
1990 | * The return of this function determines whether the tasks from struct se_cmd
1991 | * get added to the execution queue in transport_execute_tasks(), | |
1992 | * or are added to the delayed or ordered lists here. | |
1993 | */ | |
1994 | static inline int transport_execute_task_attr(struct se_cmd *cmd) | |
1995 | { | |
5951146d | 1996 | if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) |
c66ac9db NB |
1997 | return 1; |
1998 | /* | |
25985edc | 1999 | * Check for the existence of HEAD_OF_QUEUE, and if true return 1 |
c66ac9db NB |
2000 | * to allow the list of tasks for the passed struct se_cmd to be added to the front of the list.
2001 | */ | |
e66ecd50 | 2002 | if (cmd->sam_task_attr == MSG_HEAD_TAG) { |
5951146d | 2003 | atomic_inc(&cmd->se_dev->dev_hoq_count); |
c66ac9db | 2004 | smp_mb__after_atomic_inc(); |
6708bb27 | 2005 | pr_debug("Added HEAD_OF_QUEUE for CDB:" |
c66ac9db | 2006 | " 0x%02x, se_ordered_id: %u\n", |
6708bb27 | 2007 | cmd->t_task_cdb[0], |
c66ac9db NB |
2008 | cmd->se_ordered_id); |
2009 | return 1; | |
e66ecd50 | 2010 | } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { |
5951146d AG |
2011 | spin_lock(&cmd->se_dev->ordered_cmd_lock); |
2012 | list_add_tail(&cmd->se_ordered_node, | |
2013 | &cmd->se_dev->ordered_cmd_list); | |
2014 | spin_unlock(&cmd->se_dev->ordered_cmd_lock); | |
c66ac9db | 2015 | |
5951146d | 2016 | atomic_inc(&cmd->se_dev->dev_ordered_sync); |
c66ac9db NB |
2017 | smp_mb__after_atomic_inc(); |
2018 | ||
6708bb27 | 2019 | pr_debug("Added ORDERED for CDB: 0x%02x to ordered" |
c66ac9db | 2020 | " list, se_ordered_id: %u\n", |
a1d8b49a | 2021 | cmd->t_task_cdb[0], |
c66ac9db NB |
2022 | cmd->se_ordered_id); |
2023 | /* | |
2024 | * Add ORDERED command to tail of execution queue if | |
2025 | * no other older commands exist that need to be | |
2026 | * completed first. | |
2027 | */ | |
6708bb27 | 2028 | if (!atomic_read(&cmd->se_dev->simple_cmds)) |
c66ac9db NB |
2029 | return 1; |
2030 | } else { | |
2031 | /* | |
2032 | * For SIMPLE and UNTAGGED Task Attribute commands | |
2033 | */ | |
5951146d | 2034 | atomic_inc(&cmd->se_dev->simple_cmds); |
c66ac9db NB |
2035 | smp_mb__after_atomic_inc(); |
2036 | } | |
2037 | /* | |
2038 | * Otherwise if one or more outstanding ORDERED task attribute exist, | |
2039 | * add the dormant task(s) built for the passed struct se_cmd to the | |
2040 | * execution queue and become in Active state for this struct se_device. | |
2041 | */ | |
5951146d | 2042 | if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) { |
c66ac9db NB |
2043 | /* |
2044 | * Otherwise, add cmd w/ tasks to delayed cmd queue that | |
25985edc | 2045 | * will be drained upon completion of HEAD_OF_QUEUE task. |
c66ac9db | 2046 | */ |
5951146d | 2047 | spin_lock(&cmd->se_dev->delayed_cmd_lock); |
c66ac9db | 2048 | cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR; |
5951146d AG |
2049 | list_add_tail(&cmd->se_delayed_node, |
2050 | &cmd->se_dev->delayed_cmd_list); | |
2051 | spin_unlock(&cmd->se_dev->delayed_cmd_lock); | |
c66ac9db | 2052 | |
6708bb27 | 2053 | pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to" |
c66ac9db | 2054 | " delayed CMD list, se_ordered_id: %u\n", |
a1d8b49a | 2055 | cmd->t_task_cdb[0], cmd->sam_task_attr, |
c66ac9db NB |
2056 | cmd->se_ordered_id); |
2057 | /* | |
2058 | * Return zero to let transport_execute_tasks() know | |
2059 | * not to add the delayed tasks to the execution list. | |
2060 | */ | |
2061 | return 0; | |
2062 | } | |
2063 | /* | |
2064 | * Otherwise, no ORDERED task attributes exist.
2065 | */ | |
2066 | return 1; | |
2067 | } | |
2068 | ||
2069 | /* | |
2070 | * Called from fabric module context in transport_generic_new_cmd() and | |
2071 | * transport_generic_process_write() | |
2072 | */ | |
2073 | static int transport_execute_tasks(struct se_cmd *cmd) | |
2074 | { | |
2075 | int add_tasks; | |
2076 | ||
db1620a2 CH |
2077 | if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) { |
2078 | cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE; | |
4499dda8 | 2079 | transport_generic_request_failure(cmd, 0, 1); |
db1620a2 | 2080 | return 0; |
c66ac9db | 2081 | } |
db1620a2 | 2082 | |
c66ac9db NB |
2083 | /* |
2084 | * Call transport_cmd_check_stop() to see if a fabric exception | |
25985edc | 2085 | * has occurred that prevents execution. |
c66ac9db | 2086 | */ |
6708bb27 | 2087 | if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) { |
c66ac9db NB |
2088 | /* |
2089 | * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE | |
2090 | * attribute for the tasks of the received struct se_cmd CDB | |
2091 | */ | |
2092 | add_tasks = transport_execute_task_attr(cmd); | |
e3d6f909 | 2093 | if (!add_tasks) |
c66ac9db NB |
2094 | goto execute_tasks; |
2095 | /* | |
2096 | * This calls transport_add_tasks_from_cmd() to handle | |
2097 | * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation | |
2098 | * (if enabled) in __transport_add_task_to_execute_queue() and | |
2099 | * transport_add_task_check_sam_attr(). | |
2100 | */ | |
2101 | transport_add_tasks_from_cmd(cmd); | |
2102 | } | |
2103 | /* | |
2104 | * Kick the execution queue for the cmd associated struct se_device | |
2105 | * storage object. | |
2106 | */ | |
2107 | execute_tasks: | |
5951146d | 2108 | __transport_execute_tasks(cmd->se_dev); |
c66ac9db NB |
2109 | return 0; |
2110 | } | |
2111 | ||
2112 | /* | |
2113 | * Called to check the struct se_device tcq depth window, and once open, pull struct se_task
2114 | * entries from struct se_device->execute_task_list and dispatch them to the backend.
2115 | * | |
2116 | * Called from transport_processing_thread() | |
2117 | */ | |
2118 | static int __transport_execute_tasks(struct se_device *dev) | |
2119 | { | |
2120 | int error; | |
2121 | struct se_cmd *cmd = NULL; | |
e3d6f909 | 2122 | struct se_task *task = NULL; |
c66ac9db NB |
2123 | unsigned long flags; |
2124 | ||
2125 | /* | |
2126 | * Check if there is enough room in the device and HBA queue to send | |
a1d8b49a | 2127 | * struct se_tasks to the selected transport. |
c66ac9db NB |
2128 | */ |
2129 | check_depth: | |
e3d6f909 | 2130 | if (!atomic_read(&dev->depth_left)) |
c66ac9db | 2131 | return transport_tcq_window_closed(dev); |
c66ac9db | 2132 | |
e3d6f909 | 2133 | dev->dev_tcq_window_closed = 0; |
c66ac9db | 2134 | |
e3d6f909 AG |
2135 | spin_lock_irq(&dev->execute_task_lock); |
2136 | if (list_empty(&dev->execute_task_list)) { | |
2137 | spin_unlock_irq(&dev->execute_task_lock); | |
c66ac9db NB |
2138 | return 0; |
2139 | } | |
e3d6f909 AG |
2140 | task = list_first_entry(&dev->execute_task_list, |
2141 | struct se_task, t_execute_list); | |
04629b7b | 2142 | __transport_remove_task_from_execute_queue(task, dev); |
e3d6f909 | 2143 | spin_unlock_irq(&dev->execute_task_lock); |
c66ac9db NB |
2144 | |
2145 | atomic_dec(&dev->depth_left); | |
c66ac9db | 2146 | |
e3d6f909 | 2147 | cmd = task->task_se_cmd; |
c66ac9db | 2148 | |
a1d8b49a | 2149 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
6c76bf95 | 2150 | task->task_flags |= (TF_ACTIVE | TF_SENT); |
a1d8b49a | 2151 | atomic_inc(&cmd->t_task_cdbs_sent); |
c66ac9db | 2152 | |
a1d8b49a AG |
2153 | if (atomic_read(&cmd->t_task_cdbs_sent) == |
2154 | cmd->t_task_list_num) | |
415a090a | 2155 | atomic_set(&cmd->t_transport_sent, 1); |
c66ac9db | 2156 | |
a1d8b49a | 2157 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 2158 | /* |
e76a35d6 | 2159 | * The struct se_cmd->execute_task() function pointer is used |
e3d6f909 | 2160 | * to grab REPORT_LUNS and other CDBs we want to handle before they hit the |
c66ac9db NB |
2161 | * struct se_subsystem_api->do_task() caller below. |
2162 | */ | |
e76a35d6 CH |
2163 | if (cmd->execute_task) { |
2164 | error = cmd->execute_task(task); | |
c66ac9db NB |
2165 | if (error != 0) { |
2166 | cmd->transport_error_status = error; | |
6c76bf95 CH |
2167 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
2168 | task->task_flags &= ~TF_ACTIVE; | |
2169 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
415a090a | 2170 | atomic_set(&cmd->t_transport_sent, 0); |
c66ac9db | 2171 | transport_stop_tasks_for_cmd(cmd); |
4499dda8 CH |
2172 | atomic_inc(&dev->depth_left); |
2173 | transport_generic_request_failure(cmd, 0, 1); | |
c66ac9db NB |
2174 | goto check_depth; |
2175 | } | |
2176 | /* | |
e76a35d6 | 2177 | * Handle the successful completion for execute_task() |
c66ac9db NB |
2178 | * for synchronous operation when SCF_EMULATE_CDB_ASYNC is not set.
2179 | * Otherwise the caller is expected to complete the task with | |
2180 | * proper status. | |
2181 | */ | |
2182 | if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) { | |
2183 | cmd->scsi_status = SAM_STAT_GOOD; | |
2184 | task->task_scsi_status = GOOD; | |
2185 | transport_complete_task(task, 1); | |
2186 | } | |
2187 | } else { | |
2188 | /* | |
2189 | * Currently for all virtual TCM plugins including IBLOCK, FILEIO and | |
2190 | * RAMDISK we use the internal transport_emulate_control_cdb() logic | |
2191 | * with struct se_subsystem_api callers for the primary SPC-3 TYPE_DISK | |
2192 | * LUN emulation code. | |
2193 | * | |
2194 | * For TCM/pSCSI and all other SCF_SCSI_DATA_SG_IO_CDB I/O tasks we | |
2195 | * call ->do_task() directly and let the underlying TCM subsystem plugin | |
2196 | * code handle the CDB emulation. | |
2197 | */ | |
e3d6f909 AG |
2198 | if ((dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) && |
2199 | (!(task->task_se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))) | |
c66ac9db NB |
2200 | error = transport_emulate_control_cdb(task); |
2201 | else | |
e3d6f909 | 2202 | error = dev->transport->do_task(task); |
c66ac9db NB |
2203 | |
2204 | if (error != 0) { | |
2205 | cmd->transport_error_status = error; | |
6c76bf95 CH |
2206 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
2207 | task->task_flags &= ~TF_ACTIVE; | |
2208 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
415a090a | 2209 | atomic_set(&cmd->t_transport_sent, 0); |
c66ac9db | 2210 | transport_stop_tasks_for_cmd(cmd); |
4499dda8 CH |
2211 | atomic_inc(&dev->depth_left); |
2212 | transport_generic_request_failure(cmd, 0, 1); | |
c66ac9db NB |
2213 | } |
2214 | } | |
2215 | ||
2216 | goto check_depth; | |
2217 | ||
2218 | return 0; | |
2219 | } | |
2220 | ||
2221 | void transport_new_cmd_failure(struct se_cmd *se_cmd) | |
2222 | { | |
2223 | unsigned long flags; | |
2224 | /* | |
2225 | * Any unsolicited data will get dumped for a failed command inside of
2226 | * the fabric plugin | |
2227 | */ | |
a1d8b49a | 2228 | spin_lock_irqsave(&se_cmd->t_state_lock, flags); |
c66ac9db NB |
2229 | se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED; |
2230 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
a1d8b49a | 2231 | spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); |
c66ac9db NB |
2232 | } |
2233 | ||
c66ac9db NB |
2234 | static inline u32 transport_get_sectors_6( |
2235 | unsigned char *cdb, | |
2236 | struct se_cmd *cmd, | |
2237 | int *ret) | |
2238 | { | |
5951146d | 2239 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
2240 | |
2241 | /* | |
2242 | * Assume TYPE_DISK for non struct se_device objects. | |
2243 | * Use 8-bit sector value. | |
2244 | */ | |
2245 | if (!dev) | |
2246 | goto type_disk; | |
2247 | ||
2248 | /* | |
2249 | * Use 24-bit allocation length for TYPE_TAPE. | |
2250 | */ | |
e3d6f909 | 2251 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) |
c66ac9db NB |
2252 | return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4]; |
2253 | ||
2254 | /* | |
2255 | * Everything else assume TYPE_DISK Sector CDB location. | |
2256 | * Use 8-bit sector value. | |
2257 | */ | |
2258 | type_disk: | |
2259 | return (u32)cdb[4]; | |
2260 | } | |
2261 | ||
2262 | static inline u32 transport_get_sectors_10( | |
2263 | unsigned char *cdb, | |
2264 | struct se_cmd *cmd, | |
2265 | int *ret) | |
2266 | { | |
5951146d | 2267 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
2268 | |
2269 | /* | |
2270 | * Assume TYPE_DISK for non struct se_device objects. | |
2271 | * Use 16-bit sector value. | |
2272 | */ | |
2273 | if (!dev) | |
2274 | goto type_disk; | |
2275 | ||
2276 | /* | |
2277 | * XXX_10 is not defined in SSC, throw an exception | |
2278 | */ | |
e3d6f909 AG |
2279 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) { |
2280 | *ret = -EINVAL; | |
c66ac9db NB |
2281 | return 0; |
2282 | } | |
2283 | ||
2284 | /* | |
2285 | * Everything else assume TYPE_DISK Sector CDB location. | |
2286 | * Use 16-bit sector value. | |
2287 | */ | |
2288 | type_disk: | |
2289 | return (u32)(cdb[7] << 8) + cdb[8]; | |
2290 | } | |
2291 | ||
2292 | static inline u32 transport_get_sectors_12( | |
2293 | unsigned char *cdb, | |
2294 | struct se_cmd *cmd, | |
2295 | int *ret) | |
2296 | { | |
5951146d | 2297 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
2298 | |
2299 | /* | |
2300 | * Assume TYPE_DISK for non struct se_device objects. | |
2301 | * Use 32-bit sector value. | |
2302 | */ | |
2303 | if (!dev) | |
2304 | goto type_disk; | |
2305 | ||
2306 | /* | |
2307 | * XXX_12 is not defined in SSC, throw an exception | |
2308 | */ | |
e3d6f909 AG |
2309 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) { |
2310 | *ret = -EINVAL; | |
c66ac9db NB |
2311 | return 0; |
2312 | } | |
2313 | ||
2314 | /* | |
2315 | * Everything else assume TYPE_DISK Sector CDB location. | |
2316 | * Use 32-bit sector value. | |
2317 | */ | |
2318 | type_disk: | |
2319 | return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9]; | |
2320 | } | |
2321 | ||
2322 | static inline u32 transport_get_sectors_16( | |
2323 | unsigned char *cdb, | |
2324 | struct se_cmd *cmd, | |
2325 | int *ret) | |
2326 | { | |
5951146d | 2327 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
2328 | |
2329 | /* | |
2330 | * Assume TYPE_DISK for non struct se_device objects. | |
2331 | * Use 32-bit sector value. | |
2332 | */ | |
2333 | if (!dev) | |
2334 | goto type_disk; | |
2335 | ||
2336 | /* | |
2337 | * Use 24-bit allocation length for TYPE_TAPE. | |
2338 | */ | |
e3d6f909 | 2339 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) |
c66ac9db NB |
2340 | return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14]; |
2341 | ||
2342 | type_disk: | |
2343 | return (u32)(cdb[10] << 24) + (cdb[11] << 16) + | |
2344 | (cdb[12] << 8) + cdb[13]; | |
2345 | } | |
2346 | ||
2347 | /* | |
2348 | * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants | |
2349 | */ | |
2350 | static inline u32 transport_get_sectors_32( | |
2351 | unsigned char *cdb, | |
2352 | struct se_cmd *cmd, | |
2353 | int *ret) | |
2354 | { | |
2355 | /* | |
2356 | * Assume TYPE_DISK for non struct se_device objects. | |
2357 | * Use 32-bit sector value. | |
2358 | */ | |
2359 | return (u32)(cdb[28] << 24) + (cdb[29] << 16) + | |
2360 | (cdb[30] << 8) + cdb[31]; | |
2361 | ||
2362 | } | |
2363 | ||
2364 | static inline u32 transport_get_size( | |
2365 | u32 sectors, | |
2366 | unsigned char *cdb, | |
2367 | struct se_cmd *cmd) | |
2368 | { | |
5951146d | 2369 | struct se_device *dev = cmd->se_dev; |
c66ac9db | 2370 | |
e3d6f909 | 2371 | if (dev->transport->get_device_type(dev) == TYPE_TAPE) { |
c66ac9db | 2372 | if (cdb[1] & 1) { /* sectors */ |
e3d6f909 | 2373 | return dev->se_sub_dev->se_dev_attrib.block_size * sectors; |
c66ac9db NB |
2374 | } else /* bytes */ |
2375 | return sectors; | |
2376 | } | |
2377 | #if 0 | |
6708bb27 | 2378 | pr_debug("Returning block_size: %u, sectors: %u == %u for" |
e3d6f909 AG |
2379 | " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors, |
2380 | dev->se_sub_dev->se_dev_attrib.block_size * sectors, | |
2381 | dev->transport->name); | |
c66ac9db | 2382 | #endif |
e3d6f909 | 2383 | return dev->se_sub_dev->se_dev_attrib.block_size * sectors; |
c66ac9db NB |
2384 | } |
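/*
 * Worked example (editor's illustration): for a TYPE_DISK backend with a
 * 512-byte block_size and sectors == 8, transport_get_size() returns 4096
 * bytes; for TYPE_TAPE with bit 0 of cdb[1] clear, the same value is
 * returned unscaled as a byte count.
 */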
2385 | ||
c66ac9db NB |
2386 | static void transport_xor_callback(struct se_cmd *cmd) |
2387 | { | |
2388 | unsigned char *buf, *addr; | |
ec98f782 | 2389 | struct scatterlist *sg; |
c66ac9db NB |
2390 | unsigned int offset; |
2391 | int i; | |
ec98f782 | 2392 | int count; |
c66ac9db NB |
2393 | /* |
2394 | * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command | |
2395 | * | |
2396 | * 1) read the specified logical block(s); | |
2397 | * 2) transfer logical blocks from the data-out buffer; | |
2398 | * 3) XOR the logical blocks transferred from the data-out buffer with | |
2399 | * the logical blocks read, storing the resulting XOR data in a buffer; | |
2400 | * 4) if the DISABLE WRITE bit is set to zero, then write the logical | |
2401 | * blocks transferred from the data-out buffer; and | |
2402 | * 5) transfer the resulting XOR data to the data-in buffer. | |
2403 | */ | |
2404 | buf = kmalloc(cmd->data_length, GFP_KERNEL); | |
6708bb27 AG |
2405 | if (!buf) { |
2406 | pr_err("Unable to allocate xor_callback buf\n"); | |
c66ac9db NB |
2407 | return; |
2408 | } | |
2409 | /* | |
ec98f782 | 2410 | * Copy the scatterlist WRITE buffer located at cmd->t_data_sg |
c66ac9db NB |
2411 | * into the locally allocated *buf |
2412 | */ | |
ec98f782 AG |
2413 | sg_copy_to_buffer(cmd->t_data_sg, |
2414 | cmd->t_data_nents, | |
2415 | buf, | |
2416 | cmd->data_length); | |
2417 | ||
c66ac9db NB |
2418 | /* |
2419 | * Now perform the XOR against the BIDI read memory located at | |
a1d8b49a | 2420 | * cmd->t_bidi_data_sg
c66ac9db NB |
2421 | */ |
2422 | ||
2423 | offset = 0; | |
ec98f782 AG |
2424 | for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) { |
2425 | addr = kmap_atomic(sg_page(sg), KM_USER0); | |
2426 | if (!addr) | |
c66ac9db NB |
2427 | goto out; |
2428 | ||
ec98f782 AG |
2429 | for (i = 0; i < sg->length; i++) |
2430 | *(addr + sg->offset + i) ^= *(buf + offset + i); | |
c66ac9db | 2431 | |
ec98f782 | 2432 | offset += sg->length; |
c66ac9db NB |
2433 | kunmap_atomic(addr, KM_USER0); |
2434 | } | |
ec98f782 | 2435 | |
c66ac9db NB |
2436 | out: |
2437 | kfree(buf); | |
2438 | } | |
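/*
 * Worked example (editor's illustration): if a data-out byte is 0xFF and
 * the corresponding byte read in step 1 is 0x0F, the BIDI data-in buffer
 * ends up holding the XOR result 0xF0, which is what step 5 returns to
 * the initiator.
 */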
2439 | ||
2440 | /* | |
2441 | * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd | |
2442 | */ | |
2443 | static int transport_get_sense_data(struct se_cmd *cmd) | |
2444 | { | |
2445 | unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL; | |
42bf829e | 2446 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
2447 | struct se_task *task = NULL, *task_tmp; |
2448 | unsigned long flags; | |
2449 | u32 offset = 0; | |
2450 | ||
e3d6f909 AG |
2451 | WARN_ON(!cmd->se_lun); |
2452 | ||
42bf829e CH |
2453 | if (!dev) |
2454 | return 0; | |
2455 | ||
a1d8b49a | 2456 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 2457 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { |
a1d8b49a | 2458 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2459 | return 0; |
2460 | } | |
2461 | ||
2462 | list_for_each_entry_safe(task, task_tmp, | |
a1d8b49a | 2463 | &cmd->t_task_list, t_list) { |
c66ac9db NB |
2464 | if (!task->task_sense) |
2465 | continue; | |
2466 | ||
e3d6f909 | 2467 | if (!dev->transport->get_sense_buffer) { |
6708bb27 | 2468 | pr_err("dev->transport->get_sense_buffer" |
c66ac9db NB |
2469 | " is NULL\n"); |
2470 | continue; | |
2471 | } | |
2472 | ||
e3d6f909 | 2473 | sense_buffer = dev->transport->get_sense_buffer(task); |
6708bb27 | 2474 | if (!sense_buffer) { |
04629b7b | 2475 | pr_err("ITT[0x%08x]_TASK[%p]: Unable to locate" |
c66ac9db | 2476 | " sense buffer for task with sense\n", |
04629b7b | 2477 | cmd->se_tfo->get_task_tag(cmd), task); |
c66ac9db NB |
2478 | continue; |
2479 | } | |
a1d8b49a | 2480 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 2481 | |
e3d6f909 | 2482 | offset = cmd->se_tfo->set_fabric_sense_len(cmd, |
c66ac9db NB |
2483 | TRANSPORT_SENSE_BUFFER); |
2484 | ||
5951146d | 2485 | memcpy(&buffer[offset], sense_buffer, |
c66ac9db NB |
2486 | TRANSPORT_SENSE_BUFFER); |
2487 | cmd->scsi_status = task->task_scsi_status; | |
2488 | /* Automatically padded */ | |
2489 | cmd->scsi_sense_length = | |
2490 | (TRANSPORT_SENSE_BUFFER + offset); | |
2491 | ||
6708bb27 | 2492 | pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x" |
c66ac9db | 2493 | " and sense\n", |
e3d6f909 | 2494 | dev->se_hba->hba_id, dev->transport->name, |
c66ac9db NB |
2495 | cmd->scsi_status); |
2496 | return 0; | |
2497 | } | |
a1d8b49a | 2498 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
2499 | |
2500 | return -1; | |
2501 | } | |
2502 | ||
c66ac9db NB |
2503 | static int |
2504 | transport_handle_reservation_conflict(struct se_cmd *cmd) | |
2505 | { | |
c66ac9db NB |
2506 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
2507 | cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT; | |
2508 | cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; | |
2509 | /* | |
2510 | * For UA Interlock Code 11b, a RESERVATION CONFLICT will | |
2511 | * establish a UNIT ATTENTION with PREVIOUS RESERVATION | |
2512 | * CONFLICT STATUS. | |
2513 | * | |
2514 | * See spc4r17, section 7.4.6 Control Mode Page, Table 349 | |
2515 | */ | |
e3d6f909 AG |
2516 | if (cmd->se_sess && |
2517 | cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) | |
2518 | core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, | |
c66ac9db NB |
2519 | cmd->orig_fe_lun, 0x2C, |
2520 | ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); | |
5951146d | 2521 | return -EINVAL; |
c66ac9db NB |
2522 | } |
2523 | ||
ec98f782 AG |
2524 | static inline long long transport_dev_end_lba(struct se_device *dev) |
2525 | { | |
2526 | return dev->transport->get_blocks(dev) + 1; | |
2527 | } | |
2528 | ||
2529 | static int transport_cmd_get_valid_sectors(struct se_cmd *cmd) | |
2530 | { | |
2531 | struct se_device *dev = cmd->se_dev; | |
2532 | u32 sectors; | |
2533 | ||
2534 | if (dev->transport->get_device_type(dev) != TYPE_DISK) | |
2535 | return 0; | |
2536 | ||
2537 | sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size); | |
2538 | ||
6708bb27 AG |
2539 | if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) { |
2540 | pr_err("LBA: %llu Sectors: %u exceeds" | |
ec98f782 AG |
2541 | " transport_dev_end_lba(): %llu\n", |
2542 | cmd->t_task_lba, sectors, | |
2543 | transport_dev_end_lba(dev)); | |
7abbe7f3 | 2544 | return -EINVAL; |
ec98f782 AG |
2545 | } |
2546 | ||
7abbe7f3 | 2547 | return 0; |
ec98f782 AG |
2548 | } |
2549 | ||
706d5860 NB |
2550 | static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev) |
2551 | { | |
2552 | /* | |
2553 | * Determine if the received WRITE_SAME is used for direct
2554 | * passthrough into Linux/SCSI with struct request via TCM/pSCSI | |
2555 | * or we are signaling the use of internal WRITE_SAME + UNMAP=1 | |
2556 | * emulation for Linux/BLOCK discard with TCM/IBLOCK code.
2557 | */ | |
2558 | int passthrough = (dev->transport->transport_type == | |
2559 | TRANSPORT_PLUGIN_PHBA_PDEV); | |
2560 | ||
2561 | if (!passthrough) { | |
2562 | if ((flags[0] & 0x04) || (flags[0] & 0x02)) { | |
2563 | pr_err("WRITE_SAME PBDATA and LBDATA" | |
2564 | " bits not supported for Block Discard" | |
2565 | " Emulation\n"); | |
2566 | return -ENOSYS; | |
2567 | } | |
2568 | /* | |
2569 | * Currently for the emulated case we only accept | |
2570 | * tpws with the UNMAP=1 bit set. | |
2571 | */ | |
2572 | if (!(flags[0] & 0x08)) { | |
2573 | pr_err("WRITE_SAME w/o UNMAP bit not" | |
2574 | " supported for Block Discard Emulation\n"); | |
2575 | return -ENOSYS; | |
2576 | } | |
2577 | } | |
2578 | ||
2579 | return 0; | |
2580 | } | |
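/*
 * Editor's note: flags[0] here is the WRITE_SAME flags byte (e.g. &cdb[10]
 * for WRITE_SAME_32 below); per SBC-3 the 0x08 bit is UNMAP, 0x04 is
 * PBDATA and 0x02 is LBDATA, which is why the emulated path insists on
 * UNMAP=1 with PBDATA/LBDATA clear.
 */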
2581 | ||
c66ac9db NB |
2582 | /* transport_generic_cmd_sequencer(): |
2583 | * | |
2584 | * Generic Command Sequencer that should work for most DAS transport | |
2585 | * drivers. | |
2586 | * | |
2587 | * Called from transport_generic_allocate_tasks() in the $FABRIC_MOD | |
2588 | * RX Thread. | |
2589 | * | |
2591 | * FIXME: Need to support other SCSI OPCODEs here as well.
2591 | */ | |
2592 | static int transport_generic_cmd_sequencer( | |
2593 | struct se_cmd *cmd, | |
2594 | unsigned char *cdb) | |
2595 | { | |
5951146d | 2596 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
2597 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; |
2598 | int ret = 0, sector_ret = 0, passthrough; | |
2599 | u32 sectors = 0, size = 0, pr_reg_type = 0; | |
2600 | u16 service_action; | |
2601 | u8 alua_ascq = 0; | |
2602 | /* | |
2603 | * Check for an existing UNIT ATTENTION condition | |
2604 | */ | |
2605 | if (core_scsi3_ua_check(cmd, cdb) < 0) { | |
c66ac9db NB |
2606 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
2607 | cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION; | |
5951146d | 2608 | return -EINVAL; |
c66ac9db NB |
2609 | } |
2610 | /* | |
2611 | * Check status of Asymmetric Logical Unit Assignment port | |
2612 | */ | |
e3d6f909 | 2613 | ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq); |
c66ac9db | 2614 | if (ret != 0) { |
c66ac9db | 2615 | /* |
25985edc | 2616 | * Set SCSI additional sense code (ASC) to 'LUN Not Accessible'; |
c66ac9db NB |
2617 | * The ALUA additional sense code qualifier (ASCQ) is determined |
2618 | * by the ALUA primary or secondary access state.
2619 | */ | |
2620 | if (ret > 0) { | |
2621 | #if 0 | |
6708bb27 | 2622 | pr_debug("[%s]: ALUA TG Port not available," |
c66ac9db | 2623 | " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n", |
e3d6f909 | 2624 | cmd->se_tfo->get_fabric_name(), alua_ascq); |
c66ac9db NB |
2625 | #endif |
2626 | transport_set_sense_codes(cmd, 0x04, alua_ascq); | |
2627 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
2628 | cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY; | |
5951146d | 2629 | return -EINVAL; |
c66ac9db NB |
2630 | } |
2631 | goto out_invalid_cdb_field; | |
2632 | } | |
2633 | /* | |
2634 | * Check status for SPC-3 Persistent Reservations | |
2635 | */ | |
e3d6f909 AG |
2636 | if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) { |
2637 | if (su_dev->t10_pr.pr_ops.t10_seq_non_holder( | |
c66ac9db NB |
2638 | cmd, cdb, pr_reg_type) != 0) |
2639 | return transport_handle_reservation_conflict(cmd); | |
2640 | /* | |
2641 | * This means the CDB is allowed for the SCSI Initiator port | |
2642 | * when said port is *NOT* holding the legacy SPC-2 or | |
2643 | * SPC-3 Persistent Reservation. | |
2644 | */ | |
2645 | } | |
2646 | ||
2647 | switch (cdb[0]) { | |
2648 | case READ_6: | |
2649 | sectors = transport_get_sectors_6(cdb, cmd, &sector_ret); |
2650 | if (sector_ret) | |
2651 | goto out_unsupported_cdb; | |
2652 | size = transport_get_size(sectors, cdb, cmd); | |
a1d8b49a | 2653 | cmd->t_task_lba = transport_lba_21(cdb); |
c66ac9db NB |
2654 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
2655 | break; | |
2656 | case READ_10: | |
2657 | sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); |
2658 | if (sector_ret) | |
2659 | goto out_unsupported_cdb; | |
2660 | size = transport_get_size(sectors, cdb, cmd); | |
a1d8b49a | 2661 | cmd->t_task_lba = transport_lba_32(cdb); |
c66ac9db NB |
2662 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
2663 | break; | |
2664 | case READ_12: | |
2665 | sectors = transport_get_sectors_12(cdb, cmd, &sector_ret); |
2666 | if (sector_ret) | |
2667 | goto out_unsupported_cdb; | |
2668 | size = transport_get_size(sectors, cdb, cmd); | |
a1d8b49a | 2669 | cmd->t_task_lba = transport_lba_32(cdb); |
c66ac9db NB |
2670 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
2671 | break; | |
2672 | case READ_16: | |
2673 | sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); |
2674 | if (sector_ret) | |
2675 | goto out_unsupported_cdb; | |
2676 | size = transport_get_size(sectors, cdb, cmd); | |
a1d8b49a | 2677 | cmd->t_task_lba = transport_lba_64(cdb); |
c66ac9db NB |
2678 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
2679 | break; | |
2680 | case WRITE_6: | |
2681 | sectors = transport_get_sectors_6(cdb, cmd, &sector_ret); |
2682 | if (sector_ret) | |
2683 | goto out_unsupported_cdb; | |
2684 | size = transport_get_size(sectors, cdb, cmd); | |
a1d8b49a | 2685 | cmd->t_task_lba = transport_lba_21(cdb); |
c66ac9db NB |
2686 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
2687 | break; | |
2688 | case WRITE_10: | |
2689 | sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); |
2690 | if (sector_ret) | |
2691 | goto out_unsupported_cdb; | |
2692 | size = transport_get_size(sectors, cdb, cmd); | |
a1d8b49a AG |
2693 | cmd->t_task_lba = transport_lba_32(cdb); |
2694 | cmd->t_tasks_fua = (cdb[1] & 0x8); | |
c66ac9db NB |
2695 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
2696 | break; | |
2697 | case WRITE_12: | |
2698 | sectors = transport_get_sectors_12(cdb, cmd, &sector_ret); |
2699 | if (sector_ret) | |
2700 | goto out_unsupported_cdb; | |
2701 | size = transport_get_size(sectors, cdb, cmd); | |
a1d8b49a AG |
2702 | cmd->t_task_lba = transport_lba_32(cdb); |
2703 | cmd->t_tasks_fua = (cdb[1] & 0x8); | |
c66ac9db NB |
2704 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
2705 | break; | |
2706 | case WRITE_16: | |
2707 | sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); |
2708 | if (sector_ret) | |
2709 | goto out_unsupported_cdb; | |
2710 | size = transport_get_size(sectors, cdb, cmd); | |
a1d8b49a AG |
2711 | cmd->t_task_lba = transport_lba_64(cdb); |
2712 | cmd->t_tasks_fua = (cdb[1] & 0x8); | |
c66ac9db NB |
2713 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
2714 | break; | |
2715 | case XDWRITEREAD_10: | |
2716 | if ((cmd->data_direction != DMA_TO_DEVICE) || | |
a1d8b49a | 2717 | !(cmd->t_tasks_bidi)) |
c66ac9db NB |
2718 | goto out_invalid_cdb_field; |
2719 | sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); |
2720 | if (sector_ret) | |
2721 | goto out_unsupported_cdb; | |
2722 | size = transport_get_size(sectors, cdb, cmd); | |
a1d8b49a | 2723 | cmd->t_task_lba = transport_lba_32(cdb); |
c66ac9db | 2724 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
7c1c6af3 CH |
2725 | |
2726 | if (dev->transport->transport_type == | |
2727 | TRANSPORT_PLUGIN_PHBA_PDEV) | |
2728 | goto out_unsupported_cdb; | |
c66ac9db | 2729 | /* |
35e0e757 | 2730 | * Setup BIDI XOR callback to be run after I/O completion. |
c66ac9db NB |
2731 | */ |
2732 | cmd->transport_complete_callback = &transport_xor_callback; | |
a1d8b49a | 2733 | cmd->t_tasks_fua = (cdb[1] & 0x8); |
c66ac9db NB |
2734 | break; |
2735 | case VARIABLE_LENGTH_CMD: | |
2736 | service_action = get_unaligned_be16(&cdb[8]); | |
2737 | /* | |
2738 | * Determine if this is a TCM/pSCSI device and we should disable
2739 | * internal emulation for this CDB. | |
2740 | */ | |
e3d6f909 | 2741 | passthrough = (dev->transport->transport_type == |
c66ac9db NB |
2742 | TRANSPORT_PLUGIN_PHBA_PDEV); |
2743 | ||
2744 | switch (service_action) { | |
2745 | case XDWRITEREAD_32: | |
2746 | sectors = transport_get_sectors_32(cdb, cmd, &sector_ret); |
2747 | if (sector_ret) | |
2748 | goto out_unsupported_cdb; | |
2749 | size = transport_get_size(sectors, cdb, cmd); | |
2750 | /* | |
2751 | * Use WRITE_32 and READ_32 opcodes for the emulated | |
2752 | * XDWRITE_READ_32 logic. | |
2753 | */ | |
a1d8b49a | 2754 | cmd->t_task_lba = transport_lba_64_ext(cdb); |
c66ac9db NB |
2755 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
2756 | ||
c66ac9db | 2757 | if (passthrough) |
7c1c6af3 | 2758 | goto out_unsupported_cdb; |
c66ac9db | 2759 | /* |
35e0e757 CH |
2760 | * Setup BIDI XOR callback to be run after I/O
2761 | * completion. | |
c66ac9db NB |
2762 | */ |
2763 | cmd->transport_complete_callback = &transport_xor_callback; | |
a1d8b49a | 2764 | cmd->t_tasks_fua = (cdb[10] & 0x8); |
c66ac9db NB |
2765 | break; |
2766 | case WRITE_SAME_32: | |
2767 | sectors = transport_get_sectors_32(cdb, cmd, &sector_ret); |
2768 | if (sector_ret) | |
2769 | goto out_unsupported_cdb; | |
dd3a5ad8 | 2770 | |
6708bb27 | 2771 | if (sectors) |
12850626 | 2772 | size = transport_get_size(1, cdb, cmd); |
6708bb27 AG |
2773 | else { |
2774 | pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not" | |
2775 | " supported\n"); | |
2776 | goto out_invalid_cdb_field; | |
2777 | } | |
dd3a5ad8 | 2778 | |
a1d8b49a | 2779 | cmd->t_task_lba = get_unaligned_be64(&cdb[12]); |
c66ac9db NB |
2780 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
2781 | ||
706d5860 | 2782 | if (target_check_write_same_discard(&cdb[10], dev) < 0) |
c66ac9db | 2783 | goto out_invalid_cdb_field; |
706d5860 | 2784 | |
c66ac9db NB |
2785 | break; |
2786 | default: | |
6708bb27 | 2787 | pr_err("VARIABLE_LENGTH_CMD service action" |
c66ac9db NB |
2788 | " 0x%04x not supported\n", service_action); |
2789 | goto out_unsupported_cdb; | |
2790 | } | |
2791 | break; | |
e434f1f1 | 2792 | case MAINTENANCE_IN: |
e3d6f909 | 2793 | if (dev->transport->get_device_type(dev) != TYPE_ROM) { |
c66ac9db NB |
2794 | /* MAINTENANCE_IN from SCC-2 */ |
2795 | /* | |
2796 | * Check for emulated MI_REPORT_TARGET_PGS. | |
2797 | */ | |
e76a35d6 CH |
2798 | if (cdb[1] == MI_REPORT_TARGET_PGS && |
2799 | su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { | |
2800 | cmd->execute_task = | |
2801 | target_emulate_report_target_port_groups; | |
c66ac9db NB |
2802 | } |
2803 | size = (cdb[6] << 24) | (cdb[7] << 16) | | |
2804 | (cdb[8] << 8) | cdb[9]; | |
2805 | } else { | |
2806 | /* GPCMD_SEND_KEY from multi media commands */ | |
2807 | size = (cdb[8] << 8) + cdb[9]; | |
2808 | } | |
05d1c7c0 | 2809 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
2810 | break; |
2811 | case MODE_SELECT: | |
2812 | size = cdb[4]; | |
2813 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | |
2814 | break; | |
2815 | case MODE_SELECT_10: | |
2816 | size = (cdb[7] << 8) + cdb[8]; | |
2817 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | |
2818 | break; | |
2819 | case MODE_SENSE: | |
2820 | size = cdb[4]; | |
05d1c7c0 | 2821 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
2822 | break; |
2823 | case MODE_SENSE_10: | |
2824 | case GPCMD_READ_BUFFER_CAPACITY: | |
2825 | case GPCMD_SEND_OPC: | |
2826 | case LOG_SELECT: | |
2827 | case LOG_SENSE: | |
2828 | size = (cdb[7] << 8) + cdb[8]; | |
05d1c7c0 | 2829 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
2830 | break; |
2831 | case READ_BLOCK_LIMITS: | |
2832 | size = READ_BLOCK_LEN; | |
05d1c7c0 | 2833 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
2834 | break; |
2835 | case GPCMD_GET_CONFIGURATION: | |
2836 | case GPCMD_READ_FORMAT_CAPACITIES: | |
2837 | case GPCMD_READ_DISC_INFO: | |
2838 | case GPCMD_READ_TRACK_RZONE_INFO: | |
2839 | size = (cdb[7] << 8) + cdb[8]; | |
2840 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | |
2841 | break; | |
2842 | case PERSISTENT_RESERVE_IN: | |
617c0e06 | 2843 | if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS) |
e76a35d6 | 2844 | cmd->execute_task = target_scsi3_emulate_pr_in; |
617c0e06 CH |
2845 | size = (cdb[7] << 8) + cdb[8]; |
2846 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | |
2847 | break; | |
c66ac9db | 2848 | case PERSISTENT_RESERVE_OUT: |
617c0e06 | 2849 | if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS) |
e76a35d6 | 2850 | cmd->execute_task = target_scsi3_emulate_pr_out; |
c66ac9db | 2851 | size = (cdb[7] << 8) + cdb[8]; |
05d1c7c0 | 2852 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
2853 | break; |
2854 | case GPCMD_MECHANISM_STATUS: | |
2855 | case GPCMD_READ_DVD_STRUCTURE: | |
2856 | size = (cdb[8] << 8) + cdb[9]; | |
2857 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | |
2858 | break; | |
2859 | case READ_POSITION: | |
2860 | size = READ_POSITION_LEN; | |
05d1c7c0 | 2861 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db | 2862 | break; |
e434f1f1 | 2863 | case MAINTENANCE_OUT: |
e3d6f909 | 2864 | if (dev->transport->get_device_type(dev) != TYPE_ROM) { |
c66ac9db NB |
2865 | /* MAINTENANCE_OUT from SCC-2 |
2866 | * | |
2867 | * Check for emulated MO_SET_TARGET_PGS. | |
2868 | */ | |
e76a35d6 CH |
2869 | if (cdb[1] == MO_SET_TARGET_PGS && |
2870 | su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { | |
2871 | cmd->execute_task = | |
2872 | target_emulate_set_target_port_groups; | |
c66ac9db NB |
2873 | } |
2874 | ||
2875 | size = (cdb[6] << 24) | (cdb[7] << 16) | | |
2876 | (cdb[8] << 8) | cdb[9]; | |
2877 | } else { | |
2878 | /* GPCMD_REPORT_KEY from multi media commands */ | |
2879 | size = (cdb[8] << 8) + cdb[9]; | |
2880 | } | |
05d1c7c0 | 2881 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
2882 | break; |
2883 | case INQUIRY: | |
2884 | size = (cdb[3] << 8) + cdb[4]; | |
2885 | /* | |
2886 | * Do implicit HEAD_OF_QUEUE processing for INQUIRY. |
2887 | * See spc4r17 section 5.3 | |
2888 | */ | |
5951146d | 2889 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) |
e66ecd50 | 2890 | cmd->sam_task_attr = MSG_HEAD_TAG; |
05d1c7c0 | 2891 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
2892 | break; |
2893 | case READ_BUFFER: | |
2894 | size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; | |
05d1c7c0 | 2895 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
2896 | break; |
2897 | case READ_CAPACITY: | |
2898 | size = READ_CAP_LEN; | |
05d1c7c0 | 2899 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
2900 | break; |
2901 | case READ_MEDIA_SERIAL_NUMBER: | |
2902 | case SECURITY_PROTOCOL_IN: | |
2903 | case SECURITY_PROTOCOL_OUT: | |
2904 | size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; | |
05d1c7c0 | 2905 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
2906 | break; |
2907 | case SERVICE_ACTION_IN: | |
2908 | case ACCESS_CONTROL_IN: | |
2909 | case ACCESS_CONTROL_OUT: | |
2910 | case EXTENDED_COPY: | |
2911 | case READ_ATTRIBUTE: | |
2912 | case RECEIVE_COPY_RESULTS: | |
2913 | case WRITE_ATTRIBUTE: | |
2914 | size = (cdb[10] << 24) | (cdb[11] << 16) | | |
2915 | (cdb[12] << 8) | cdb[13]; | |
05d1c7c0 | 2916 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
2917 | break; |
2918 | case RECEIVE_DIAGNOSTIC: | |
2919 | case SEND_DIAGNOSTIC: | |
2920 | size = (cdb[3] << 8) | cdb[4]; | |
05d1c7c0 | 2921 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
2922 | break; |
2923 | /* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */ | |
2924 | #if 0 | |
2925 | case GPCMD_READ_CD: | |
2926 | sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; | |
2927 | size = (2336 * sectors); | |
05d1c7c0 | 2928 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
2929 | break; |
2930 | #endif | |
2931 | case READ_TOC: | |
2932 | size = cdb[8]; | |
05d1c7c0 | 2933 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
2934 | break; |
2935 | case REQUEST_SENSE: | |
2936 | size = cdb[4]; | |
05d1c7c0 | 2937 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
2938 | break; |
2939 | case READ_ELEMENT_STATUS: | |
2940 | size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9]; | |
05d1c7c0 | 2941 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
2942 | break; |
2943 | case WRITE_BUFFER: | |
2944 | size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; | |
05d1c7c0 | 2945 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
2946 | break; |
2947 | case RESERVE: | |
2948 | case RESERVE_10: | |
2949 | /* | |
2950 | * The SPC-2 RESERVE does not contain a size in the SCSI CDB. | |
2951 | * Assume the passthrough or $FABRIC_MOD will tell us about it. | |
2952 | */ | |
2953 | if (cdb[0] == RESERVE_10) | |
2954 | size = (cdb[7] << 8) | cdb[8]; | |
2955 | else | |
2956 | size = cmd->data_length; | |
2957 | ||
2958 | /* | |
2959 | * Setup the legacy emulated handler for SPC-2 and | |
2960 | * >= SPC-3 compatible reservation handling (CRH=1) | |
2961 | * Otherwise, we assume the underlying SCSI logic is | |
2962 | * running in SPC_PASSTHROUGH, and wants reservations |
2963 | * emulation disabled. | |
2964 | */ | |
e76a35d6 CH |
2965 | if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH) |
2966 | cmd->execute_task = target_scsi2_reservation_reserve; | |
c66ac9db NB |
2967 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; |
2968 | break; | |
2969 | case RELEASE: | |
2970 | case RELEASE_10: | |
2971 | /* | |
2972 | * The SPC-2 RELEASE does not contain a size in the SCSI CDB. | |
2973 | * Assume the passthrough or $FABRIC_MOD will tell us about it. | |
2974 | */ | |
2975 | if (cdb[0] == RELEASE_10) | |
2976 | size = (cdb[7] << 8) | cdb[8]; | |
2977 | else | |
2978 | size = cmd->data_length; | |
2979 | ||
e76a35d6 CH |
2980 | if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH) |
2981 | cmd->execute_task = target_scsi2_reservation_release; | |
c66ac9db NB |
2982 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; |
2983 | break; | |
2984 | case SYNCHRONIZE_CACHE: | |
2985 | case 0x91: /* SYNCHRONIZE_CACHE_16: */ | |
2986 | /* | |
2987 | * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE | |
2988 | */ | |
2989 | if (cdb[0] == SYNCHRONIZE_CACHE) { | |
2990 | sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); |
a1d8b49a | 2991 | cmd->t_task_lba = transport_lba_32(cdb); |
c66ac9db NB |
2992 | } else { |
2993 | sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); |
a1d8b49a | 2994 | cmd->t_task_lba = transport_lba_64(cdb); |
c66ac9db NB |
2995 | } |
2996 | if (sector_ret) | |
2997 | goto out_unsupported_cdb; | |
2998 | ||
2999 | size = transport_get_size(sectors, cdb, cmd); | |
3000 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; | |
3001 | ||
e3d6f909 | 3002 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) |
c66ac9db NB |
3003 | break; |
3004 | /* | |
3005 | * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation | |
3006 | * for SYNCHRONIZE_CACHE* Immed=1 case in __transport_execute_tasks() | |
3007 | */ | |
3008 | cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC; | |
3009 | /* | |
3010 | * Check to ensure that LBA + Range does not run past the end of the |
7abbe7f3 | 3011 | * device for IBLOCK and FILEIO ->do_sync_cache() backend calls |
c66ac9db | 3012 | */ |
7abbe7f3 NB |
3013 | if ((cmd->t_task_lba != 0) || (sectors != 0)) { |
3014 | if (transport_cmd_get_valid_sectors(cmd) < 0) | |
3015 | goto out_invalid_cdb_field; | |
3016 | } | |
c66ac9db NB |
3017 | break; |
3018 | case UNMAP: | |
3019 | size = get_unaligned_be16(&cdb[7]); | |
05d1c7c0 | 3020 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3021 | break; |
3022 | case WRITE_SAME_16: | |
3023 | sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); |
3024 | if (sector_ret) | |
3025 | goto out_unsupported_cdb; | |
dd3a5ad8 | 3026 | |
6708bb27 | 3027 | if (sectors) |
12850626 | 3028 | size = transport_get_size(1, cdb, cmd); |
6708bb27 AG |
3029 | else { |
3030 | pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); | |
3031 | goto out_invalid_cdb_field; | |
3032 | } | |
dd3a5ad8 | 3033 | |
5db0753b | 3034 | cmd->t_task_lba = get_unaligned_be64(&cdb[2]); |
706d5860 NB |
3035 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3036 | ||
3037 | if (target_check_write_same_discard(&cdb[1], dev) < 0) | |
3038 | goto out_invalid_cdb_field; | |
3039 | break; | |
3040 | case WRITE_SAME: | |
3041 | sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); |
3042 | if (sector_ret) | |
3043 | goto out_unsupported_cdb; | |
3044 | ||
3045 | if (sectors) | |
12850626 | 3046 | size = transport_get_size(1, cdb, cmd); |
706d5860 NB |
3047 | else { |
3048 | pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); | |
3049 | goto out_invalid_cdb_field; | |
c66ac9db | 3050 | } |
706d5860 NB |
3051 | |
3052 | cmd->t_task_lba = get_unaligned_be32(&cdb[2]); | |
c66ac9db | 3053 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
706d5860 NB |
3054 | /* |
3055 | * Follow sbcr26 with WRITE_SAME (10) and check for the existence | |
3056 | * of byte 1 bit 3 UNMAP instead of original reserved field | |
3057 | */ | |
3058 | if (target_check_write_same_discard(&cdb[1], dev) < 0) | |
3059 | goto out_invalid_cdb_field; | |
c66ac9db NB |
3060 | break; |
3061 | case ALLOW_MEDIUM_REMOVAL: | |
3062 | case GPCMD_CLOSE_TRACK: | |
3063 | case ERASE: | |
3064 | case INITIALIZE_ELEMENT_STATUS: | |
3065 | case GPCMD_LOAD_UNLOAD: | |
3066 | case REZERO_UNIT: | |
3067 | case SEEK_10: | |
3068 | case GPCMD_SET_SPEED: | |
3069 | case SPACE: | |
3070 | case START_STOP: | |
3071 | case TEST_UNIT_READY: | |
3072 | case VERIFY: | |
3073 | case WRITE_FILEMARKS: | |
3074 | case MOVE_MEDIUM: | |
3075 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; | |
3076 | break; | |
3077 | case REPORT_LUNS: | |
e76a35d6 | 3078 | cmd->execute_task = target_report_luns; |
c66ac9db NB |
3079 | size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; |
3080 | /* | |
3081 | * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS |
3082 | * See spc4r17 section 5.3 | |
3083 | */ | |
5951146d | 3084 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) |
e66ecd50 | 3085 | cmd->sam_task_attr = MSG_HEAD_TAG; |
05d1c7c0 | 3086 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
c66ac9db NB |
3087 | break; |
3088 | default: | |
6708bb27 | 3089 | pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode" |
c66ac9db | 3090 | " 0x%02x, sending CHECK_CONDITION.\n", |
e3d6f909 | 3091 | cmd->se_tfo->get_fabric_name(), cdb[0]); |
c66ac9db NB |
3092 | goto out_unsupported_cdb; |
3093 | } | |
3094 | ||
3095 | if (size != cmd->data_length) { | |
6708bb27 | 3096 | pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" |
c66ac9db | 3097 | " %u does not match SCSI CDB Length: %u for SAM Opcode:" |
e3d6f909 | 3098 | " 0x%02x\n", cmd->se_tfo->get_fabric_name(), |
c66ac9db NB |
3099 | cmd->data_length, size, cdb[0]); |
3100 | ||
3101 | cmd->cmd_spdtl = size; | |
3102 | ||
3103 | if (cmd->data_direction == DMA_TO_DEVICE) { | |
6708bb27 | 3104 | pr_err("Rejecting underflow/overflow" |
c66ac9db NB |
3105 | " WRITE data\n"); |
3106 | goto out_invalid_cdb_field; | |
3107 | } | |
3108 | /* | |
3109 | * Reject READ_* or WRITE_* with overflow/underflow for | |
3110 | * type SCF_SCSI_DATA_SG_IO_CDB. | |
3111 | */ | |
6708bb27 AG |
3112 | if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) { |
3113 | pr_err("Failing OVERFLOW/UNDERFLOW for LBA op" | |
c66ac9db | 3114 | " CDB on non 512-byte sector setup subsystem" |
e3d6f909 | 3115 | " plugin: %s\n", dev->transport->name); |
c66ac9db NB |
3116 | /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ |
3117 | goto out_invalid_cdb_field; | |
3118 | } | |
3119 | ||
3120 | if (size > cmd->data_length) { | |
3121 | cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; | |
3122 | cmd->residual_count = (size - cmd->data_length); | |
3123 | } else { | |
3124 | cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; | |
3125 | cmd->residual_count = (cmd->data_length - size); | |
3126 | } | |
3127 | cmd->data_length = size; | |
3128 | } | |
3129 | ||
d0229ae3 AG |
3130 | /* Let's limit control cdbs to a page, for simplicity's sake. */ |
3131 | if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) && | |
3132 | size > PAGE_SIZE) | |
3133 | goto out_invalid_cdb_field; | |
3134 | ||
c66ac9db NB |
3135 | transport_set_supported_SAM_opcode(cmd); |
3136 | return ret; | |
3137 | ||
3138 | out_unsupported_cdb: | |
3139 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
3140 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | |
5951146d | 3141 | return -EINVAL; |
c66ac9db NB |
3142 | out_invalid_cdb_field: |
3143 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
3144 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | |
5951146d | 3145 | return -EINVAL; |
c66ac9db NB |
3146 | } |
3147 | ||
c66ac9db | 3148 | /* |
35e0e757 | 3149 | * Called from I/O completion to determine which dormant/delayed |
c66ac9db NB |
3150 | * and ordered cmds need to have their tasks added to the execution queue. |
3151 | */ | |
3152 | static void transport_complete_task_attr(struct se_cmd *cmd) | |
3153 | { | |
5951146d | 3154 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
3155 | struct se_cmd *cmd_p, *cmd_tmp; |
3156 | int new_active_tasks = 0; | |
3157 | ||
e66ecd50 | 3158 | if (cmd->sam_task_attr == MSG_SIMPLE_TAG) { |
c66ac9db NB |
3159 | atomic_dec(&dev->simple_cmds); |
3160 | smp_mb__after_atomic_dec(); | |
3161 | dev->dev_cur_ordered_id++; | |
6708bb27 | 3162 | pr_debug("Incremented dev->dev_cur_ordered_id: %u for" |
c66ac9db NB |
3163 | " SIMPLE: %u\n", dev->dev_cur_ordered_id, |
3164 | cmd->se_ordered_id); | |
e66ecd50 | 3165 | } else if (cmd->sam_task_attr == MSG_HEAD_TAG) { |
c66ac9db NB |
3166 | atomic_dec(&dev->dev_hoq_count); |
3167 | smp_mb__after_atomic_dec(); | |
3168 | dev->dev_cur_ordered_id++; | |
6708bb27 | 3169 | pr_debug("Incremented dev_cur_ordered_id: %u for" |
c66ac9db NB |
3170 | " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, |
3171 | cmd->se_ordered_id); | |
e66ecd50 | 3172 | } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { |
c66ac9db | 3173 | spin_lock(&dev->ordered_cmd_lock); |
5951146d | 3174 | list_del(&cmd->se_ordered_node); |
c66ac9db NB |
3175 | atomic_dec(&dev->dev_ordered_sync); |
3176 | smp_mb__after_atomic_dec(); | |
3177 | spin_unlock(&dev->ordered_cmd_lock); | |
3178 | ||
3179 | dev->dev_cur_ordered_id++; | |
6708bb27 | 3180 | pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" |
c66ac9db NB |
3181 | " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); |
3182 | } | |
3183 | /* | |
3184 | * Process all commands up to the last received | |
3185 | * ORDERED task attribute which requires another blocking | |
3186 | * boundary | |
3187 | */ | |
3188 | spin_lock(&dev->delayed_cmd_lock); | |
3189 | list_for_each_entry_safe(cmd_p, cmd_tmp, | |
5951146d | 3190 | &dev->delayed_cmd_list, se_delayed_node) { |
c66ac9db | 3191 | |
5951146d | 3192 | list_del(&cmd_p->se_delayed_node); |
c66ac9db NB |
3193 | spin_unlock(&dev->delayed_cmd_lock); |
3194 | ||
6708bb27 | 3195 | pr_debug("Calling add_tasks() for" |
c66ac9db NB |
3196 | " cmd_p: 0x%02x Task Attr: 0x%02x" |
3197 | " Dormant -> Active, se_ordered_id: %u\n", | |
6708bb27 | 3198 | cmd_p->t_task_cdb[0], |
c66ac9db NB |
3199 | cmd_p->sam_task_attr, cmd_p->se_ordered_id); |
3200 | ||
3201 | transport_add_tasks_from_cmd(cmd_p); | |
3202 | new_active_tasks++; | |
3203 | ||
3204 | spin_lock(&dev->delayed_cmd_lock); | |
e66ecd50 | 3205 | if (cmd_p->sam_task_attr == MSG_ORDERED_TAG) |
c66ac9db NB |
3206 | break; |
3207 | } | |
3208 | spin_unlock(&dev->delayed_cmd_lock); | |
3209 | /* | |
3210 | * If new tasks have become active, wake up the transport thread | |
3211 | * to do the processing of the Active tasks. | |
3212 | */ | |
3213 | if (new_active_tasks != 0) | |
e3d6f909 | 3214 | wake_up_interruptible(&dev->dev_queue_obj.thread_wq); |
c66ac9db NB |
3215 | } |
3216 | ||
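/*
 * Retry delivery for a command that previously hit a QUEUE_FULL condition:
 * redo task attribute accounting when emulated, then push the pending sense,
 * data and/or status back to the fabric, requeueing again if it still fails.
 */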
e057f533 | 3217 | static void transport_complete_qf(struct se_cmd *cmd) |
07bde79a NB |
3218 | { |
3219 | int ret = 0; | |
3220 | ||
e057f533 CH |
3221 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) |
3222 | transport_complete_task_attr(cmd); | |
3223 | ||
3224 | if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { | |
3225 | ret = cmd->se_tfo->queue_status(cmd); | |
3226 | if (ret) | |
3227 | goto out; | |
3228 | } | |
07bde79a NB |
3229 | |
3230 | switch (cmd->data_direction) { | |
3231 | case DMA_FROM_DEVICE: | |
3232 | ret = cmd->se_tfo->queue_data_in(cmd); | |
3233 | break; | |
3234 | case DMA_TO_DEVICE: | |
ec98f782 | 3235 | if (cmd->t_bidi_data_sg) { |
07bde79a NB |
3236 | ret = cmd->se_tfo->queue_data_in(cmd); |
3237 | if (ret < 0) | |
e057f533 | 3238 | break; |
07bde79a NB |
3239 | } |
3240 | /* Fall through for DMA_TO_DEVICE */ | |
3241 | case DMA_NONE: | |
3242 | ret = cmd->se_tfo->queue_status(cmd); | |
3243 | break; | |
3244 | default: | |
3245 | break; | |
3246 | } | |
3247 | ||
e057f533 CH |
3248 | out: |
3249 | if (ret < 0) { | |
3250 | transport_handle_queue_full(cmd, cmd->se_dev); | |
3251 | return; | |
3252 | } | |
3253 | transport_lun_remove_cmd(cmd); | |
3254 | transport_cmd_check_stop_to_fabric(cmd); | |
07bde79a NB |
3255 | } |
3256 | ||
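/*
 * Park the command on the device's qf_cmd_list and schedule qf_work_queue so
 * the pending status/data delivery is retried towards the fabric later.
 */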
3257 | static void transport_handle_queue_full( | |
3258 | struct se_cmd *cmd, | |
e057f533 | 3259 | struct se_device *dev) |
07bde79a NB |
3260 | { |
3261 | spin_lock_irq(&dev->qf_cmd_lock); | |
07bde79a NB |
3262 | list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); |
3263 | atomic_inc(&dev->dev_qf_count); | |
3264 | smp_mb__after_atomic_inc(); | |
3265 | spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); | |
3266 | ||
3267 | schedule_work(&cmd->se_dev->qf_work_queue); | |
3268 | } | |
3269 | ||
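/*
 * Workqueue handler for successful completion: update task attribute state
 * and port statistics, retrieve sense data when requested, then hand data
 * and/or status back to the fabric, dropping into QUEUE_FULL handling if the
 * fabric callbacks return -EAGAIN or -ENOMEM.
 */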
35e0e757 | 3270 | static void target_complete_ok_work(struct work_struct *work) |
c66ac9db | 3271 | { |
35e0e757 | 3272 | struct se_cmd *cmd = container_of(work, struct se_cmd, work); |
07bde79a | 3273 | int reason = 0, ret; |
35e0e757 | 3274 | |
c66ac9db NB |
3275 | /* |
3276 | * Check if we need to move delayed/dormant tasks from cmds on the | |
3277 | * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task | |
3278 | * Attribute. | |
3279 | */ | |
5951146d | 3280 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) |
c66ac9db | 3281 | transport_complete_task_attr(cmd); |
07bde79a NB |
3282 | /* |
3283 | * Check to schedule QUEUE_FULL work, or execute an existing | |
3284 | * cmd->transport_qf_callback() | |
3285 | */ | |
3286 | if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) | |
3287 | schedule_work(&cmd->se_dev->qf_work_queue); | |
3288 | ||
c66ac9db NB |
3289 | /* |
3290 | * Check if we need to retrieve a sense buffer from | |
3291 | * the struct se_cmd in question. | |
3292 | */ | |
3293 | if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { | |
3294 | if (transport_get_sense_data(cmd) < 0) | |
3295 | reason = TCM_NON_EXISTENT_LUN; | |
3296 | ||
3297 | /* | |
3298 | * Only set when a struct se_task->task_scsi_status returned |
3299 | * a non GOOD status. | |
3300 | */ | |
3301 | if (cmd->scsi_status) { | |
07bde79a | 3302 | ret = transport_send_check_condition_and_sense( |
c66ac9db | 3303 | cmd, reason, 1); |
f147abb4 | 3304 | if (ret == -EAGAIN || ret == -ENOMEM) |
07bde79a NB |
3305 | goto queue_full; |
3306 | ||
c66ac9db NB |
3307 | transport_lun_remove_cmd(cmd); |
3308 | transport_cmd_check_stop_to_fabric(cmd); | |
3309 | return; | |
3310 | } | |
3311 | } | |
3312 | /* | |
25985edc | 3313 | * Check for a callback, used by, amongst other things, |
c66ac9db NB |
3314 | * XDWRITE_READ_10 emulation. |
3315 | */ | |
3316 | if (cmd->transport_complete_callback) | |
3317 | cmd->transport_complete_callback(cmd); | |
3318 | ||
3319 | switch (cmd->data_direction) { | |
3320 | case DMA_FROM_DEVICE: | |
3321 | spin_lock(&cmd->se_lun->lun_sep_lock); | |
e3d6f909 AG |
3322 | if (cmd->se_lun->lun_sep) { |
3323 | cmd->se_lun->lun_sep->sep_stats.tx_data_octets += | |
c66ac9db NB |
3324 | cmd->data_length; |
3325 | } | |
3326 | spin_unlock(&cmd->se_lun->lun_sep_lock); | |
c66ac9db | 3327 | |
07bde79a | 3328 | ret = cmd->se_tfo->queue_data_in(cmd); |
f147abb4 | 3329 | if (ret == -EAGAIN || ret == -ENOMEM) |
07bde79a | 3330 | goto queue_full; |
c66ac9db NB |
3331 | break; |
3332 | case DMA_TO_DEVICE: | |
3333 | spin_lock(&cmd->se_lun->lun_sep_lock); | |
e3d6f909 AG |
3334 | if (cmd->se_lun->lun_sep) { |
3335 | cmd->se_lun->lun_sep->sep_stats.rx_data_octets += | |
c66ac9db NB |
3336 | cmd->data_length; |
3337 | } | |
3338 | spin_unlock(&cmd->se_lun->lun_sep_lock); | |
3339 | /* | |
3340 | * Check if we need to send READ payload for BIDI-COMMAND | |
3341 | */ | |
ec98f782 | 3342 | if (cmd->t_bidi_data_sg) { |
c66ac9db | 3343 | spin_lock(&cmd->se_lun->lun_sep_lock); |
e3d6f909 AG |
3344 | if (cmd->se_lun->lun_sep) { |
3345 | cmd->se_lun->lun_sep->sep_stats.tx_data_octets += | |
c66ac9db NB |
3346 | cmd->data_length; |
3347 | } | |
3348 | spin_unlock(&cmd->se_lun->lun_sep_lock); | |
07bde79a | 3349 | ret = cmd->se_tfo->queue_data_in(cmd); |
f147abb4 | 3350 | if (ret == -EAGAIN || ret == -ENOMEM) |
07bde79a | 3351 | goto queue_full; |
c66ac9db NB |
3352 | break; |
3353 | } | |
3354 | /* Fall through for DMA_TO_DEVICE */ | |
3355 | case DMA_NONE: | |
07bde79a | 3356 | ret = cmd->se_tfo->queue_status(cmd); |
f147abb4 | 3357 | if (ret == -EAGAIN || ret == -ENOMEM) |
07bde79a | 3358 | goto queue_full; |
c66ac9db NB |
3359 | break; |
3360 | default: | |
3361 | break; | |
3362 | } | |
3363 | ||
3364 | transport_lun_remove_cmd(cmd); | |
3365 | transport_cmd_check_stop_to_fabric(cmd); | |
07bde79a NB |
3366 | return; |
3367 | ||
3368 | queue_full: | |
6708bb27 | 3369 | pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," |
07bde79a | 3370 | " data_direction: %d\n", cmd, cmd->data_direction); |
e057f533 CH |
3371 | cmd->t_state = TRANSPORT_COMPLETE_QF_OK; |
3372 | transport_handle_queue_full(cmd, cmd->se_dev); | |
c66ac9db NB |
3373 | } |
3374 | ||
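/*
 * Release all inactive tasks from cmd->t_task_list: per-task SGLs are freed
 * unless they alias the command's own t_data_sg/t_bidi_data_sg, and each task
 * is handed back to the backend via ->free_task().
 */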
3375 | static void transport_free_dev_tasks(struct se_cmd *cmd) | |
3376 | { | |
3377 | struct se_task *task, *task_tmp; | |
3378 | unsigned long flags; | |
0c2cfe5f | 3379 | LIST_HEAD(dispose_list); |
c66ac9db | 3380 | |
a1d8b49a | 3381 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 3382 | list_for_each_entry_safe(task, task_tmp, |
a1d8b49a | 3383 | &cmd->t_task_list, t_list) { |
0c2cfe5f CH |
3384 | if (!(task->task_flags & TF_ACTIVE)) |
3385 | list_move_tail(&task->t_list, &dispose_list); | |
3386 | } | |
3387 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
3388 | ||
3389 | while (!list_empty(&dispose_list)) { | |
3390 | task = list_first_entry(&dispose_list, struct se_task, t_list); | |
c66ac9db | 3391 | |
af3f00c7 CH |
3392 | if (task->task_sg != cmd->t_data_sg && |
3393 | task->task_sg != cmd->t_bidi_data_sg) | |
3394 | kfree(task->task_sg); | |
c66ac9db NB |
3395 | |
3396 | list_del(&task->t_list); | |
3397 | ||
42bf829e | 3398 | cmd->se_dev->transport->free_task(task); |
c66ac9db | 3399 | } |
c66ac9db NB |
3400 | } |
3401 | ||
6708bb27 | 3402 | static inline void transport_free_sgl(struct scatterlist *sgl, int nents) |
c66ac9db | 3403 | { |
ec98f782 | 3404 | struct scatterlist *sg; |
ec98f782 | 3405 | int count; |
c66ac9db | 3406 | |
6708bb27 AG |
3407 | for_each_sg(sgl, sg, nents, count) |
3408 | __free_page(sg_page(sg)); | |
c66ac9db | 3409 | |
6708bb27 AG |
3410 | kfree(sgl); |
3411 | } | |
c66ac9db | 3412 | |
6708bb27 AG |
3413 | static inline void transport_free_pages(struct se_cmd *cmd) |
3414 | { | |
3415 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) | |
3416 | return; | |
3417 | ||
3418 | transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents); | |
ec98f782 AG |
3419 | cmd->t_data_sg = NULL; |
3420 | cmd->t_data_nents = 0; | |
c66ac9db | 3421 | |
6708bb27 | 3422 | transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); |
ec98f782 AG |
3423 | cmd->t_bidi_data_sg = NULL; |
3424 | cmd->t_bidi_data_nents = 0; | |
c66ac9db NB |
3425 | } |
3426 | ||
d3df7825 CH |
3427 | /** |
3428 | * transport_put_cmd - release a reference to a command | |
3429 | * @cmd: command to release | |
3430 | * | |
3431 | * This routine releases our reference to the command and frees it if possible. | |
3432 | */ | |
39c05f32 | 3433 | static void transport_put_cmd(struct se_cmd *cmd) |
c66ac9db NB |
3434 | { |
3435 | unsigned long flags; | |
4911e3cc | 3436 | int free_tasks = 0; |
c66ac9db | 3437 | |
a1d8b49a | 3438 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
4911e3cc CH |
3439 | if (atomic_read(&cmd->t_fe_count)) { |
3440 | if (!atomic_dec_and_test(&cmd->t_fe_count)) | |
3441 | goto out_busy; | |
3442 | } | |
3443 | ||
3444 | if (atomic_read(&cmd->t_se_count)) { | |
3445 | if (!atomic_dec_and_test(&cmd->t_se_count)) | |
3446 | goto out_busy; | |
3447 | } | |
3448 | ||
3449 | if (atomic_read(&cmd->transport_dev_active)) { | |
3450 | atomic_set(&cmd->transport_dev_active, 0); | |
3451 | transport_all_task_dev_remove_state(cmd); | |
3452 | free_tasks = 1; | |
c66ac9db | 3453 | } |
a1d8b49a | 3454 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 3455 | |
4911e3cc CH |
3456 | if (free_tasks != 0) |
3457 | transport_free_dev_tasks(cmd); | |
d3df7825 | 3458 | |
c66ac9db | 3459 | transport_free_pages(cmd); |
31afc39c | 3460 | transport_release_cmd(cmd); |
39c05f32 | 3461 | return; |
4911e3cc CH |
3462 | out_busy: |
3463 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
c66ac9db NB |
3464 | } |
3465 | ||
c66ac9db | 3466 | /* |
ec98f782 AG |
3467 | * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of |
3468 | * allocating in the core. | |
c66ac9db NB |
3469 | * @cmd: Associated se_cmd descriptor |
3470 | * @sgl: SGL style memory for TCM WRITE / READ |
3471 | * @sgl_count: Number of SGL elements |
3472 | * @sgl_bidi: SGL style memory for TCM BIDI READ |
3473 | * @sgl_bidi_count: Number of BIDI READ SGL elements |
3474 | * | |
3475 | * Return: nonzero if cmd was rejected for -ENOMEM or improper usage |
3476 | * of parameters. | |
3477 | */ | |
3478 | int transport_generic_map_mem_to_cmd( | |
3479 | struct se_cmd *cmd, | |
5951146d AG |
3480 | struct scatterlist *sgl, |
3481 | u32 sgl_count, | |
3482 | struct scatterlist *sgl_bidi, | |
3483 | u32 sgl_bidi_count) | |
c66ac9db | 3484 | { |
5951146d | 3485 | if (!sgl || !sgl_count) |
c66ac9db | 3486 | return 0; |
c66ac9db | 3487 | |
c66ac9db NB |
3488 | if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || |
3489 | (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) { | |
c66ac9db | 3490 | |
ec98f782 AG |
3491 | cmd->t_data_sg = sgl; |
3492 | cmd->t_data_nents = sgl_count; | |
c66ac9db | 3493 | |
ec98f782 AG |
3494 | if (sgl_bidi && sgl_bidi_count) { |
3495 | cmd->t_bidi_data_sg = sgl_bidi; | |
3496 | cmd->t_bidi_data_nents = sgl_bidi_count; | |
c66ac9db NB |
3497 | } |
3498 | cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; | |
c66ac9db NB |
3499 | } |
3500 | ||
3501 | return 0; | |
3502 | } | |
3503 | EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); | |
3504 | ||
05d1c7c0 AG |
3505 | void *transport_kmap_first_data_page(struct se_cmd *cmd) |
3506 | { | |
ec98f782 | 3507 | struct scatterlist *sg = cmd->t_data_sg; |
05d1c7c0 | 3508 | |
ec98f782 | 3509 | BUG_ON(!sg); |
05d1c7c0 | 3510 | /* |
ec98f782 AG |
3511 | * We need to take into account a possible offset here for fabrics like |
3512 | * tcm_loop who may be using a contig buffer from the SCSI midlayer for | |
3513 | * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() | |
05d1c7c0 | 3514 | */ |
ec98f782 | 3515 | return kmap(sg_page(sg)) + sg->offset; |
05d1c7c0 AG |
3516 | } |
3517 | EXPORT_SYMBOL(transport_kmap_first_data_page); | |
3518 | ||
3519 | void transport_kunmap_first_data_page(struct se_cmd *cmd) | |
3520 | { | |
ec98f782 | 3521 | kunmap(sg_page(cmd->t_data_sg)); |
05d1c7c0 AG |
3522 | } |
3523 | EXPORT_SYMBOL(transport_kunmap_first_data_page); | |
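/*
 * Usage sketch (hypothetical caller, not from this file): emulated control
 * CDB handlers typically map the first data page, fill in the payload, and
 * unmap it again:
 *
 *	unsigned char *buf = transport_kmap_first_data_page(cmd);
 *
 *	memset(buf, 0, cmd->data_length);
 *	buf[0] = cmd->se_dev->transport->get_device_type(cmd->se_dev);
 *	transport_kunmap_first_data_page(cmd);
 */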
3524 | ||
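/*
 * Allocate zeroed backing pages covering cmd->data_length and build
 * cmd->t_data_sg one PAGE_SIZE entry at a time; used when the fabric did not
 * pass in its own SGLs via transport_generic_map_mem_to_cmd().
 */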
c66ac9db | 3525 | static int |
05d1c7c0 | 3526 | transport_generic_get_mem(struct se_cmd *cmd) |
c66ac9db | 3527 | { |
ec98f782 AG |
3528 | u32 length = cmd->data_length; |
3529 | unsigned int nents; | |
3530 | struct page *page; | |
3531 | int i = 0; | |
c66ac9db | 3532 | |
ec98f782 AG |
3533 | nents = DIV_ROUND_UP(length, PAGE_SIZE); |
3534 | cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL); | |
3535 | if (!cmd->t_data_sg) | |
3536 | return -ENOMEM; | |
c66ac9db | 3537 | |
ec98f782 AG |
3538 | cmd->t_data_nents = nents; |
3539 | sg_init_table(cmd->t_data_sg, nents); | |
c66ac9db | 3540 | |
ec98f782 AG |
3541 | while (length) { |
3542 | u32 page_len = min_t(u32, length, PAGE_SIZE); | |
3543 | page = alloc_page(GFP_KERNEL | __GFP_ZERO); | |
3544 | if (!page) | |
3545 | goto out; | |
c66ac9db | 3546 | |
ec98f782 AG |
3547 | sg_set_page(&cmd->t_data_sg[i], page, page_len, 0); |
3548 | length -= page_len; | |
3549 | i++; | |
c66ac9db | 3550 | } |
c66ac9db | 3551 | return 0; |
c66ac9db | 3552 | |
ec98f782 AG |
3553 | out: |
3554 | while (i >= 0) { | |
3555 | __free_page(sg_page(&cmd->t_data_sg[i])); | |
3556 | i--; | |
c66ac9db | 3557 | } |
ec98f782 AG |
3558 | kfree(cmd->t_data_sg); |
3559 | cmd->t_data_sg = NULL; | |
3560 | return -ENOMEM; | |
c66ac9db NB |
3561 | } |
3562 | ||
a1d8b49a AG |
3563 | /* Reduce sectors if they are too long for the device */ |
3564 | static inline sector_t transport_limit_task_sectors( | |
c66ac9db NB |
3565 | struct se_device *dev, |
3566 | unsigned long long lba, | |
a1d8b49a | 3567 | sector_t sectors) |
c66ac9db | 3568 | { |
a1d8b49a | 3569 | sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors); |
c66ac9db | 3570 | |
a1d8b49a AG |
3571 | if (dev->transport->get_device_type(dev) == TYPE_DISK) |
3572 | if ((lba + sectors) > transport_dev_end_lba(dev)) | |
3573 | sectors = ((transport_dev_end_lba(dev) - lba) + 1); | |
c66ac9db | 3574 | |
a1d8b49a | 3575 | return sectors; |
c66ac9db NB |
3576 | } |
3577 | ||
c66ac9db NB |
3578 | |
3579 | /* | |
3580 | * This function can be used by HW target mode drivers to create a linked | |
3581 | * scatterlist from all contiguously allocated struct se_task->task_sg[]. | |
3582 | * This is intended to be called during the completion path by TCM Core | |
3583 | * when struct target_core_fabric_ops->check_task_sg_chaining is enabled. | |
3584 | */ | |
3585 | void transport_do_task_sg_chain(struct se_cmd *cmd) | |
3586 | { | |
ec98f782 AG |
3587 | struct scatterlist *sg_first = NULL; |
3588 | struct scatterlist *sg_prev = NULL; | |
3589 | int sg_prev_nents = 0; | |
3590 | struct scatterlist *sg; | |
c66ac9db | 3591 | struct se_task *task; |
ec98f782 | 3592 | u32 chained_nents = 0; |
c66ac9db NB |
3593 | int i; |
3594 | ||
ec98f782 AG |
3595 | BUG_ON(!cmd->se_tfo->task_sg_chaining); |
3596 | ||
c66ac9db NB |
3597 | /* |
3598 | * Walk the struct se_task list and setup scatterlist chains | |
a1d8b49a | 3599 | * for each contiguously allocated struct se_task->task_sg[]. |
c66ac9db | 3600 | */ |
a1d8b49a | 3601 | list_for_each_entry(task, &cmd->t_task_list, t_list) { |
ec98f782 | 3602 | if (!task->task_sg) |
c66ac9db NB |
3603 | continue; |
3604 | ||
ec98f782 AG |
3605 | if (!sg_first) { |
3606 | sg_first = task->task_sg; | |
6708bb27 | 3607 | chained_nents = task->task_sg_nents; |
97868c89 | 3608 | } else { |
ec98f782 | 3609 | sg_chain(sg_prev, sg_prev_nents, task->task_sg); |
6708bb27 | 3610 | chained_nents += task->task_sg_nents; |
97868c89 | 3611 | } |
c3c74c7a NB |
3612 | /* |
3613 | * For the padded tasks, use the extra SGL vector allocated | |
3614 | * in transport_allocate_data_tasks() for the sg_prev_nents | |
04629b7b CH |
3615 | * offset into sg_chain() above. |
3616 | * | |
3617 | * We do not need the padding for the last task (or a single | |
3618 | * task), but in that case we will never use the sg_prev_nents | |
3619 | * value below which would be incorrect. | |
c3c74c7a | 3620 | */ |
04629b7b | 3621 | sg_prev_nents = (task->task_sg_nents + 1); |
ec98f782 | 3622 | sg_prev = task->task_sg; |
c66ac9db NB |
3623 | } |
3624 | /* | |
3625 | * Setup the starting pointer and total t_tasks_sg_linked_no including | |
3626 | * padding SGs for linking and to mark the end. | |
3627 | */ | |
a1d8b49a | 3628 | cmd->t_tasks_sg_chained = sg_first; |
ec98f782 | 3629 | cmd->t_tasks_sg_chained_no = chained_nents; |
c66ac9db | 3630 | |
6708bb27 | 3631 | pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and" |
a1d8b49a AG |
3632 | " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained, |
3633 | cmd->t_tasks_sg_chained_no); | |
c66ac9db | 3634 | |
a1d8b49a AG |
3635 | for_each_sg(cmd->t_tasks_sg_chained, sg, |
3636 | cmd->t_tasks_sg_chained_no, i) { | |
c66ac9db | 3637 | |
6708bb27 | 3638 | pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n", |
5951146d | 3639 | i, sg, sg_page(sg), sg->length, sg->offset); |
c66ac9db | 3640 | if (sg_is_chain(sg)) |
6708bb27 | 3641 | pr_debug("SG: %p sg_is_chain=1\n", sg); |
c66ac9db | 3642 | if (sg_is_last(sg)) |
6708bb27 | 3643 | pr_debug("SG: %p sg_is_last=1\n", sg); |
c66ac9db | 3644 | } |
c66ac9db NB |
3645 | } |
3646 | EXPORT_SYMBOL(transport_do_task_sg_chain); | |
3647 | ||
a1d8b49a AG |
3648 | /* |
3649 | * Break up cmd into chunks transport can handle | |
3650 | */ | |
38b40067 CH |
3651 | static int |
3652 | transport_allocate_data_tasks(struct se_cmd *cmd, | |
c66ac9db | 3653 | enum dma_data_direction data_direction, |
38b40067 | 3654 | struct scatterlist *cmd_sg, unsigned int sgl_nents) |
c66ac9db | 3655 | { |
5951146d | 3656 | struct se_device *dev = cmd->se_dev; |
a3eedc22 | 3657 | int task_count, i; |
38b40067 CH |
3658 | unsigned long long lba; |
3659 | sector_t sectors, dev_max_sectors; | |
3660 | u32 sector_size; | |
3661 | ||
3662 | if (transport_cmd_get_valid_sectors(cmd) < 0) | |
3663 | return -EINVAL; | |
3664 | ||
3665 | dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors; | |
3666 | sector_size = dev->se_sub_dev->se_dev_attrib.block_size; | |
a1d8b49a | 3667 | |
ec98f782 | 3668 | WARN_ON(cmd->data_length % sector_size); |
38b40067 CH |
3669 | |
3670 | lba = cmd->t_task_lba; | |
ec98f782 | 3671 | sectors = DIV_ROUND_UP(cmd->data_length, sector_size); |
277c5f27 | 3672 | task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors); |
af3f00c7 CH |
3673 | |
3674 | /* | |
3675 | * If we need just a single task reuse the SG list in the command | |
3676 | * and avoid a lot of work. | |
3677 | */ | |
3678 | if (task_count == 1) { | |
3679 | struct se_task *task; | |
3680 | unsigned long flags; | |
3681 | ||
3682 | task = transport_generic_get_task(cmd, data_direction); | |
3683 | if (!task) | |
3684 | return -ENOMEM; | |
3685 | ||
3686 | task->task_sg = cmd_sg; | |
3687 | task->task_sg_nents = sgl_nents; | |
3688 | ||
3689 | task->task_lba = lba; | |
3690 | task->task_sectors = sectors; | |
3691 | task->task_size = task->task_sectors * sector_size; | |
3692 | ||
3693 | spin_lock_irqsave(&cmd->t_state_lock, flags); | |
3694 | list_add_tail(&task->t_list, &cmd->t_task_list); | |
3695 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
3696 | ||
3697 | return task_count; | |
3698 | } | |
3699 | ||
ec98f782 | 3700 | for (i = 0; i < task_count; i++) { |
38b40067 | 3701 | struct se_task *task; |
c3c74c7a | 3702 | unsigned int task_size, task_sg_nents_padded; |
38b40067 CH |
3703 | struct scatterlist *sg; |
3704 | unsigned long flags; | |
ec98f782 | 3705 | int count; |
a1d8b49a | 3706 | |
c66ac9db | 3707 | task = transport_generic_get_task(cmd, data_direction); |
a1d8b49a | 3708 | if (!task) |
ec98f782 | 3709 | return -ENOMEM; |
c66ac9db | 3710 | |
c66ac9db | 3711 | task->task_lba = lba; |
ec98f782 AG |
3712 | task->task_sectors = min(sectors, dev_max_sectors); |
3713 | task->task_size = task->task_sectors * sector_size; | |
c66ac9db | 3714 | |
525a48a2 NB |
3715 | /* |
3716 | * This now assumes that passed sg_ents are in PAGE_SIZE chunks | |
3717 | * in order to calculate the number per task SGL entries | |
3718 | */ | |
3719 | task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE); | |
c66ac9db | 3720 | /* |
ec98f782 AG |
3721 | * Check if the fabric module driver is requesting that all |
3722 | * struct se_task->task_sg[] be chained together.. If so, | |
3723 | * then allocate an extra padding SG entry for linking and | |
c3c74c7a NB |
3724 | * marking the end of the chained SGL for every task except |
3725 | * the last one for (task_count > 1) operation, or skipping | |
3726 | * the extra padding for the (task_count == 1) case. | |
c66ac9db | 3727 | */ |
c3c74c7a NB |
3728 | if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) { |
3729 | task_sg_nents_padded = (task->task_sg_nents + 1); | |
c3c74c7a NB |
3730 | } else |
3731 | task_sg_nents_padded = task->task_sg_nents; | |
c66ac9db | 3732 | |
1d20bb61 | 3733 | task->task_sg = kmalloc(sizeof(struct scatterlist) * |
c3c74c7a | 3734 | task_sg_nents_padded, GFP_KERNEL); |
ec98f782 AG |
3735 | if (!task->task_sg) { |
3736 | cmd->se_dev->transport->free_task(task); | |
3737 | return -ENOMEM; | |
3738 | } | |
3739 | ||
c3c74c7a | 3740 | sg_init_table(task->task_sg, task_sg_nents_padded); |
c66ac9db | 3741 | |
ec98f782 AG |
3742 | task_size = task->task_size; |
3743 | ||
3744 | /* Build new sgl, only up to task_size */ | |
6708bb27 | 3745 | for_each_sg(task->task_sg, sg, task->task_sg_nents, count) { |
ec98f782 AG |
3746 | if (cmd_sg->length > task_size) |
3747 | break; | |
3748 | ||
3749 | *sg = *cmd_sg; | |
3750 | task_size -= cmd_sg->length; | |
3751 | cmd_sg = sg_next(cmd_sg); | |
c66ac9db | 3752 | } |
c66ac9db | 3753 | |
ec98f782 AG |
3754 | lba += task->task_sectors; |
3755 | sectors -= task->task_sectors; | |
c66ac9db | 3756 | |
ec98f782 AG |
3757 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
3758 | list_add_tail(&task->t_list, &cmd->t_task_list); | |
3759 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
c66ac9db NB |
3760 | } |
3761 | ||
ec98f782 | 3762 | return task_count; |
c66ac9db NB |
3763 | } |
3764 | ||
3765 | static int | |
ec98f782 | 3766 | transport_allocate_control_task(struct se_cmd *cmd) |
c66ac9db | 3767 | { |
c66ac9db | 3768 | struct se_task *task; |
ec98f782 | 3769 | unsigned long flags; |
c66ac9db NB |
3770 | |
3771 | task = transport_generic_get_task(cmd, cmd->data_direction); | |
3772 | if (!task) | |
ec98f782 | 3773 | return -ENOMEM; |
c66ac9db | 3774 | |
af3f00c7 | 3775 | task->task_sg = cmd->t_data_sg; |
c66ac9db | 3776 | task->task_size = cmd->data_length; |
6708bb27 | 3777 | task->task_sg_nents = cmd->t_data_nents; |
c66ac9db | 3778 | |
ec98f782 AG |
3779 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
3780 | list_add_tail(&task->t_list, &cmd->t_task_list); | |
3781 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
c66ac9db | 3782 | |
6708bb27 | 3783 | /* Success! Return number of tasks allocated */ |
a3eedc22 | 3784 | return 1; |
ec98f782 AG |
3785 | } |
3786 | ||
da0f7619 CH |
3787 | /* |
3788 | * Allocate any required resources to execute the command, and either place |
3789 | * it on the execution queue if possible. For writes we might not have the | |
3790 | * payload yet, thus notify the fabric via a call to ->write_pending instead. | |
c66ac9db | 3791 | */ |
a1d8b49a | 3792 | int transport_generic_new_cmd(struct se_cmd *cmd) |
c66ac9db | 3793 | { |
da0f7619 | 3794 | struct se_device *dev = cmd->se_dev; |
9ac54987 | 3795 | int task_cdbs, task_cdbs_bidi = 0; |
da0f7619 | 3796 | int set_counts = 1; |
c66ac9db NB |
3797 | int ret = 0; |
3798 | ||
3799 | /* | |
3800 | * Determine if the TCM fabric module has already allocated physical |
3801 | * memory, and is directly calling transport_generic_map_mem_to_cmd() | |
ec98f782 | 3802 | * beforehand. |
c66ac9db | 3803 | */ |
ec98f782 AG |
3804 | if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && |
3805 | cmd->data_length) { | |
05d1c7c0 | 3806 | ret = transport_generic_get_mem(cmd); |
c66ac9db NB |
3807 | if (ret < 0) |
3808 | return ret; | |
3809 | } | |
da0f7619 | 3810 | |
1d20bb61 | 3811 | /* |
38b40067 | 3812 | * For BIDI command set up the read tasks first. |
1d20bb61 | 3813 | */ |
da0f7619 | 3814 | if (cmd->t_bidi_data_sg && |
38b40067 CH |
3815 | dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { |
3816 | BUG_ON(!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)); | |
3817 | ||
9ac54987 NB |
3818 | task_cdbs_bidi = transport_allocate_data_tasks(cmd, |
3819 | DMA_FROM_DEVICE, cmd->t_bidi_data_sg, | |
3820 | cmd->t_bidi_data_nents); | |
3821 | if (task_cdbs_bidi <= 0) | |
da0f7619 CH |
3822 | goto out_fail; |
3823 | ||
3824 | atomic_inc(&cmd->t_fe_count); | |
3825 | atomic_inc(&cmd->t_se_count); | |
3826 | set_counts = 0; | |
3827 | } | |
38b40067 CH |
3828 | |
3829 | if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { | |
3830 | task_cdbs = transport_allocate_data_tasks(cmd, | |
3831 | cmd->data_direction, cmd->t_data_sg, | |
3832 | cmd->t_data_nents); | |
3833 | } else { | |
3834 | task_cdbs = transport_allocate_control_task(cmd); | |
3835 | } | |
3836 | ||
da0f7619 CH |
3837 | if (task_cdbs <= 0) |
3838 | goto out_fail; | |
3839 | ||
3840 | if (set_counts) { | |
3841 | atomic_inc(&cmd->t_fe_count); | |
3842 | atomic_inc(&cmd->t_se_count); | |
3843 | } | |
3844 | ||
9ac54987 NB |
3845 | cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi); |
3846 | atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num); | |
3847 | atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num); | |
da0f7619 | 3848 | |
c66ac9db | 3849 | /* |
a1d8b49a | 3850 | * For WRITEs, let the fabric know its buffer is ready.. |
c66ac9db NB |
3851 | * This WRITE struct se_cmd (and all of its associated struct se_task's) |
3852 | * will be added to the struct se_device execution queue after its WRITE | |
3853 | * data has arrived. (ie: It gets handled by the transport processing | |
3854 | * thread a second time) | |
3855 | */ | |
3856 | if (cmd->data_direction == DMA_TO_DEVICE) { | |
3857 | transport_add_tasks_to_state_queue(cmd); | |
3858 | return transport_generic_write_pending(cmd); | |
3859 | } | |
3860 | /* | |
3861 | * Everything else but a WRITE, add the struct se_cmd's struct se_task's | |
3862 | * to the execution queue. | |
3863 | */ | |
3864 | transport_execute_tasks(cmd); | |
3865 | return 0; | |
da0f7619 CH |
3866 | |
3867 | out_fail: | |
3868 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | |
3869 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | |
3870 | return -EINVAL; | |
c66ac9db | 3871 | } |
a1d8b49a | 3872 | EXPORT_SYMBOL(transport_generic_new_cmd); |
c66ac9db NB |
3873 | |
3874 | /* transport_generic_process_write(): | |
3875 | * | |
3876 | * | |
3877 | */ | |
3878 | void transport_generic_process_write(struct se_cmd *cmd) | |
3879 | { | |
c66ac9db NB |
3880 | transport_execute_tasks(cmd); |
3881 | } | |
3882 | EXPORT_SYMBOL(transport_generic_process_write); | |
3883 | ||
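/*
 * QUEUE_FULL retry path for WRITEs: call ->write_pending() again and requeue
 * the command if the fabric still cannot accept it.
 */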
e057f533 | 3884 | static void transport_write_pending_qf(struct se_cmd *cmd) |
07bde79a | 3885 | { |
f147abb4 NB |
3886 | int ret; |
3887 | ||
3888 | ret = cmd->se_tfo->write_pending(cmd); | |
3889 | if (ret == -EAGAIN || ret == -ENOMEM) { | |
e057f533 CH |
3890 | pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", |
3891 | cmd); | |
3892 | transport_handle_queue_full(cmd, cmd->se_dev); | |
3893 | } | |
07bde79a NB |
3894 | } |
3895 | ||
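/*
 * Tell the fabric its WRITE buffers may now be filled: mark the command
 * TRANSPORT_WRITE_PENDING, clear its active state so HW interrupt context can
 * call transport_generic_handle_data(), then invoke ->write_pending(),
 * falling back to QUEUE_FULL handling on -EAGAIN/-ENOMEM.
 */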
c66ac9db NB |
3896 | static int transport_generic_write_pending(struct se_cmd *cmd) |
3897 | { | |
3898 | unsigned long flags; | |
3899 | int ret; | |
3900 | ||
a1d8b49a | 3901 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 3902 | cmd->t_state = TRANSPORT_WRITE_PENDING; |
a1d8b49a | 3903 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
07bde79a | 3904 | |
c66ac9db NB |
3905 | /* |
3906 | * Clear the se_cmd for WRITE_PENDING status in order to set | |
a1d8b49a | 3907 | * cmd->t_transport_active=0 so that transport_generic_handle_data |
c66ac9db | 3908 | * can be called from HW target mode interrupt code. This is safe |
e3d6f909 | 3909 | * to be called with transport_off=1 before the cmd->se_tfo->write_pending |
c66ac9db NB |
3910 | * because the se_cmd->se_lun pointer is not being cleared. |
3911 | */ | |
3912 | transport_cmd_check_stop(cmd, 1, 0); | |
3913 | ||
3914 | /* | |
3915 | * Call the fabric write_pending function here to let the | |
3916 | * frontend know that WRITE buffers are ready. | |
3917 | */ | |
e3d6f909 | 3918 | ret = cmd->se_tfo->write_pending(cmd); |
f147abb4 | 3919 | if (ret == -EAGAIN || ret == -ENOMEM) |
07bde79a NB |
3920 | goto queue_full; |
3921 | else if (ret < 0) | |
c66ac9db NB |
3922 | return ret; |
3923 | ||
3924 | return PYX_TRANSPORT_WRITE_PENDING; | |
07bde79a NB |
3925 | |
3926 | queue_full: | |
6708bb27 | 3927 | pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd); |
07bde79a | 3928 | cmd->t_state = TRANSPORT_COMPLETE_QF_WP; |
e057f533 | 3929 | transport_handle_queue_full(cmd, cmd->se_dev); |
f147abb4 | 3930 | return 0; |
c66ac9db NB |
3931 | } |
3932 | ||
2dbc43d2 CH |
3933 | /** |
3934 | * transport_release_cmd - free a command | |
3935 | * @cmd: command to free | |
3936 | * | |
3937 | * This routine unconditionally frees a command, and reference counting | |
3938 | * or list removal must be done in the caller. | |
3939 | */ | |
35462975 | 3940 | void transport_release_cmd(struct se_cmd *cmd) |
c66ac9db | 3941 | { |
e3d6f909 | 3942 | BUG_ON(!cmd->se_tfo); |
c66ac9db | 3943 | |
2dbc43d2 CH |
3944 | if (cmd->se_tmr_req) |
3945 | core_tmr_release_req(cmd->se_tmr_req); | |
3946 | if (cmd->t_task_cdb != cmd->__t_task_cdb) | |
3947 | kfree(cmd->t_task_cdb); | |
a17f091d NB |
3948 | /* |
3949 | * Check if target_wait_for_sess_cmds() is expecting to | |
3950 | * release se_cmd directly here.. | |
3951 | */ | |
3952 | if (cmd->check_release != 0 && cmd->se_tfo->check_release_cmd) | |
3953 | if (cmd->se_tfo->check_release_cmd(cmd) != 0) | |
3954 | return; | |
3955 | ||
35462975 | 3956 | cmd->se_tfo->release_cmd(cmd); |
c66ac9db | 3957 | } |
35462975 | 3958 | EXPORT_SYMBOL(transport_release_cmd); |
c66ac9db | 3959 | |
39c05f32 | 3960 | void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) |
c66ac9db | 3961 | { |
d14921d6 NB |
3962 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { |
3963 | if (wait_for_tasks && cmd->se_tmr_req) | |
3964 | transport_wait_for_tasks(cmd); | |
3965 | ||
35462975 | 3966 | transport_release_cmd(cmd); |
d14921d6 NB |
3967 | } else { |
3968 | if (wait_for_tasks) | |
3969 | transport_wait_for_tasks(cmd); | |
3970 | ||
c66ac9db NB |
3971 | core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd); |
3972 | ||
82f1c8a4 | 3973 | if (cmd->se_lun) |
c66ac9db | 3974 | transport_lun_remove_cmd(cmd); |
c66ac9db | 3975 | |
f4366772 NB |
3976 | transport_free_dev_tasks(cmd); |
3977 | ||
39c05f32 | 3978 | transport_put_cmd(cmd); |
c66ac9db NB |
3979 | } |
3980 | } | |
3981 | EXPORT_SYMBOL(transport_generic_free_cmd); | |
3982 | ||
a17f091d NB |
3983 | /* target_get_sess_cmd - Add command to active ->sess_cmd_list |
3984 | * @se_sess: session to reference | |
3985 | * @se_cmd: command descriptor to add | |
3986 | */ | |
3987 | void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) | |
3988 | { | |
3989 | unsigned long flags; | |
3990 | ||
3991 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); | |
3992 | list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); | |
3993 | se_cmd->check_release = 1; | |
3994 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | |
3995 | } | |
3996 | EXPORT_SYMBOL(target_get_sess_cmd); | |
3997 | ||
3998 | /* target_put_sess_cmd - Check for active I/O shutdown or list delete | |
3999 | * @se_sess: session to reference | |
4000 | * @se_cmd: command descriptor to drop | |
4001 | */ | |
4002 | int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) | |
4003 | { | |
4004 | unsigned long flags; | |
4005 | ||
4006 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); | |
4007 | if (list_empty(&se_cmd->se_cmd_list)) { | |
4008 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | |
4009 | WARN_ON(1); | |
4010 | return 0; | |
4011 | } | |
4012 | ||
4013 | if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { | |
4014 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | |
4015 | complete(&se_cmd->cmd_wait_comp); | |
4016 | return 1; | |
4017 | } | |
4018 | list_del(&se_cmd->se_cmd_list); | |
4019 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | |
4020 | ||
4021 | return 0; | |
4022 | } | |
4023 | EXPORT_SYMBOL(target_put_sess_cmd); | |
4024 | ||
4025 | /* target_splice_sess_cmd_list - Split active cmds into sess_wait_list | |
4026 | * @se_sess: session to split | |
4027 | */ | |
4028 | void target_splice_sess_cmd_list(struct se_session *se_sess) | |
4029 | { | |
4030 | struct se_cmd *se_cmd; | |
4031 | unsigned long flags; | |
4032 | ||
4033 | WARN_ON(!list_empty(&se_sess->sess_wait_list)); | |
4034 | INIT_LIST_HEAD(&se_sess->sess_wait_list); | |
4035 | ||
4036 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); | |
4037 | se_sess->sess_tearing_down = 1; | |
4038 | ||
4039 | list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); | |
4040 | ||
4041 | list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) | |
4042 | se_cmd->cmd_wait_set = 1; | |
4043 | ||
4044 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | |
4045 | } | |
4046 | EXPORT_SYMBOL(target_splice_sess_cmd_list); | |
4047 | ||
4048 | /* target_wait_for_sess_cmds - Wait for outstanding descriptors | |
4049 | * @se_sess: session to wait for active I/O | |
4050 | * @wait_for_tasks: Make extra transport_wait_for_tasks call | |
4051 | */ | |
4052 | void target_wait_for_sess_cmds( | |
4053 | struct se_session *se_sess, | |
4054 | int wait_for_tasks) | |
4055 | { | |
4056 | struct se_cmd *se_cmd, *tmp_cmd; | |
4057 | bool rc = false; | |
4058 | ||
4059 | list_for_each_entry_safe(se_cmd, tmp_cmd, | |
4060 | &se_sess->sess_wait_list, se_cmd_list) { | |
4061 | list_del(&se_cmd->se_cmd_list); | |
4062 | ||
4063 | pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" | |
4064 | " %d\n", se_cmd, se_cmd->t_state, | |
4065 | se_cmd->se_tfo->get_cmd_state(se_cmd)); | |
4066 | ||
4067 | if (wait_for_tasks) { | |
4068 | pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d," | |
4069 | " fabric state: %d\n", se_cmd, se_cmd->t_state, | |
4070 | se_cmd->se_tfo->get_cmd_state(se_cmd)); | |
4071 | ||
4072 | rc = transport_wait_for_tasks(se_cmd); | |
4073 | ||
4074 | pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d," | |
4075 | " fabric state: %d\n", se_cmd, se_cmd->t_state, | |
4076 | se_cmd->se_tfo->get_cmd_state(se_cmd)); | |
4077 | } | |
4078 | ||
4079 | if (!rc) { | |
4080 | wait_for_completion(&se_cmd->cmd_wait_comp); | |
4081 | pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" | |
4082 | " fabric state: %d\n", se_cmd, se_cmd->t_state, | |
4083 | se_cmd->se_tfo->get_cmd_state(se_cmd)); | |
4084 | } | |
4085 | ||
4086 | se_cmd->se_tfo->release_cmd(se_cmd); | |
4087 | } | |
4088 | } | |
4089 | EXPORT_SYMBOL(target_wait_for_sess_cmds); | |
4090 | ||
c66ac9db NB |
4091 | /* transport_lun_wait_for_tasks(): |
4092 | * | |
4093 | * Called from ConfigFS context to stop the passed struct se_cmd to allow | |
4094 | * a struct se_lun to be successfully shut down. |
4095 | */ | |
4096 | static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) | |
4097 | { | |
4098 | unsigned long flags; | |
4099 | int ret; | |
4100 | /* | |
4101 | * If the frontend has already requested this struct se_cmd to | |
4102 | * be stopped, we can safely ignore this struct se_cmd. | |
4103 | */ | |
a1d8b49a AG |
4104 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
4105 | if (atomic_read(&cmd->t_transport_stop)) { | |
4106 | atomic_set(&cmd->transport_lun_stop, 0); | |
6708bb27 | 4107 | pr_debug("ConfigFS ITT[0x%08x] - t_transport_stop ==" |
e3d6f909 | 4108 | " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd)); |
a1d8b49a | 4109 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 4110 | transport_cmd_check_stop(cmd, 1, 0); |
e3d6f909 | 4111 | return -EPERM; |
c66ac9db | 4112 | } |
a1d8b49a AG |
4113 | atomic_set(&cmd->transport_lun_fe_stop, 1); |
4114 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
c66ac9db | 4115 | |
5951146d | 4116 | wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq); |
c66ac9db NB |
4117 | |
4118 | ret = transport_stop_tasks_for_cmd(cmd); | |
4119 | ||
6708bb27 AG |
4120 | pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:" |
4121 | " %d\n", cmd, cmd->t_task_list_num, ret); | |
c66ac9db | 4122 | if (!ret) { |
6708bb27 | 4123 | pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n", |
e3d6f909 | 4124 | cmd->se_tfo->get_task_tag(cmd)); |
a1d8b49a | 4125 | wait_for_completion(&cmd->transport_lun_stop_comp); |
6708bb27 | 4126 | pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n", |
e3d6f909 | 4127 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db | 4128 | } |
3df8d40b | 4129 | transport_remove_cmd_from_queue(cmd); |
c66ac9db NB |
4130 | |
4131 | return 0; | |
4132 | } | |
4133 | ||
c66ac9db NB |
4134 | static void __transport_clear_lun_from_sessions(struct se_lun *lun) |
4135 | { | |
4136 | struct se_cmd *cmd = NULL; | |
4137 | unsigned long lun_flags, cmd_flags; | |
4138 | /* | |
4139 | * Do exception processing and return CHECK_CONDITION status to the | |
4140 | * Initiator Port. | |
4141 | */ | |
4142 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); | |
5951146d AG |
4143 | while (!list_empty(&lun->lun_cmd_list)) { |
4144 | cmd = list_first_entry(&lun->lun_cmd_list, | |
4145 | struct se_cmd, se_lun_node); | |
4146 | list_del(&cmd->se_lun_node); | |
4147 | ||
a1d8b49a | 4148 | atomic_set(&cmd->transport_lun_active, 0); |
c66ac9db NB |
4149 | /* |
4150 | * This will notify iscsi_target_transport.c: | |
4151 | * transport_cmd_check_stop() that a LUN shutdown is in | |
4152 | * progress for the iscsi_cmd_t. | |
4153 | */ | |
a1d8b49a | 4154 | spin_lock(&cmd->t_state_lock); |
6708bb27 | 4155 | pr_debug("SE_LUN[%d] - Setting cmd->transport" |
c66ac9db | 4156 | "_lun_stop for ITT: 0x%08x\n", |
e3d6f909 AG |
4157 | cmd->se_lun->unpacked_lun, |
4158 | cmd->se_tfo->get_task_tag(cmd)); | |
a1d8b49a AG |
4159 | atomic_set(&cmd->transport_lun_stop, 1); |
4160 | spin_unlock(&cmd->t_state_lock); | |
c66ac9db NB |
4161 | |
4162 | spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); | |
4163 | ||
6708bb27 AG |
4164 | if (!cmd->se_lun) { |
4165 | pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n", | |
e3d6f909 AG |
4166 | cmd->se_tfo->get_task_tag(cmd), |
4167 | cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); | |
c66ac9db NB |
4168 | BUG(); |
4169 | } | |
4170 | /* | |
4171 | * If the Storage engine still owns the iscsi_cmd_t, determine | |
4172 | * and/or stop its context. | |
4173 | */ | |
6708bb27 | 4174 | pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport" |
e3d6f909 AG |
4175 | "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun, |
4176 | cmd->se_tfo->get_task_tag(cmd)); | |
c66ac9db | 4177 | |
e3d6f909 | 4178 | if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) { |
c66ac9db NB |
4179 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); |
4180 | continue; | |
4181 | } | |
4182 | ||
6708bb27 | 4183 | pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun" |
c66ac9db | 4184 | "_wait_for_tasks(): SUCCESS\n", |
e3d6f909 AG |
4185 | cmd->se_lun->unpacked_lun, |
4186 | cmd->se_tfo->get_task_tag(cmd)); | |
c66ac9db | 4187 | |
a1d8b49a | 4188 | spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); |
6708bb27 | 4189 | if (!atomic_read(&cmd->transport_dev_active)) { |
a1d8b49a | 4190 | spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); |
c66ac9db NB |
4191 | goto check_cond; |
4192 | } | |
a1d8b49a | 4193 | atomic_set(&cmd->transport_dev_active, 0); |
c66ac9db | 4194 | transport_all_task_dev_remove_state(cmd); |
a1d8b49a | 4195 | spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); |
c66ac9db NB |
4196 | |
4197 | transport_free_dev_tasks(cmd); | |
4198 | /* | |
4199 | * The Storage engine stopped this struct se_cmd before it was | |
4200 | * sent to the fabric frontend for delivery back to the |
4201 | * Initiator Node. Return this SCSI CDB back with a |
4202 | * CHECK_CONDITION status. |
4203 | */ | |
4204 | check_cond: | |
4205 | transport_send_check_condition_and_sense(cmd, | |
4206 | TCM_NON_EXISTENT_LUN, 0); | |
4207 | /* | |
4208 | * If the fabric frontend is waiting for this iscsi_cmd_t to | |
4209 | * be released, notify the waiting thread now that LU has | |
4210 | * finished accessing it. | |
4211 | */ | |
a1d8b49a AG |
4212 | spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); |
4213 | if (atomic_read(&cmd->transport_lun_fe_stop)) { | |
6708bb27 | 4214 | pr_debug("SE_LUN[%d] - Detected FE stop for" |
c66ac9db NB |
4215 | " struct se_cmd: %p ITT: 0x%08x\n", |
4216 | lun->unpacked_lun, | |
e3d6f909 | 4217 | cmd, cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db | 4218 | |
a1d8b49a | 4219 | spin_unlock_irqrestore(&cmd->t_state_lock, |
c66ac9db NB |
4220 | cmd_flags); |
4221 | transport_cmd_check_stop(cmd, 1, 0); | |
a1d8b49a | 4222 | complete(&cmd->transport_lun_fe_stop_comp); |
c66ac9db NB |
4223 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); |
4224 | continue; | |
4225 | } | |
6708bb27 | 4226 | pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n", |
e3d6f909 | 4227 | lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db | 4228 | |
a1d8b49a | 4229 | spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); |
c66ac9db NB |
4230 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); |
4231 | } | |
4232 | spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); | |
4233 | } | |
4234 | ||
4235 | static int transport_clear_lun_thread(void *p) | |
4236 | { | |
4237 | struct se_lun *lun = (struct se_lun *)p; | |
4238 | ||
4239 | __transport_clear_lun_from_sessions(lun); | |
4240 | complete(&lun->lun_shutdown_comp); | |
4241 | ||
4242 | return 0; | |
4243 | } | |
4244 | ||
4245 | int transport_clear_lun_from_sessions(struct se_lun *lun) | |
4246 | { | |
4247 | struct task_struct *kt; | |
4248 | ||
5951146d | 4249 | kt = kthread_run(transport_clear_lun_thread, lun, |
c66ac9db NB |
4250 | "tcm_cl_%u", lun->unpacked_lun); |
4251 | if (IS_ERR(kt)) { | |
6708bb27 | 4252 | pr_err("Unable to start clear_lun thread\n"); |
e3d6f909 | 4253 | return PTR_ERR(kt); |
c66ac9db NB |
4254 | } |
4255 | wait_for_completion(&lun->lun_shutdown_comp); | |
4256 | ||
4257 | return 0; | |
4258 | } | |
4259 | ||
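/*
 * Illustrative sketch (editor's addition, not from the original source): a
 * core LUN shutdown path calls transport_clear_lun_from_sessions() to drain
 * all commands still referencing the LUN.  core_example_shutdown_lun() is a
 * hypothetical name; lun->lun_shutdown_comp is assumed to have been
 * initialized by the LUN setup path.
 */
static int core_example_shutdown_lun(struct se_lun *lun)
{
	/* Spawns the tcm_cl_%u kthread and blocks on lun_shutdown_comp */
	return transport_clear_lun_from_sessions(lun);
}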
d14921d6 NB |
4260 | /** |
4261 | * transport_wait_for_tasks - wait for completion to occur | |
4262 | * @cmd: command to wait on |
c66ac9db | 4263 | * |
d14921d6 NB |
4264 | * Called from frontend fabric context to wait for the storage engine |
4265 | * to pause and/or release a frontend-generated struct se_cmd. |
c66ac9db | 4266 | */ |
a17f091d | 4267 | bool transport_wait_for_tasks(struct se_cmd *cmd) |
c66ac9db NB |
4268 | { |
4269 | unsigned long flags; | |
4270 | ||
a1d8b49a | 4271 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
d14921d6 NB |
4272 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) { |
4273 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
a17f091d | 4274 | return false; |
d14921d6 NB |
4275 | } |
4276 | /* | |
4277 | * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE | |
4278 | * has been set in transport_set_supported_SAM_opcode(). | |
4279 | */ | |
4280 | if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && !cmd->se_tmr_req) { | |
4281 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
a17f091d | 4282 | return false; |
d14921d6 | 4283 | } |
c66ac9db NB |
4284 | /* |
4285 | * If we are already stopped due to an external event (i.e. LUN shutdown), |
4286 | * sleep until the connection can have the passed struct se_cmd back. | |
a1d8b49a | 4287 | * The cmd->transport_lun_fe_stop_comp will be completed by |
c66ac9db NB |
4288 | * transport_clear_lun_from_sessions() once the ConfigFS context caller |
4289 | * has completed its operation on the struct se_cmd. | |
4290 | */ | |
a1d8b49a | 4291 | if (atomic_read(&cmd->transport_lun_stop)) { |
c66ac9db | 4292 | |
6708bb27 | 4293 | pr_debug("wait_for_tasks: Stopping" |
e3d6f909 | 4294 | " wait_for_completion(&cmd->t_tasktransport_lun_fe" |
c66ac9db | 4295 | "_stop_comp); for ITT: 0x%08x\n", |
e3d6f909 | 4296 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db NB |
4297 | /* |
4298 | * There is a special case for WRITES where a FE exception + | |
4299 | * LUN shutdown means ConfigFS context is still sleeping on | |
4300 | * transport_lun_stop_comp in transport_lun_wait_for_tasks(). | |
4301 | * We go ahead and complete transport_lun_stop_comp just to be sure |
4302 | * here. | |
4303 | */ | |
a1d8b49a AG |
4304 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
4305 | complete(&cmd->transport_lun_stop_comp); | |
4306 | wait_for_completion(&cmd->transport_lun_fe_stop_comp); | |
4307 | spin_lock_irqsave(&cmd->t_state_lock, flags); | |
c66ac9db NB |
4308 | |
4309 | transport_all_task_dev_remove_state(cmd); | |
4310 | /* | |
4311 | * At this point, the frontend that originated this struct se_cmd |
4312 | * owns the structure again, and it can be released through the |
4313 | * normal means below. |
4314 | */ | |
6708bb27 | 4315 | pr_debug("wait_for_tasks: Stopped" |
e3d6f909 | 4316 | " wait_for_completion(&cmd->t_tasktransport_lun_fe_" |
c66ac9db | 4317 | "stop_comp); for ITT: 0x%08x\n", |
e3d6f909 | 4318 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db | 4319 | |
a1d8b49a | 4320 | atomic_set(&cmd->transport_lun_stop, 0); |
c66ac9db | 4321 | } |
a1d8b49a | 4322 | if (!atomic_read(&cmd->t_transport_active) || |
d14921d6 NB |
4323 | atomic_read(&cmd->t_transport_aborted)) { |
4324 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
a17f091d | 4325 | return false; |
d14921d6 | 4326 | } |
c66ac9db | 4327 | |
a1d8b49a | 4328 | atomic_set(&cmd->t_transport_stop, 1); |
c66ac9db | 4329 | |
6708bb27 | 4330 | pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x" |
f2da9dbd CH |
4331 | " i_state: %d, t_state: %d, t_transport_stop = TRUE\n", |
4332 | cmd, cmd->se_tfo->get_task_tag(cmd), | |
4333 | cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); | |
c66ac9db | 4334 | |
a1d8b49a | 4335 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db | 4336 | |
5951146d | 4337 | wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq); |
c66ac9db | 4338 | |
a1d8b49a | 4339 | wait_for_completion(&cmd->t_transport_stop_comp); |
c66ac9db | 4340 | |
a1d8b49a AG |
4341 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
4342 | atomic_set(&cmd->t_transport_active, 0); | |
4343 | atomic_set(&cmd->t_transport_stop, 0); | |
c66ac9db | 4344 | |
6708bb27 | 4345 | pr_debug("wait_for_tasks: Stopped wait_for_compltion(" |
a1d8b49a | 4346 | "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n", |
e3d6f909 | 4347 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db | 4348 | |
d14921d6 | 4349 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
a17f091d NB |
4350 | |
4351 | return true; | |
c66ac9db | 4352 | } |
d14921d6 | 4353 | EXPORT_SYMBOL(transport_wait_for_tasks); |
c66ac9db NB |
4354 | |
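/*
 * Illustrative sketch (editor's addition, not from the original source): a
 * fabric module that must quiesce a command before releasing it can call
 * transport_wait_for_tasks() and act on the boolean result.
 * example_fabric_release_cmd() is a hypothetical fabric-side helper; the
 * actual release path is fabric specific.
 */
static void example_fabric_release_cmd(struct se_cmd *se_cmd)
{
	/*
	 * true: the caller slept until t_transport_stop_comp fired;
	 * false: nothing was outstanding, or the descriptor did not
	 * qualify for a wait (see the checks above).
	 */
	if (transport_wait_for_tasks(se_cmd))
		pr_debug("example: waited for outstanding tasks on %p\n",
			 se_cmd);
}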
4355 | static int transport_get_sense_codes( | |
4356 | struct se_cmd *cmd, | |
4357 | u8 *asc, | |
4358 | u8 *ascq) | |
4359 | { | |
4360 | *asc = cmd->scsi_asc; | |
4361 | *ascq = cmd->scsi_ascq; | |
4362 | ||
4363 | return 0; | |
4364 | } | |
4365 | ||
4366 | static int transport_set_sense_codes( | |
4367 | struct se_cmd *cmd, | |
4368 | u8 asc, | |
4369 | u8 ascq) | |
4370 | { | |
4371 | cmd->scsi_asc = asc; | |
4372 | cmd->scsi_ascq = ascq; | |
4373 | ||
4374 | return 0; | |
4375 | } | |
4376 | ||
4377 | int transport_send_check_condition_and_sense( | |
4378 | struct se_cmd *cmd, | |
4379 | u8 reason, | |
4380 | int from_transport) | |
4381 | { | |
4382 | unsigned char *buffer = cmd->sense_buffer; | |
4383 | unsigned long flags; | |
4384 | int offset; | |
4385 | u8 asc = 0, ascq = 0; | |
4386 | ||
a1d8b49a | 4387 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
c66ac9db | 4388 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { |
a1d8b49a | 4389 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
4390 | return 0; |
4391 | } | |
4392 | cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; | |
a1d8b49a | 4393 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
c66ac9db NB |
4394 | |
4395 | if (!reason && from_transport) | |
4396 | goto after_reason; | |
4397 | ||
4398 | if (!from_transport) | |
4399 | cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; | |
4400 | /* | |
4401 | * Data Segment and SenseLength of the fabric response PDU. | |
4402 | * | |
4403 | * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE | |
4404 | * from include/scsi/scsi_cmnd.h | |
4405 | */ | |
e3d6f909 | 4406 | offset = cmd->se_tfo->set_fabric_sense_len(cmd, |
c66ac9db NB |
4407 | TRANSPORT_SENSE_BUFFER); |
4408 | /* | |
4409 | * Actual SENSE DATA, see SPC-3 7.23.2.  SPC_SENSE_KEY_OFFSET uses |
4410 | * SENSE KEY values from include/scsi/scsi.h | |
4411 | */ | |
4412 | switch (reason) { | |
4413 | case TCM_NON_EXISTENT_LUN: | |
eb39d340 NB |
4414 | /* CURRENT ERROR */ |
4415 | buffer[offset] = 0x70; | |
4416 | /* ILLEGAL REQUEST */ | |
4417 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | |
4418 | /* LOGICAL UNIT NOT SUPPORTED */ | |
4419 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25; | |
4420 | break; | |
c66ac9db NB |
4421 | case TCM_UNSUPPORTED_SCSI_OPCODE: |
4422 | case TCM_SECTOR_COUNT_TOO_MANY: | |
4423 | /* CURRENT ERROR */ | |
4424 | buffer[offset] = 0x70; | |
4425 | /* ILLEGAL REQUEST */ | |
4426 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | |
4427 | /* INVALID COMMAND OPERATION CODE */ | |
4428 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20; | |
4429 | break; | |
4430 | case TCM_UNKNOWN_MODE_PAGE: | |
4431 | /* CURRENT ERROR */ | |
4432 | buffer[offset] = 0x70; | |
4433 | /* ILLEGAL REQUEST */ | |
4434 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | |
4435 | /* INVALID FIELD IN CDB */ | |
4436 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24; | |
4437 | break; | |
4438 | case TCM_CHECK_CONDITION_ABORT_CMD: | |
4439 | /* CURRENT ERROR */ | |
4440 | buffer[offset] = 0x70; | |
4441 | /* ABORTED COMMAND */ | |
4442 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
4443 | /* BUS DEVICE RESET FUNCTION OCCURRED */ | |
4444 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29; | |
4445 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03; | |
4446 | break; | |
4447 | case TCM_INCORRECT_AMOUNT_OF_DATA: | |
4448 | /* CURRENT ERROR */ | |
4449 | buffer[offset] = 0x70; | |
4450 | /* ABORTED COMMAND */ | |
4451 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
4452 | /* WRITE ERROR */ | |
4453 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c; | |
4454 | /* NOT ENOUGH UNSOLICITED DATA */ | |
4455 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d; | |
4456 | break; | |
4457 | case TCM_INVALID_CDB_FIELD: | |
4458 | /* CURRENT ERROR */ | |
4459 | buffer[offset] = 0x70; | |
4460 | /* ABORTED COMMAND */ | |
4461 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
4462 | /* INVALID FIELD IN CDB */ | |
4463 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24; | |
4464 | break; | |
4465 | case TCM_INVALID_PARAMETER_LIST: | |
4466 | /* CURRENT ERROR */ | |
4467 | buffer[offset] = 0x70; | |
4468 | /* ABORTED COMMAND */ | |
4469 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
4470 | /* INVALID FIELD IN PARAMETER LIST */ | |
4471 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26; | |
4472 | break; | |
4473 | case TCM_UNEXPECTED_UNSOLICITED_DATA: | |
4474 | /* CURRENT ERROR */ | |
4475 | buffer[offset] = 0x70; | |
4476 | /* ABORTED COMMAND */ | |
4477 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
4478 | /* WRITE ERROR */ | |
4479 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c; | |
4480 | /* UNEXPECTED_UNSOLICITED_DATA */ | |
4481 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c; | |
4482 | break; | |
4483 | case TCM_SERVICE_CRC_ERROR: | |
4484 | /* CURRENT ERROR */ | |
4485 | buffer[offset] = 0x70; | |
4486 | /* ABORTED COMMAND */ | |
4487 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
4488 | /* PROTOCOL SERVICE CRC ERROR */ | |
4489 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47; | |
4490 | /* N/A */ | |
4491 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05; | |
4492 | break; | |
4493 | case TCM_SNACK_REJECTED: | |
4494 | /* CURRENT ERROR */ | |
4495 | buffer[offset] = 0x70; | |
4496 | /* ABORTED COMMAND */ | |
4497 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | |
4498 | /* READ ERROR */ | |
4499 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11; | |
4500 | /* FAILED RETRANSMISSION REQUEST */ | |
4501 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13; | |
4502 | break; | |
4503 | case TCM_WRITE_PROTECTED: | |
4504 | /* CURRENT ERROR */ | |
4505 | buffer[offset] = 0x70; | |
4506 | /* DATA PROTECT */ | |
4507 | buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT; | |
4508 | /* WRITE PROTECTED */ | |
4509 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27; | |
4510 | break; | |
4511 | case TCM_CHECK_CONDITION_UNIT_ATTENTION: | |
4512 | /* CURRENT ERROR */ | |
4513 | buffer[offset] = 0x70; | |
4514 | /* UNIT ATTENTION */ | |
4515 | buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION; | |
4516 | core_scsi3_ua_for_check_condition(cmd, &asc, &ascq); | |
4517 | buffer[offset+SPC_ASC_KEY_OFFSET] = asc; | |
4518 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq; | |
4519 | break; | |
4520 | case TCM_CHECK_CONDITION_NOT_READY: | |
4521 | /* CURRENT ERROR */ | |
4522 | buffer[offset] = 0x70; | |
4523 | /* Not Ready */ | |
4524 | buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY; | |
4525 | transport_get_sense_codes(cmd, &asc, &ascq); | |
4526 | buffer[offset+SPC_ASC_KEY_OFFSET] = asc; | |
4527 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq; | |
4528 | break; | |
4529 | case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: | |
4530 | default: | |
4531 | /* CURRENT ERROR */ | |
4532 | buffer[offset] = 0x70; | |
4533 | /* ILLEGAL REQUEST */ | |
4534 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | |
4535 | /* LOGICAL UNIT COMMUNICATION FAILURE */ | |
4536 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80; | |
4537 | break; | |
4538 | } | |
4539 | /* | |
4540 | * This code uses linux/include/scsi/scsi.h SAM status codes! | |
4541 | */ | |
4542 | cmd->scsi_status = SAM_STAT_CHECK_CONDITION; | |
4543 | /* | |
4544 | * Automatically padded, this value is encoded in the fabric's | |
4545 | * data_length response PDU containing the SCSI defined sense data. | |
4546 | */ | |
4547 | cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset; | |
4548 | ||
4549 | after_reason: | |
07bde79a | 4550 | return cmd->se_tfo->queue_status(cmd); |
c66ac9db NB |
4551 | } |
4552 | EXPORT_SYMBOL(transport_send_check_condition_and_sense); | |
4553 | ||
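/*
 * Illustrative sketch (editor's addition, not from the original source):
 * rejecting an unsupported CDB by emulating sense data.  With
 * from_transport = 0, SCF_EMULATED_TASK_SENSE is set and the CHECK_CONDITION
 * status is pushed to the initiator via the fabric's queue_status() callback.
 * example_reject_unsupported_cdb() is a hypothetical helper.
 */
static int example_reject_unsupported_cdb(struct se_cmd *cmd)
{
	return transport_send_check_condition_and_sense(cmd,
			TCM_UNSUPPORTED_SCSI_OPCODE, 0);
}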
4554 | int transport_check_aborted_status(struct se_cmd *cmd, int send_status) | |
4555 | { | |
4556 | int ret = 0; | |
4557 | ||
a1d8b49a | 4558 | if (atomic_read(&cmd->t_transport_aborted) != 0) { |
6708bb27 | 4559 | if (!send_status || |
c66ac9db NB |
4560 | (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) |
4561 | return 1; | |
4562 | #if 0 | |
6708bb27 | 4563 | pr_debug("Sending delayed SAM_STAT_TASK_ABORTED" |
c66ac9db | 4564 | " status for CDB: 0x%02x ITT: 0x%08x\n", |
a1d8b49a | 4565 | cmd->t_task_cdb[0], |
e3d6f909 | 4566 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db NB |
4567 | #endif |
4568 | cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; | |
e3d6f909 | 4569 | cmd->se_tfo->queue_status(cmd); |
c66ac9db NB |
4570 | ret = 1; |
4571 | } | |
4572 | return ret; | |
4573 | } | |
4574 | EXPORT_SYMBOL(transport_check_aborted_status); | |
4575 | ||
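/*
 * Illustrative sketch (editor's addition, not from the original source): a
 * caller on the response path can use transport_check_aborted_status() to
 * give a delayed SAM_STAT_TASK_ABORTED precedence over a normal completion.
 * example_queue_response() and its use of queue_data_in() are hypothetical;
 * the real callers are the core and fabric completion paths.
 */
static int example_queue_response(struct se_cmd *cmd)
{
	/* Non-zero: TASK_ABORTED status was (or will be) sent instead */
	if (transport_check_aborted_status(cmd, 1) != 0)
		return 0;

	return cmd->se_tfo->queue_data_in(cmd);
}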
4576 | void transport_send_task_abort(struct se_cmd *cmd) | |
4577 | { | |
c252f003 NB |
4578 | unsigned long flags; |
4579 | ||
4580 | spin_lock_irqsave(&cmd->t_state_lock, flags); | |
4581 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { | |
4582 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
4583 | return; | |
4584 | } | |
4585 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | |
4586 | ||
c66ac9db NB |
4587 | /* |
4588 | * If there are still expected incoming fabric WRITEs, we wait | |
4589 | * until they have completed before sending a TASK_ABORTED |
4590 | * response. This response with TASK_ABORTED status will be | |
4591 | * queued back to the fabric module by transport_check_aborted_status(). |
4592 | */ | |
4593 | if (cmd->data_direction == DMA_TO_DEVICE) { | |
e3d6f909 | 4594 | if (cmd->se_tfo->write_pending_status(cmd) != 0) { |
a1d8b49a | 4595 | atomic_inc(&cmd->t_transport_aborted); |
c66ac9db NB |
4596 | smp_mb__after_atomic_inc(); |
4597 | cmd->scsi_status = SAM_STAT_TASK_ABORTED; | |
4598 | transport_new_cmd_failure(cmd); | |
4599 | return; | |
4600 | } | |
4601 | } | |
4602 | cmd->scsi_status = SAM_STAT_TASK_ABORTED; | |
4603 | #if 0 | |
6708bb27 | 4604 | pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x," |
a1d8b49a | 4605 | " ITT: 0x%08x\n", cmd->t_task_cdb[0], |
e3d6f909 | 4606 | cmd->se_tfo->get_task_tag(cmd)); |
c66ac9db | 4607 | #endif |
e3d6f909 | 4608 | cmd->se_tfo->queue_status(cmd); |
c66ac9db NB |
4609 | } |
4610 | ||
4611 | /* transport_generic_do_tmr(): | |
4612 | * | |
4613 | * | |
4614 | */ | |
4615 | int transport_generic_do_tmr(struct se_cmd *cmd) | |
4616 | { | |
5951146d | 4617 | struct se_device *dev = cmd->se_dev; |
c66ac9db NB |
4618 | struct se_tmr_req *tmr = cmd->se_tmr_req; |
4619 | int ret; | |
4620 | ||
4621 | switch (tmr->function) { | |
5c6cd613 | 4622 | case TMR_ABORT_TASK: |
c66ac9db NB |
4623 | tmr->response = TMR_FUNCTION_REJECTED; |
4624 | break; | |
5c6cd613 NB |
4625 | case TMR_ABORT_TASK_SET: |
4626 | case TMR_CLEAR_ACA: | |
4627 | case TMR_CLEAR_TASK_SET: | |
c66ac9db NB |
4628 | tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; |
4629 | break; | |
5c6cd613 | 4630 | case TMR_LUN_RESET: |
c66ac9db NB |
4631 | ret = core_tmr_lun_reset(dev, tmr, NULL, NULL); |
4632 | tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE : | |
4633 | TMR_FUNCTION_REJECTED; | |
4634 | break; | |
5c6cd613 | 4635 | case TMR_TARGET_WARM_RESET: |
c66ac9db NB |
4636 | tmr->response = TMR_FUNCTION_REJECTED; |
4637 | break; | |
5c6cd613 | 4638 | case TMR_TARGET_COLD_RESET: |
c66ac9db NB |
4639 | tmr->response = TMR_FUNCTION_REJECTED; |
4640 | break; | |
c66ac9db | 4641 | default: |
6708bb27 | 4642 | pr_err("Uknown TMR function: 0x%02x.\n", |
c66ac9db NB |
4643 | tmr->function); |
4644 | tmr->response = TMR_FUNCTION_REJECTED; | |
4645 | break; | |
4646 | } | |
4647 | ||
4648 | cmd->t_state = TRANSPORT_ISTATE_PROCESSING; | |
e3d6f909 | 4649 | cmd->se_tfo->queue_tm_rsp(cmd); |
c66ac9db | 4650 | |
b7b8bef7 | 4651 | transport_cmd_check_stop_to_fabric(cmd); |
c66ac9db NB |
4652 | return 0; |
4653 | } | |
4654 | ||
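/*
 * Illustrative sketch (editor's addition, not from the original source):
 * after the switch above fills tmr->response, queue_tm_rsp() is invoked so
 * the fabric can translate the response into its wire format.
 * example_fabric_queue_tm_rsp() and the mapping below are hypothetical.
 */
static int example_fabric_queue_tm_rsp(struct se_cmd *cmd)
{
	struct se_tmr_req *tmr = cmd->se_tmr_req;

	switch (tmr->response) {
	case TMR_FUNCTION_COMPLETE:
		/* e.g. LUN_RESET succeeded */
		break;
	case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
		/* e.g. ABORT_TASK_SET, CLEAR_ACA, CLEAR_TASK_SET */
		break;
	default:
		/* TMR_FUNCTION_REJECTED or another failure */
		break;
	}
	return 0;
}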
c66ac9db NB |
4655 | /* transport_processing_thread(): |
4656 | * | |
4657 | * | |
4658 | */ | |
4659 | static int transport_processing_thread(void *param) | |
4660 | { | |
5951146d | 4661 | int ret; |
c66ac9db NB |
4662 | struct se_cmd *cmd; |
4663 | struct se_device *dev = (struct se_device *) param; | |
c66ac9db NB |
4664 | |
4665 | set_user_nice(current, -20); | |
4666 | ||
4667 | while (!kthread_should_stop()) { | |
e3d6f909 AG |
4668 | ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq, |
4669 | atomic_read(&dev->dev_queue_obj.queue_cnt) || | |
c66ac9db NB |
4670 | kthread_should_stop()); |
4671 | if (ret < 0) | |
4672 | goto out; | |
4673 | ||
c66ac9db NB |
4674 | get_cmd: |
4675 | __transport_execute_tasks(dev); | |
4676 | ||
5951146d AG |
4677 | cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj); |
4678 | if (!cmd) | |
c66ac9db NB |
4679 | continue; |
4680 | ||
5951146d | 4681 | switch (cmd->t_state) { |
680b73c5 CH |
4682 | case TRANSPORT_NEW_CMD: |
4683 | BUG(); | |
4684 | break; | |
c66ac9db | 4685 | case TRANSPORT_NEW_CMD_MAP: |
6708bb27 AG |
4686 | if (!cmd->se_tfo->new_cmd_map) { |
4687 | pr_err("cmd->se_tfo->new_cmd_map is" | |
c66ac9db NB |
4688 | " NULL for TRANSPORT_NEW_CMD_MAP\n"); |
4689 | BUG(); | |
4690 | } | |
e3d6f909 | 4691 | ret = cmd->se_tfo->new_cmd_map(cmd); |
c66ac9db NB |
4692 | if (ret < 0) { |
4693 | cmd->transport_error_status = ret; | |
4499dda8 | 4694 | transport_generic_request_failure(cmd, |
c66ac9db NB |
4695 | 0, (cmd->data_direction != |
4696 | DMA_TO_DEVICE)); | |
4697 | break; | |
4698 | } | |
c66ac9db | 4699 | ret = transport_generic_new_cmd(cmd); |
f147abb4 | 4700 | if (ret < 0) { |
c66ac9db | 4701 | cmd->transport_error_status = ret; |
4499dda8 | 4702 | transport_generic_request_failure(cmd, |
c66ac9db NB |
4703 | 0, (cmd->data_direction != |
4704 | DMA_TO_DEVICE)); | |
4705 | } | |
4706 | break; | |
4707 | case TRANSPORT_PROCESS_WRITE: | |
4708 | transport_generic_process_write(cmd); | |
4709 | break; | |
c66ac9db NB |
4710 | case TRANSPORT_PROCESS_TMR: |
4711 | transport_generic_do_tmr(cmd); | |
4712 | break; | |
07bde79a | 4713 | case TRANSPORT_COMPLETE_QF_WP: |
e057f533 CH |
4714 | transport_write_pending_qf(cmd); |
4715 | break; | |
4716 | case TRANSPORT_COMPLETE_QF_OK: | |
4717 | transport_complete_qf(cmd); | |
07bde79a | 4718 | break; |
c66ac9db | 4719 | default: |
f2da9dbd CH |
4720 | pr_err("Unknown t_state: %d for ITT: 0x%08x " |
4721 | "i_state: %d on SE LUN: %u\n", | |
4722 | cmd->t_state, | |
e3d6f909 AG |
4723 | cmd->se_tfo->get_task_tag(cmd), |
4724 | cmd->se_tfo->get_cmd_state(cmd), | |
4725 | cmd->se_lun->unpacked_lun); | |
c66ac9db NB |
4726 | BUG(); |
4727 | } | |
4728 | ||
4729 | goto get_cmd; | |
4730 | } | |
4731 | ||
4732 | out: | |
ce8762f6 NB |
4733 | WARN_ON(!list_empty(&dev->state_task_list)); |
4734 | WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list)); | |
c66ac9db NB |
4735 | dev->process_thread = NULL; |
4736 | return 0; | |
4737 | } |
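/*
 * Illustrative sketch (editor's addition, not from the original source): the
 * per-device processing thread above is started with kthread_run() when a
 * struct se_device is set up, and torn down with kthread_stop(), which wakes
 * the wait_event_interruptible() loop via kthread_should_stop().
 * example_start_processing_thread() and the "LIO_example" thread name are
 * hypothetical; the real setup lives in the device registration path.
 */
static int example_start_processing_thread(struct se_device *dev)
{
	dev->process_thread = kthread_run(transport_processing_thread, dev,
					  "LIO_example");
	if (IS_ERR(dev->process_thread)) {
		pr_err("Unable to create device processing thread\n");
		return PTR_ERR(dev->process_thread);
	}
	return 0;
}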