target: target_core_configfs.h is not needed in fabric drivers
drivers/target/target_core_transport.c
1 /*******************************************************************************
2 * Filename: target_core_transport.c
3 *
4 * This file contains the Generic Target Engine Core.
5 *
6 * (c) Copyright 2002-2013 Datera, Inc.
7 *
8 * Nicholas A. Bellinger <nab@kernel.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 *
24 ******************************************************************************/
25
26 #include <linux/net.h>
27 #include <linux/delay.h>
28 #include <linux/string.h>
29 #include <linux/timer.h>
30 #include <linux/slab.h>
31 #include <linux/spinlock.h>
32 #include <linux/kthread.h>
33 #include <linux/in.h>
34 #include <linux/cdrom.h>
35 #include <linux/module.h>
36 #include <linux/ratelimit.h>
37 #include <asm/unaligned.h>
38 #include <net/sock.h>
39 #include <net/tcp.h>
40 #include <scsi/scsi.h>
41 #include <scsi/scsi_cmnd.h>
42 #include <scsi/scsi_tcq.h>
43
44 #include <target/target_core_base.h>
45 #include <target/target_core_backend.h>
46 #include <target/target_core_fabric.h>
47
48 #include "target_core_internal.h"
49 #include "target_core_alua.h"
50 #include "target_core_pr.h"
51 #include "target_core_ua.h"
52
53 #define CREATE_TRACE_POINTS
54 #include <trace/events/target.h>
55
56 static struct workqueue_struct *target_completion_wq;
57 static struct kmem_cache *se_sess_cache;
58 struct kmem_cache *se_ua_cache;
59 struct kmem_cache *t10_pr_reg_cache;
60 struct kmem_cache *t10_alua_lu_gp_cache;
61 struct kmem_cache *t10_alua_lu_gp_mem_cache;
62 struct kmem_cache *t10_alua_tg_pt_gp_cache;
63 struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
64 struct kmem_cache *t10_alua_lba_map_cache;
65 struct kmem_cache *t10_alua_lba_map_mem_cache;
66
67 static void transport_complete_task_attr(struct se_cmd *cmd);
68 static void transport_handle_queue_full(struct se_cmd *cmd,
69 struct se_device *dev);
70 static int transport_put_cmd(struct se_cmd *cmd);
71 static void target_complete_ok_work(struct work_struct *work);
72
73 int init_se_kmem_caches(void)
74 {
75 se_sess_cache = kmem_cache_create("se_sess_cache",
76 sizeof(struct se_session), __alignof__(struct se_session),
77 0, NULL);
78 if (!se_sess_cache) {
79 pr_err("kmem_cache_create() for struct se_session"
80 " failed\n");
81 goto out;
82 }
83 se_ua_cache = kmem_cache_create("se_ua_cache",
84 sizeof(struct se_ua), __alignof__(struct se_ua),
85 0, NULL);
86 if (!se_ua_cache) {
87 pr_err("kmem_cache_create() for struct se_ua failed\n");
88 goto out_free_sess_cache;
89 }
90 t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
91 sizeof(struct t10_pr_registration),
92 __alignof__(struct t10_pr_registration), 0, NULL);
93 if (!t10_pr_reg_cache) {
94 pr_err("kmem_cache_create() for struct t10_pr_registration"
95 " failed\n");
96 goto out_free_ua_cache;
97 }
98 t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
99 sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
100 0, NULL);
101 if (!t10_alua_lu_gp_cache) {
102 pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
103 " failed\n");
104 goto out_free_pr_reg_cache;
105 }
106 t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
107 sizeof(struct t10_alua_lu_gp_member),
108 __alignof__(struct t10_alua_lu_gp_member), 0, NULL);
109 if (!t10_alua_lu_gp_mem_cache) {
110 pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
111 "cache failed\n");
112 goto out_free_lu_gp_cache;
113 }
114 t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
115 sizeof(struct t10_alua_tg_pt_gp),
116 __alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
117 if (!t10_alua_tg_pt_gp_cache) {
118 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
119 "cache failed\n");
120 goto out_free_lu_gp_mem_cache;
121 }
122 t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
123 "t10_alua_tg_pt_gp_mem_cache",
124 sizeof(struct t10_alua_tg_pt_gp_member),
125 __alignof__(struct t10_alua_tg_pt_gp_member),
126 0, NULL);
127 if (!t10_alua_tg_pt_gp_mem_cache) {
128 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
129 "mem_t failed\n");
130 goto out_free_tg_pt_gp_cache;
131 }
132 t10_alua_lba_map_cache = kmem_cache_create(
133 "t10_alua_lba_map_cache",
134 sizeof(struct t10_alua_lba_map),
135 __alignof__(struct t10_alua_lba_map), 0, NULL);
136 if (!t10_alua_lba_map_cache) {
137 pr_err("kmem_cache_create() for t10_alua_lba_map_"
138 "cache failed\n");
139 goto out_free_tg_pt_gp_mem_cache;
140 }
141 t10_alua_lba_map_mem_cache = kmem_cache_create(
142 "t10_alua_lba_map_mem_cache",
143 sizeof(struct t10_alua_lba_map_member),
144 __alignof__(struct t10_alua_lba_map_member), 0, NULL);
145 if (!t10_alua_lba_map_mem_cache) {
146 pr_err("kmem_cache_create() for t10_alua_lba_map_mem_"
147 "cache failed\n");
148 goto out_free_lba_map_cache;
149 }
150
151 target_completion_wq = alloc_workqueue("target_completion",
152 WQ_MEM_RECLAIM, 0);
153 if (!target_completion_wq)
154 goto out_free_lba_map_mem_cache;
155
156 return 0;
157
158 out_free_lba_map_mem_cache:
159 kmem_cache_destroy(t10_alua_lba_map_mem_cache);
160 out_free_lba_map_cache:
161 kmem_cache_destroy(t10_alua_lba_map_cache);
162 out_free_tg_pt_gp_mem_cache:
163 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
164 out_free_tg_pt_gp_cache:
165 kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
166 out_free_lu_gp_mem_cache:
167 kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
168 out_free_lu_gp_cache:
169 kmem_cache_destroy(t10_alua_lu_gp_cache);
170 out_free_pr_reg_cache:
171 kmem_cache_destroy(t10_pr_reg_cache);
172 out_free_ua_cache:
173 kmem_cache_destroy(se_ua_cache);
174 out_free_sess_cache:
175 kmem_cache_destroy(se_sess_cache);
176 out:
177 return -ENOMEM;
178 }
179
180 void release_se_kmem_caches(void)
181 {
182 destroy_workqueue(target_completion_wq);
183 kmem_cache_destroy(se_sess_cache);
184 kmem_cache_destroy(se_ua_cache);
185 kmem_cache_destroy(t10_pr_reg_cache);
186 kmem_cache_destroy(t10_alua_lu_gp_cache);
187 kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
188 kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
189 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
190 kmem_cache_destroy(t10_alua_lba_map_cache);
191 kmem_cache_destroy(t10_alua_lba_map_mem_cache);
192 }
193
194 /* This code ensures unique mib indexes are handed out. */
195 static DEFINE_SPINLOCK(scsi_mib_index_lock);
196 static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
197
198 /*
199 * Allocate a new row index for the entry type specified
200 */
201 u32 scsi_get_new_index(scsi_index_t type)
202 {
203 u32 new_index;
204
205 BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));
206
207 spin_lock(&scsi_mib_index_lock);
208 new_index = ++scsi_mib_index[type];
209 spin_unlock(&scsi_mib_index_lock);
210
211 return new_index;
212 }
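/*
 * Editorial usage sketch (assumption, not part of this file): callers stamp
 * new objects for the SCSI MIB statistics tables with one of the
 * scsi_index_t enumerators from target_core_base.h, e.g.:
 *
 *	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
 *
 * SCSI_DEVICE_INDEX is used purely as an illustration here; see the
 * scsi_index_t definition for the authoritative list of index types.
 */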
213
214 void transport_subsystem_check_init(void)
215 {
216 int ret;
217 static int sub_api_initialized;
218
219 if (sub_api_initialized)
220 return;
221
222 ret = request_module("target_core_iblock");
223 if (ret != 0)
224 pr_err("Unable to load target_core_iblock\n");
225
226 ret = request_module("target_core_file");
227 if (ret != 0)
228 pr_err("Unable to load target_core_file\n");
229
230 ret = request_module("target_core_pscsi");
231 if (ret != 0)
232 pr_err("Unable to load target_core_pscsi\n");
233
234 ret = request_module("target_core_user");
235 if (ret != 0)
236 pr_err("Unable to load target_core_user\n");
237
238 sub_api_initialized = 1;
239 }
240
241 struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)
242 {
243 struct se_session *se_sess;
244
245 se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
246 if (!se_sess) {
247 pr_err("Unable to allocate struct se_session from"
248 " se_sess_cache\n");
249 return ERR_PTR(-ENOMEM);
250 }
251 INIT_LIST_HEAD(&se_sess->sess_list);
252 INIT_LIST_HEAD(&se_sess->sess_acl_list);
253 INIT_LIST_HEAD(&se_sess->sess_cmd_list);
254 INIT_LIST_HEAD(&se_sess->sess_wait_list);
255 spin_lock_init(&se_sess->sess_cmd_lock);
256 kref_init(&se_sess->sess_kref);
257 se_sess->sup_prot_ops = sup_prot_ops;
258
259 return se_sess;
260 }
261 EXPORT_SYMBOL(transport_init_session);
262
263 int transport_alloc_session_tags(struct se_session *se_sess,
264 unsigned int tag_num, unsigned int tag_size)
265 {
266 int rc;
267
268 se_sess->sess_cmd_map = kzalloc(tag_num * tag_size,
269 GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
270 if (!se_sess->sess_cmd_map) {
271 se_sess->sess_cmd_map = vzalloc(tag_num * tag_size);
272 if (!se_sess->sess_cmd_map) {
273 pr_err("Unable to allocate se_sess->sess_cmd_map\n");
274 return -ENOMEM;
275 }
276 }
277
278 rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num);
279 if (rc < 0) {
280 pr_err("Unable to init se_sess->sess_tag_pool,"
281 " tag_num: %u\n", tag_num);
282 if (is_vmalloc_addr(se_sess->sess_cmd_map))
283 vfree(se_sess->sess_cmd_map);
284 else
285 kfree(se_sess->sess_cmd_map);
286 se_sess->sess_cmd_map = NULL;
287 return -ENOMEM;
288 }
289
290 return 0;
291 }
292 EXPORT_SYMBOL(transport_alloc_session_tags);
293
294 struct se_session *transport_init_session_tags(unsigned int tag_num,
295 unsigned int tag_size,
296 enum target_prot_op sup_prot_ops)
297 {
298 struct se_session *se_sess;
299 int rc;
300
301 se_sess = transport_init_session(sup_prot_ops);
302 if (IS_ERR(se_sess))
303 return se_sess;
304
305 rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
306 if (rc < 0) {
307 transport_free_session(se_sess);
308 return ERR_PTR(-ENOMEM);
309 }
310
311 return se_sess;
312 }
313 EXPORT_SYMBOL(transport_init_session_tags);
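/*
 * Editorial usage sketch (assumption, not part of the driver): a fabric
 * module that embeds its per-I/O descriptor in the session tag pool would
 * typically size the pool from its own command structure.  The queue depth
 * and 'struct my_fabric_cmd' below are hypothetical:
 *
 *	struct se_session *se_sess;
 *
 *	se_sess = transport_init_session_tags(128,
 *				sizeof(struct my_fabric_cmd),
 *				TARGET_PROT_NORMAL);
 *	if (IS_ERR(se_sess))
 *		return PTR_ERR(se_sess);
 *
 * Each tag reserved from se_sess->sess_tag_pool then indexes one
 * sizeof(struct my_fabric_cmd) slice of se_sess->sess_cmd_map.
 */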
314
315 /*
316 * Called with struct se_portal_group->session_lock held via spin_lock_irqsave().
317 */
318 void __transport_register_session(
319 struct se_portal_group *se_tpg,
320 struct se_node_acl *se_nacl,
321 struct se_session *se_sess,
322 void *fabric_sess_ptr)
323 {
324 const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
325 unsigned char buf[PR_REG_ISID_LEN];
326
327 se_sess->se_tpg = se_tpg;
328 se_sess->fabric_sess_ptr = fabric_sess_ptr;
329 /*
330 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session.
331 *
332 * Only set for struct se_session's that will actually be moving I/O,
333 * e.g. *NOT* discovery sessions.
334 */
335 if (se_nacl) {
336 /*
337 *
338 * Determine if fabric allows for T10-PI feature bits exposed to
339 * initiators for device backends with !dev->dev_attrib.pi_prot_type.
340 *
341 * If so, then always save prot_type on a per se_node_acl node
342 * basis and re-instate the previous sess_prot_type to avoid
343 * disabling PI from below any previously initiator side
344 * registered LUNs.
345 */
346 if (se_nacl->saved_prot_type)
347 se_sess->sess_prot_type = se_nacl->saved_prot_type;
348 else if (tfo->tpg_check_prot_fabric_only)
349 se_sess->sess_prot_type = se_nacl->saved_prot_type =
350 tfo->tpg_check_prot_fabric_only(se_tpg);
351 /*
352 * If the fabric module supports an ISID based TransportID,
353 * save this value in binary from the fabric I_T Nexus now.
354 */
355 if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
356 memset(&buf[0], 0, PR_REG_ISID_LEN);
357 se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
358 &buf[0], PR_REG_ISID_LEN);
359 se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
360 }
361 kref_get(&se_nacl->acl_kref);
362
363 spin_lock_irq(&se_nacl->nacl_sess_lock);
364 /*
365 * The se_nacl->nacl_sess pointer will be set to the
366 * last active I_T Nexus for each struct se_node_acl.
367 */
368 se_nacl->nacl_sess = se_sess;
369
370 list_add_tail(&se_sess->sess_acl_list,
371 &se_nacl->acl_sess_list);
372 spin_unlock_irq(&se_nacl->nacl_sess_lock);
373 }
374 list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
375
376 pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
377 se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
378 }
379 EXPORT_SYMBOL(__transport_register_session);
380
381 void transport_register_session(
382 struct se_portal_group *se_tpg,
383 struct se_node_acl *se_nacl,
384 struct se_session *se_sess,
385 void *fabric_sess_ptr)
386 {
387 unsigned long flags;
388
389 spin_lock_irqsave(&se_tpg->session_lock, flags);
390 __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
391 spin_unlock_irqrestore(&se_tpg->session_lock, flags);
392 }
393 EXPORT_SYMBOL(transport_register_session);
394
395 static void target_release_session(struct kref *kref)
396 {
397 struct se_session *se_sess = container_of(kref,
398 struct se_session, sess_kref);
399 struct se_portal_group *se_tpg = se_sess->se_tpg;
400
401 se_tpg->se_tpg_tfo->close_session(se_sess);
402 }
403
404 void target_get_session(struct se_session *se_sess)
405 {
406 kref_get(&se_sess->sess_kref);
407 }
408 EXPORT_SYMBOL(target_get_session);
409
410 void target_put_session(struct se_session *se_sess)
411 {
412 struct se_portal_group *tpg = se_sess->se_tpg;
413
414 if (tpg->se_tpg_tfo->put_session != NULL) {
415 tpg->se_tpg_tfo->put_session(se_sess);
416 return;
417 }
418 kref_put(&se_sess->sess_kref, target_release_session);
419 }
420 EXPORT_SYMBOL(target_put_session);
421
422 ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
423 {
424 struct se_session *se_sess;
425 ssize_t len = 0;
426
427 spin_lock_bh(&se_tpg->session_lock);
428 list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
429 if (!se_sess->se_node_acl)
430 continue;
431 if (!se_sess->se_node_acl->dynamic_node_acl)
432 continue;
433 if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE)
434 break;
435
436 len += snprintf(page + len, PAGE_SIZE - len, "%s\n",
437 se_sess->se_node_acl->initiatorname);
438 len += 1; /* Include NULL terminator */
439 }
440 spin_unlock_bh(&se_tpg->session_lock);
441
442 return len;
443 }
444 EXPORT_SYMBOL(target_show_dynamic_sessions);
445
446 static void target_complete_nacl(struct kref *kref)
447 {
448 struct se_node_acl *nacl = container_of(kref,
449 struct se_node_acl, acl_kref);
450
451 complete(&nacl->acl_free_comp);
452 }
453
454 void target_put_nacl(struct se_node_acl *nacl)
455 {
456 kref_put(&nacl->acl_kref, target_complete_nacl);
457 }
458
459 void transport_deregister_session_configfs(struct se_session *se_sess)
460 {
461 struct se_node_acl *se_nacl;
462 unsigned long flags;
463 /*
464 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
465 */
466 se_nacl = se_sess->se_node_acl;
467 if (se_nacl) {
468 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
469 if (se_nacl->acl_stop == 0)
470 list_del(&se_sess->sess_acl_list);
471 /*
472 * If the session list is empty, then clear the pointer.
473 * Otherwise, set the struct se_session pointer from the tail
474 * element of the per struct se_node_acl active session list.
475 */
476 if (list_empty(&se_nacl->acl_sess_list))
477 se_nacl->nacl_sess = NULL;
478 else {
479 se_nacl->nacl_sess = container_of(
480 se_nacl->acl_sess_list.prev,
481 struct se_session, sess_acl_list);
482 }
483 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
484 }
485 }
486 EXPORT_SYMBOL(transport_deregister_session_configfs);
487
488 void transport_free_session(struct se_session *se_sess)
489 {
490 if (se_sess->sess_cmd_map) {
491 percpu_ida_destroy(&se_sess->sess_tag_pool);
492 if (is_vmalloc_addr(se_sess->sess_cmd_map))
493 vfree(se_sess->sess_cmd_map);
494 else
495 kfree(se_sess->sess_cmd_map);
496 }
497 kmem_cache_free(se_sess_cache, se_sess);
498 }
499 EXPORT_SYMBOL(transport_free_session);
500
501 void transport_deregister_session(struct se_session *se_sess)
502 {
503 struct se_portal_group *se_tpg = se_sess->se_tpg;
504 const struct target_core_fabric_ops *se_tfo;
505 struct se_node_acl *se_nacl;
506 unsigned long flags;
507 bool comp_nacl = true;
508
509 if (!se_tpg) {
510 transport_free_session(se_sess);
511 return;
512 }
513 se_tfo = se_tpg->se_tpg_tfo;
514
515 spin_lock_irqsave(&se_tpg->session_lock, flags);
516 list_del(&se_sess->sess_list);
517 se_sess->se_tpg = NULL;
518 se_sess->fabric_sess_ptr = NULL;
519 spin_unlock_irqrestore(&se_tpg->session_lock, flags);
520
521 /*
522 * Determine if we need to do extra work for this initiator node's
523 * struct se_node_acl if it had been previously dynamically generated.
524 */
525 se_nacl = se_sess->se_node_acl;
526
527 spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
528 if (se_nacl && se_nacl->dynamic_node_acl) {
529 if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
530 list_del(&se_nacl->acl_list);
531 se_tpg->num_node_acls--;
532 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
533 core_tpg_wait_for_nacl_pr_ref(se_nacl);
534 core_free_device_list_for_node(se_nacl, se_tpg);
535 kfree(se_nacl);
536
537 comp_nacl = false;
538 spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
539 }
540 }
541 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
542
543 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
544 se_tpg->se_tpg_tfo->get_fabric_name());
545 /*
546 * If last kref is dropping now for an explicit NodeACL, awake sleeping
547 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
548 * removal context.
549 */
550 if (se_nacl && comp_nacl)
551 target_put_nacl(se_nacl);
552
553 transport_free_session(se_sess);
554 }
555 EXPORT_SYMBOL(transport_deregister_session);
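/*
 * Editorial note (typical teardown, assuming the fabric registered the
 * session against a se_node_acl): on connection close a fabric driver
 * normally unhooks the configfs linkage first and then drops the session:
 *
 *	transport_deregister_session_configfs(se_sess);
 *	transport_deregister_session(se_sess);
 *
 * As implemented above, transport_deregister_session() also frees a
 * dynamically generated node ACL when demo-mode caching is disabled.
 */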
556
557 /*
558 * Called with cmd->t_state_lock held.
559 */
560 static void target_remove_from_state_list(struct se_cmd *cmd)
561 {
562 struct se_device *dev = cmd->se_dev;
563 unsigned long flags;
564
565 if (!dev)
566 return;
567
568 if (cmd->transport_state & CMD_T_BUSY)
569 return;
570
571 spin_lock_irqsave(&dev->execute_task_lock, flags);
572 if (cmd->state_active) {
573 list_del(&cmd->state_list);
574 cmd->state_active = false;
575 }
576 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
577 }
578
579 static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
580 bool write_pending)
581 {
582 unsigned long flags;
583
584 spin_lock_irqsave(&cmd->t_state_lock, flags);
585 if (write_pending)
586 cmd->t_state = TRANSPORT_WRITE_PENDING;
587
588 if (remove_from_lists) {
589 target_remove_from_state_list(cmd);
590
591 /*
592 * Clear struct se_cmd->se_lun before the handoff to FE.
593 */
594 cmd->se_lun = NULL;
595 }
596
597 /*
598 * Determine if frontend context caller is requesting the stopping of
599 * this command for frontend exceptions.
600 */
601 if (cmd->transport_state & CMD_T_STOP) {
602 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
603 __func__, __LINE__, cmd->tag);
604
605 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
606
607 complete_all(&cmd->t_transport_stop_comp);
608 return 1;
609 }
610
611 cmd->transport_state &= ~CMD_T_ACTIVE;
612 if (remove_from_lists) {
613 /*
614 * Some fabric modules like tcm_loop can release their
615 * internally allocated I/O reference and the struct se_cmd
616 * itself at this point.
617 *
618 * Fabric modules are expected to return '1' here if the
619 * se_cmd being passed is released at this point,
620 * or zero if not being released.
621 */
622 if (cmd->se_tfo->check_stop_free != NULL) {
623 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
624 return cmd->se_tfo->check_stop_free(cmd);
625 }
626 }
627
628 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
629 return 0;
630 }
631
632 static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
633 {
634 return transport_cmd_check_stop(cmd, true, false);
635 }
636
637 static void transport_lun_remove_cmd(struct se_cmd *cmd)
638 {
639 struct se_lun *lun = cmd->se_lun;
640
641 if (!lun)
642 return;
643
644 if (cmpxchg(&cmd->lun_ref_active, true, false))
645 percpu_ref_put(&lun->lun_ref);
646 }
647
648 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
649 {
650 if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
651 transport_lun_remove_cmd(cmd);
652 /*
653 * Allow the fabric driver to unmap any resources before
654 * releasing the descriptor via TFO->release_cmd()
655 */
656 if (remove)
657 cmd->se_tfo->aborted_task(cmd);
658
659 if (transport_cmd_check_stop_to_fabric(cmd))
660 return;
661 if (remove)
662 transport_put_cmd(cmd);
663 }
664
665 static void target_complete_failure_work(struct work_struct *work)
666 {
667 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
668
669 transport_generic_request_failure(cmd,
670 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
671 }
672
673 /*
674 * Used when asking transport to copy Sense Data from the underlying
675 * Linux/SCSI struct scsi_cmnd
676 */
677 static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
678 {
679 struct se_device *dev = cmd->se_dev;
680
681 WARN_ON(!cmd->se_lun);
682
683 if (!dev)
684 return NULL;
685
686 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
687 return NULL;
688
689 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
690
691 pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n",
692 dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
693 return cmd->sense_buffer;
694 }
695
696 void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
697 {
698 struct se_device *dev = cmd->se_dev;
699 int success = scsi_status == GOOD;
700 unsigned long flags;
701
702 cmd->scsi_status = scsi_status;
703
704
705 spin_lock_irqsave(&cmd->t_state_lock, flags);
706 cmd->transport_state &= ~CMD_T_BUSY;
707
708 if (dev && dev->transport->transport_complete) {
709 dev->transport->transport_complete(cmd,
710 cmd->t_data_sg,
711 transport_get_sense_buffer(cmd));
712 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
713 success = 1;
714 }
715
716 /*
717 * See if we are waiting to complete for an exception condition.
718 */
719 if (cmd->transport_state & CMD_T_REQUEST_STOP) {
720 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
721 complete(&cmd->task_stop_comp);
722 return;
723 }
724
725 /*
726 * Check for case where an explicit ABORT_TASK has been received
727 * and transport_wait_for_tasks() will be waiting for completion..
728 */
729 if (cmd->transport_state & CMD_T_ABORTED &&
730 cmd->transport_state & CMD_T_STOP) {
731 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
732 complete_all(&cmd->t_transport_stop_comp);
733 return;
734 } else if (!success) {
735 INIT_WORK(&cmd->work, target_complete_failure_work);
736 } else {
737 INIT_WORK(&cmd->work, target_complete_ok_work);
738 }
739
740 cmd->t_state = TRANSPORT_COMPLETE;
741 cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
742 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
743
744 queue_work(target_completion_wq, &cmd->work);
745 }
746 EXPORT_SYMBOL(target_complete_cmd);
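/*
 * Editorial usage sketch (assumption): a backend driver reports SAM status
 * from its completion path, for example:
 *
 *	target_complete_cmd(cmd, SAM_STAT_GOOD);
 *
 * or SAM_STAT_CHECK_CONDITION after a failed I/O.  The heavy lifting is
 * deferred to target_completion_wq, so calling this from interrupt context
 * (e.g. a bio end_io handler) is expected.
 */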
747
748 void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
749 {
750 if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
751 if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
752 cmd->residual_count += cmd->data_length - length;
753 } else {
754 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
755 cmd->residual_count = cmd->data_length - length;
756 }
757
758 cmd->data_length = length;
759 }
760
761 target_complete_cmd(cmd, scsi_status);
762 }
763 EXPORT_SYMBOL(target_complete_cmd_with_length);
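/*
 * Editorial worked example (illustration only): for an INQUIRY with a 255
 * byte allocation length (cmd->data_length == 255) where only 36 valid
 * bytes were produced, a backend would call:
 *
 *	target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, 36);
 *
 * which sets SCF_UNDERFLOW_BIT, cmd->residual_count = 219 and trims
 * cmd->data_length to 36 before the normal completion path runs.
 */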
764
765 static void target_add_to_state_list(struct se_cmd *cmd)
766 {
767 struct se_device *dev = cmd->se_dev;
768 unsigned long flags;
769
770 spin_lock_irqsave(&dev->execute_task_lock, flags);
771 if (!cmd->state_active) {
772 list_add_tail(&cmd->state_list, &dev->state_list);
773 cmd->state_active = true;
774 }
775 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
776 }
777
778 /*
779 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
780 */
781 static void transport_write_pending_qf(struct se_cmd *cmd);
782 static void transport_complete_qf(struct se_cmd *cmd);
783
784 void target_qf_do_work(struct work_struct *work)
785 {
786 struct se_device *dev = container_of(work, struct se_device,
787 qf_work_queue);
788 LIST_HEAD(qf_cmd_list);
789 struct se_cmd *cmd, *cmd_tmp;
790
791 spin_lock_irq(&dev->qf_cmd_lock);
792 list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
793 spin_unlock_irq(&dev->qf_cmd_lock);
794
795 list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
796 list_del(&cmd->se_qf_node);
797 atomic_dec_mb(&dev->dev_qf_count);
798
799 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
800 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
801 (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
802 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
803 : "UNKNOWN");
804
805 if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
806 transport_write_pending_qf(cmd);
807 else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK)
808 transport_complete_qf(cmd);
809 }
810 }
811
812 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
813 {
814 switch (cmd->data_direction) {
815 case DMA_NONE:
816 return "NONE";
817 case DMA_FROM_DEVICE:
818 return "READ";
819 case DMA_TO_DEVICE:
820 return "WRITE";
821 case DMA_BIDIRECTIONAL:
822 return "BIDI";
823 default:
824 break;
825 }
826
827 return "UNKNOWN";
828 }
829
830 void transport_dump_dev_state(
831 struct se_device *dev,
832 char *b,
833 int *bl)
834 {
835 *bl += sprintf(b + *bl, "Status: ");
836 if (dev->export_count)
837 *bl += sprintf(b + *bl, "ACTIVATED");
838 else
839 *bl += sprintf(b + *bl, "DEACTIVATED");
840
841 *bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth);
842 *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n",
843 dev->dev_attrib.block_size,
844 dev->dev_attrib.hw_max_sectors);
845 *bl += sprintf(b + *bl, " ");
846 }
847
848 void transport_dump_vpd_proto_id(
849 struct t10_vpd *vpd,
850 unsigned char *p_buf,
851 int p_buf_len)
852 {
853 unsigned char buf[VPD_TMP_BUF_SIZE];
854 int len;
855
856 memset(buf, 0, VPD_TMP_BUF_SIZE);
857 len = sprintf(buf, "T10 VPD Protocol Identifier: ");
858
859 switch (vpd->protocol_identifier) {
860 case 0x00:
861 sprintf(buf+len, "Fibre Channel\n");
862 break;
863 case 0x10:
864 sprintf(buf+len, "Parallel SCSI\n");
865 break;
866 case 0x20:
867 sprintf(buf+len, "SSA\n");
868 break;
869 case 0x30:
870 sprintf(buf+len, "IEEE 1394\n");
871 break;
872 case 0x40:
873 sprintf(buf+len, "SCSI Remote Direct Memory Access"
874 " Protocol\n");
875 break;
876 case 0x50:
877 sprintf(buf+len, "Internet SCSI (iSCSI)\n");
878 break;
879 case 0x60:
880 sprintf(buf+len, "SAS Serial SCSI Protocol\n");
881 break;
882 case 0x70:
883 sprintf(buf+len, "Automation/Drive Interface Transport"
884 " Protocol\n");
885 break;
886 case 0x80:
887 sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
888 break;
889 default:
890 sprintf(buf+len, "Unknown 0x%02x\n",
891 vpd->protocol_identifier);
892 break;
893 }
894
895 if (p_buf)
896 strncpy(p_buf, buf, p_buf_len);
897 else
898 pr_debug("%s", buf);
899 }
900
901 void
902 transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
903 {
904 /*
905 * Check if the Protocol Identifier Valid (PIV) bit is set..
906 *
907 * from spc3r23.pdf section 7.5.1
908 */
909 if (page_83[1] & 0x80) {
910 vpd->protocol_identifier = (page_83[0] & 0xf0);
911 vpd->protocol_identifier_set = 1;
912 transport_dump_vpd_proto_id(vpd, NULL, 0);
913 }
914 }
915 EXPORT_SYMBOL(transport_set_vpd_proto_id);
916
917 int transport_dump_vpd_assoc(
918 struct t10_vpd *vpd,
919 unsigned char *p_buf,
920 int p_buf_len)
921 {
922 unsigned char buf[VPD_TMP_BUF_SIZE];
923 int ret = 0;
924 int len;
925
926 memset(buf, 0, VPD_TMP_BUF_SIZE);
927 len = sprintf(buf, "T10 VPD Identifier Association: ");
928
929 switch (vpd->association) {
930 case 0x00:
931 sprintf(buf+len, "addressed logical unit\n");
932 break;
933 case 0x10:
934 sprintf(buf+len, "target port\n");
935 break;
936 case 0x20:
937 sprintf(buf+len, "SCSI target device\n");
938 break;
939 default:
940 sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
941 ret = -EINVAL;
942 break;
943 }
944
945 if (p_buf)
946 strncpy(p_buf, buf, p_buf_len);
947 else
948 pr_debug("%s", buf);
949
950 return ret;
951 }
952
953 int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
954 {
955 /*
956 * The VPD identification association..
957 *
958 * from spc3r23.pdf Section 7.6.3.1 Table 297
959 */
960 vpd->association = (page_83[1] & 0x30);
961 return transport_dump_vpd_assoc(vpd, NULL, 0);
962 }
963 EXPORT_SYMBOL(transport_set_vpd_assoc);
964
965 int transport_dump_vpd_ident_type(
966 struct t10_vpd *vpd,
967 unsigned char *p_buf,
968 int p_buf_len)
969 {
970 unsigned char buf[VPD_TMP_BUF_SIZE];
971 int ret = 0;
972 int len;
973
974 memset(buf, 0, VPD_TMP_BUF_SIZE);
975 len = sprintf(buf, "T10 VPD Identifier Type: ");
976
977 switch (vpd->device_identifier_type) {
978 case 0x00:
979 sprintf(buf+len, "Vendor specific\n");
980 break;
981 case 0x01:
982 sprintf(buf+len, "T10 Vendor ID based\n");
983 break;
984 case 0x02:
985 sprintf(buf+len, "EUI-64 based\n");
986 break;
987 case 0x03:
988 sprintf(buf+len, "NAA\n");
989 break;
990 case 0x04:
991 sprintf(buf+len, "Relative target port identifier\n");
992 break;
993 case 0x08:
994 sprintf(buf+len, "SCSI name string\n");
995 break;
996 default:
997 sprintf(buf+len, "Unsupported: 0x%02x\n",
998 vpd->device_identifier_type);
999 ret = -EINVAL;
1000 break;
1001 }
1002
1003 if (p_buf) {
1004 if (p_buf_len < strlen(buf)+1)
1005 return -EINVAL;
1006 strncpy(p_buf, buf, p_buf_len);
1007 } else {
1008 pr_debug("%s", buf);
1009 }
1010
1011 return ret;
1012 }
1013
1014 int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
1015 {
1016 /*
1017 * The VPD identifier type..
1018 *
1019 * from spc3r23.pdf Section 7.6.3.1 Table 298
1020 */
1021 vpd->device_identifier_type = (page_83[1] & 0x0f);
1022 return transport_dump_vpd_ident_type(vpd, NULL, 0);
1023 }
1024 EXPORT_SYMBOL(transport_set_vpd_ident_type);
1025
1026 int transport_dump_vpd_ident(
1027 struct t10_vpd *vpd,
1028 unsigned char *p_buf,
1029 int p_buf_len)
1030 {
1031 unsigned char buf[VPD_TMP_BUF_SIZE];
1032 int ret = 0;
1033
1034 memset(buf, 0, VPD_TMP_BUF_SIZE);
1035
1036 switch (vpd->device_identifier_code_set) {
1037 case 0x01: /* Binary */
1038 snprintf(buf, sizeof(buf),
1039 "T10 VPD Binary Device Identifier: %s\n",
1040 &vpd->device_identifier[0]);
1041 break;
1042 case 0x02: /* ASCII */
1043 snprintf(buf, sizeof(buf),
1044 "T10 VPD ASCII Device Identifier: %s\n",
1045 &vpd->device_identifier[0]);
1046 break;
1047 case 0x03: /* UTF-8 */
1048 snprintf(buf, sizeof(buf),
1049 "T10 VPD UTF-8 Device Identifier: %s\n",
1050 &vpd->device_identifier[0]);
1051 break;
1052 default:
1053 sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
1054 " 0x%02x", vpd->device_identifier_code_set);
1055 ret = -EINVAL;
1056 break;
1057 }
1058
1059 if (p_buf)
1060 strncpy(p_buf, buf, p_buf_len);
1061 else
1062 pr_debug("%s", buf);
1063
1064 return ret;
1065 }
1066
1067 int
1068 transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
1069 {
1070 static const char hex_str[] = "0123456789abcdef";
1071 int j = 0, i = 4; /* offset to start of the identifier */
1072
1073 /*
1074 * The VPD Code Set (encoding)
1075 *
1076 * from spc3r23.pdf Section 7.6.3.1 Table 296
1077 */
1078 vpd->device_identifier_code_set = (page_83[0] & 0x0f);
1079 switch (vpd->device_identifier_code_set) {
1080 case 0x01: /* Binary */
1081 vpd->device_identifier[j++] =
1082 hex_str[vpd->device_identifier_type];
1083 while (i < (4 + page_83[3])) {
1084 vpd->device_identifier[j++] =
1085 hex_str[(page_83[i] & 0xf0) >> 4];
1086 vpd->device_identifier[j++] =
1087 hex_str[page_83[i] & 0x0f];
1088 i++;
1089 }
1090 break;
1091 case 0x02: /* ASCII */
1092 case 0x03: /* UTF-8 */
1093 while (i < (4 + page_83[3]))
1094 vpd->device_identifier[j++] = page_83[i++];
1095 break;
1096 default:
1097 break;
1098 }
1099
1100 return transport_dump_vpd_ident(vpd, NULL, 0);
1101 }
1102 EXPORT_SYMBOL(transport_set_vpd_ident);
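/*
 * Editorial worked example (illustration only): for a VPD page 0x83
 * designation descriptor whose header bytes are page_83[0] = 0x01 and
 * page_83[1] = 0x93, the helpers above decode:
 *
 *	code set    = page_83[0] & 0x0f = 0x01  (binary)
 *	PIV         = page_83[1] & 0x80         (set, protocol id valid)
 *	association = page_83[1] & 0x30 = 0x10  (target port)
 *	type        = page_83[1] & 0x0f = 0x03  (NAA)
 *
 * transport_set_vpd_ident() then hex-encodes page_83[3] identifier bytes
 * starting at offset 4 into vpd->device_identifier.
 */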
1103
1104 sense_reason_t
1105 target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
1106 {
1107 struct se_device *dev = cmd->se_dev;
1108
1109 if (cmd->unknown_data_length) {
1110 cmd->data_length = size;
1111 } else if (size != cmd->data_length) {
1112 pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
1113 " %u does not match SCSI CDB Length: %u for SAM Opcode:"
1114 " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
1115 cmd->data_length, size, cmd->t_task_cdb[0]);
1116
1117 if (cmd->data_direction == DMA_TO_DEVICE) {
1118 pr_err("Rejecting underflow/overflow"
1119 " WRITE data\n");
1120 return TCM_INVALID_CDB_FIELD;
1121 }
1122 /*
1123 * Reject READ_* or WRITE_* with overflow/underflow for
1124 * type SCF_SCSI_DATA_CDB.
1125 */
1126 if (dev->dev_attrib.block_size != 512) {
1127 pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
1128 " CDB on non 512-byte sector setup subsystem"
1129 " plugin: %s\n", dev->transport->name);
1130 /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
1131 return TCM_INVALID_CDB_FIELD;
1132 }
1133 /*
1134 * For the overflow case keep the existing fabric provided
1135 * ->data_length. Otherwise for the underflow case, reset
1136 * ->data_length to the smaller SCSI expected data transfer
1137 * length.
1138 */
1139 if (size > cmd->data_length) {
1140 cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
1141 cmd->residual_count = (size - cmd->data_length);
1142 } else {
1143 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
1144 cmd->residual_count = (cmd->data_length - size);
1145 cmd->data_length = size;
1146 }
1147 }
1148
1149 return 0;
1150
1151 }
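/*
 * Editorial worked example (illustration only): a READ_10 for 8 blocks on a
 * 512-byte block_size device implies size == 4096.  If the fabric provided
 * cmd->data_length == 8192, the underflow branch above sets
 * SCF_UNDERFLOW_BIT, cmd->residual_count = 4096 and trims cmd->data_length
 * to 4096.  With cmd->data_length == 2048 instead, the overflow branch sets
 * SCF_OVERFLOW_BIT with cmd->residual_count = 2048 and keeps the fabric
 * provided data_length.  WRITE mismatches are rejected outright with
 * TCM_INVALID_CDB_FIELD, as coded above.
 */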
1152
1153 /*
1154 * Used by fabric modules containing a local struct se_cmd within their
1155 * fabric dependent per I/O descriptor.
1156 *
1157 * Preserves the value of @cmd->tag.
1158 */
1159 void transport_init_se_cmd(
1160 struct se_cmd *cmd,
1161 const struct target_core_fabric_ops *tfo,
1162 struct se_session *se_sess,
1163 u32 data_length,
1164 int data_direction,
1165 int task_attr,
1166 unsigned char *sense_buffer)
1167 {
1168 INIT_LIST_HEAD(&cmd->se_delayed_node);
1169 INIT_LIST_HEAD(&cmd->se_qf_node);
1170 INIT_LIST_HEAD(&cmd->se_cmd_list);
1171 INIT_LIST_HEAD(&cmd->state_list);
1172 init_completion(&cmd->t_transport_stop_comp);
1173 init_completion(&cmd->cmd_wait_comp);
1174 init_completion(&cmd->task_stop_comp);
1175 spin_lock_init(&cmd->t_state_lock);
1176 kref_init(&cmd->cmd_kref);
1177 cmd->transport_state = CMD_T_DEV_ACTIVE;
1178
1179 cmd->se_tfo = tfo;
1180 cmd->se_sess = se_sess;
1181 cmd->data_length = data_length;
1182 cmd->data_direction = data_direction;
1183 cmd->sam_task_attr = task_attr;
1184 cmd->sense_buffer = sense_buffer;
1185
1186 cmd->state_active = false;
1187 }
1188 EXPORT_SYMBOL(transport_init_se_cmd);
1189
1190 static sense_reason_t
1191 transport_check_alloc_task_attr(struct se_cmd *cmd)
1192 {
1193 struct se_device *dev = cmd->se_dev;
1194
1195 /*
1196 * Check if SAM Task Attribute emulation is enabled for this
1197 * struct se_device storage object
1198 */
1199 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
1200 return 0;
1201
1202 if (cmd->sam_task_attr == TCM_ACA_TAG) {
1203 pr_debug("SAM Task Attribute ACA"
1204 " emulation is not supported\n");
1205 return TCM_INVALID_CDB_FIELD;
1206 }
1207 /*
1208 * Used to determine when ORDERED commands should go from
1209 * Dormant to Active status.
1210 */
1211 cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
1212 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
1213 cmd->se_ordered_id, cmd->sam_task_attr,
1214 dev->transport->name);
1215 return 0;
1216 }
1217
1218 sense_reason_t
1219 target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
1220 {
1221 struct se_device *dev = cmd->se_dev;
1222 sense_reason_t ret;
1223
1224 /*
1225 * Ensure that the received CDB is less than the max (252 + 8) bytes
1226 * for VARIABLE_LENGTH_CMD
1227 */
1228 if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
1229 pr_err("Received SCSI CDB with command_size: %d that"
1230 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1231 scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
1232 return TCM_INVALID_CDB_FIELD;
1233 }
1234 /*
1235 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
1236 * allocate the additional extended CDB buffer now.. Otherwise
1237 * setup the pointer from __t_task_cdb to t_task_cdb.
1238 */
1239 if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
1240 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
1241 GFP_KERNEL);
1242 if (!cmd->t_task_cdb) {
1243 pr_err("Unable to allocate cmd->t_task_cdb"
1244 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
1245 scsi_command_size(cdb),
1246 (unsigned long)sizeof(cmd->__t_task_cdb));
1247 return TCM_OUT_OF_RESOURCES;
1248 }
1249 } else
1250 cmd->t_task_cdb = &cmd->__t_task_cdb[0];
1251 /*
1252 * Copy the original CDB into cmd->t_task_cdb.
1253 */
1254 memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
1255
1256 trace_target_sequencer_start(cmd);
1257
1258 /*
1259 * Check for an existing UNIT ATTENTION condition
1260 */
1261 ret = target_scsi3_ua_check(cmd);
1262 if (ret)
1263 return ret;
1264
1265 ret = target_alua_state_check(cmd);
1266 if (ret)
1267 return ret;
1268
1269 ret = target_check_reservation(cmd);
1270 if (ret) {
1271 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1272 return ret;
1273 }
1274
1275 ret = dev->transport->parse_cdb(cmd);
1276 if (ret)
1277 return ret;
1278
1279 ret = transport_check_alloc_task_attr(cmd);
1280 if (ret)
1281 return ret;
1282
1283 cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
1284
1285 spin_lock(&cmd->se_lun->lun_sep_lock);
1286 if (cmd->se_lun->lun_sep)
1287 cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
1288 spin_unlock(&cmd->se_lun->lun_sep_lock);
1289 return 0;
1290 }
1291 EXPORT_SYMBOL(target_setup_cmd_from_cdb);
1292
1293 /*
1294 * Used by fabric module frontends to queue tasks directly.
1295 * May only be used from process context.
1296 */
1297 int transport_handle_cdb_direct(
1298 struct se_cmd *cmd)
1299 {
1300 sense_reason_t ret;
1301
1302 if (!cmd->se_lun) {
1303 dump_stack();
1304 pr_err("cmd->se_lun is NULL\n");
1305 return -EINVAL;
1306 }
1307 if (in_interrupt()) {
1308 dump_stack();
1309 pr_err("transport_generic_handle_cdb cannot be called"
1310 " from interrupt context\n");
1311 return -EINVAL;
1312 }
1313 /*
1314 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
1315 * outstanding descriptors are handled correctly during shutdown via
1316 * transport_wait_for_tasks()
1317 *
1318 * Also, we don't take cmd->t_state_lock here as we only expect
1319 * this to be called for initial descriptor submission.
1320 */
1321 cmd->t_state = TRANSPORT_NEW_CMD;
1322 cmd->transport_state |= CMD_T_ACTIVE;
1323
1324 /*
1325 * transport_generic_new_cmd() is already handling QUEUE_FULL,
1326 * so follow TRANSPORT_NEW_CMD processing thread context usage
1327 * and call transport_generic_request_failure() if necessary..
1328 */
1329 ret = transport_generic_new_cmd(cmd);
1330 if (ret)
1331 transport_generic_request_failure(cmd, ret);
1332 return 0;
1333 }
1334 EXPORT_SYMBOL(transport_handle_cdb_direct);
1335
1336 sense_reason_t
1337 transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
1338 u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
1339 {
1340 if (!sgl || !sgl_count)
1341 return 0;
1342
1343 /*
1344 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
1345 * scatterlists already have been set to follow what the fabric
1346 * passes for the original expected data transfer length.
1347 */
1348 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
1349 pr_warn("Rejecting SCSI DATA overflow for fabric using"
1350 " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
1351 return TCM_INVALID_CDB_FIELD;
1352 }
1353
1354 cmd->t_data_sg = sgl;
1355 cmd->t_data_nents = sgl_count;
1356 cmd->t_bidi_data_sg = sgl_bidi;
1357 cmd->t_bidi_data_nents = sgl_bidi_count;
1358
1359 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
1360 return 0;
1361 }
1362
1363 /*
1364 * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
1365 * se_cmd + use pre-allocated SGL memory.
1366 *
1367 * @se_cmd: command descriptor to submit
1368 * @se_sess: associated se_sess for endpoint
1369 * @cdb: pointer to SCSI CDB
1370 * @sense: pointer to SCSI sense buffer
1371 * @unpacked_lun: unpacked LUN to reference for struct se_lun
1372 * @data_length: fabric expected data transfer length
1373 * @task_attr: SAM task attribute
1374 * @data_dir: DMA data direction
1375 * @flags: flags for command submission from target_sc_flags_tables
1376 * @sgl: struct scatterlist memory for unidirectional mapping
1377 * @sgl_count: scatterlist count for unidirectional mapping
1378 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
1379 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
1380 * @sgl_prot: struct scatterlist memory protection information
1381 * @sgl_prot_count: scatterlist count for protection information
1382 *
1383 * Task tags are supported if the caller has set @se_cmd->tag.
1384 *
1385 * Returns non zero to signal active I/O shutdown failure. All other
1386 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
1387 * but still return zero here.
1388 *
1389 * This may only be called from process context, and also currently
1390 * assumes internal allocation of fabric payload buffer by target-core.
1391 */
1392 int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
1393 unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
1394 u32 data_length, int task_attr, int data_dir, int flags,
1395 struct scatterlist *sgl, u32 sgl_count,
1396 struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
1397 struct scatterlist *sgl_prot, u32 sgl_prot_count)
1398 {
1399 struct se_portal_group *se_tpg;
1400 sense_reason_t rc;
1401 int ret;
1402
1403 se_tpg = se_sess->se_tpg;
1404 BUG_ON(!se_tpg);
1405 BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
1406 BUG_ON(in_interrupt());
1407 /*
1408 * Initialize se_cmd for target operation. From this point
1409 * exceptions are handled by sending exception status via
1410 * target_core_fabric_ops->queue_status() callback
1411 */
1412 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1413 data_length, data_dir, task_attr, sense);
1414 if (flags & TARGET_SCF_UNKNOWN_SIZE)
1415 se_cmd->unknown_data_length = 1;
1416 /*
1417 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
1418 * se_sess->sess_cmd_list. A second kref_get here is necessary
1419 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
1420 * kref_put() to happen during fabric packet acknowledgement.
1421 */
1422 ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
1423 if (ret)
1424 return ret;
1425 /*
1426 * Signal bidirectional data payloads to target-core
1427 */
1428 if (flags & TARGET_SCF_BIDI_OP)
1429 se_cmd->se_cmd_flags |= SCF_BIDI;
1430 /*
1431 * Locate se_lun pointer and attach it to struct se_cmd
1432 */
1433 rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
1434 if (rc) {
1435 transport_send_check_condition_and_sense(se_cmd, rc, 0);
1436 target_put_sess_cmd(se_cmd);
1437 return 0;
1438 }
1439
1440 rc = target_setup_cmd_from_cdb(se_cmd, cdb);
1441 if (rc != 0) {
1442 transport_generic_request_failure(se_cmd, rc);
1443 return 0;
1444 }
1445
1446 /*
1447 * Save pointers for SGLs containing protection information,
1448 * if present.
1449 */
1450 if (sgl_prot_count) {
1451 se_cmd->t_prot_sg = sgl_prot;
1452 se_cmd->t_prot_nents = sgl_prot_count;
1453 se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC;
1454 }
1455
1456 /*
1457 * When a non zero sgl_count has been passed perform SGL passthrough
1458 * mapping for pre-allocated fabric memory instead of having target
1459 * core perform an internal SGL allocation..
1460 */
1461 if (sgl_count != 0) {
1462 BUG_ON(!sgl);
1463
1464 /*
1465 * A work-around for tcm_loop as some userspace code via
1466 * scsi-generic do not memset their associated read buffers,
1467 * so go ahead and do that here for type non-data CDBs. Also
1468 * note that this is currently guaranteed to be a single SGL
1469 * for this case by target core in target_setup_cmd_from_cdb()
1470 * -> transport_generic_cmd_sequencer().
1471 */
1472 if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
1473 se_cmd->data_direction == DMA_FROM_DEVICE) {
1474 unsigned char *buf = NULL;
1475
1476 if (sgl)
1477 buf = kmap(sg_page(sgl)) + sgl->offset;
1478
1479 if (buf) {
1480 memset(buf, 0, sgl->length);
1481 kunmap(sg_page(sgl));
1482 }
1483 }
1484
1485 rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
1486 sgl_bidi, sgl_bidi_count);
1487 if (rc != 0) {
1488 transport_generic_request_failure(se_cmd, rc);
1489 return 0;
1490 }
1491 }
1492
1493 /*
1494 * Check if we need to delay processing because of ALUA
1495 * Active/NonOptimized primary access state..
1496 */
1497 core_alua_check_nonop_delay(se_cmd);
1498
1499 transport_handle_cdb_direct(se_cmd);
1500 return 0;
1501 }
1502 EXPORT_SYMBOL(target_submit_cmd_map_sgls);
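/*
 * Editorial usage sketch (assumption, loosely modelled on a tcm_loop style
 * caller; the tl_cmd/sc names are hypothetical): a fabric that already owns
 * scatterlists for the payload maps them straight through instead of having
 * target-core allocate data memory:
 *
 *	tl_cmd->se_cmd.tag = tl_cmd->sc_cmd_tag;
 *	rc = target_submit_cmd_map_sgls(&tl_cmd->se_cmd, se_sess, sc->cmnd,
 *			tl_cmd->sense_buf, unpacked_lun, scsi_bufflen(sc),
 *			TCM_SIMPLE_TAG, sc->sc_data_direction,
 *			TARGET_SCF_ACK_KREF, scsi_sglist(sc),
 *			scsi_sg_count(sc), NULL, 0, NULL, 0);
 *
 * A negative return signals active I/O shutdown and the caller must release
 * its fabric descriptor; all other setup failures are reported via
 * ->queue_status() as a CHECK_CONDITION and still return zero, as
 * documented above.
 */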
1503
1504 /*
1505 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
1506 *
1507 * @se_cmd: command descriptor to submit
1508 * @se_sess: associated se_sess for endpoint
1509 * @cdb: pointer to SCSI CDB
1510 * @sense: pointer to SCSI sense buffer
1511 * @unpacked_lun: unpacked LUN to reference for struct se_lun
1512 * @data_length: fabric expected data transfer length
1513 * @task_attr: SAM task attribute
1514 * @data_dir: DMA data direction
1515 * @flags: flags for command submission from target_sc_flags_tables
1516 *
1517 * Task tags are supported if the caller has set @se_cmd->tag.
1518 *
1519 * Returns non zero to signal active I/O shutdown failure. All other
1520 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
1521 * but still return zero here.
1522 *
1523 * This may only be called from process context, and also currently
1524 * assumes internal allocation of fabric payload buffer by target-core.
1525 *
1526 * It also assumes internal target core SGL memory allocation.
1527 */
1528 int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
1529 unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
1530 u32 data_length, int task_attr, int data_dir, int flags)
1531 {
1532 return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
1533 unpacked_lun, data_length, task_attr, data_dir,
1534 flags, NULL, 0, NULL, 0, NULL, 0);
1535 }
1536 EXPORT_SYMBOL(target_submit_cmd);
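/*
 * Editorial usage sketch (assumption; the descriptor and buffer names are
 * hypothetical): the common fast path for a fabric receiving a SCSI command
 * PDU is simply:
 *
 *	se_cmd->tag = fabric_task_tag;
 *	rc = target_submit_cmd(se_cmd, se_sess, cdb, sense_buf, unpacked_lun,
 *			expected_data_len, TCM_SIMPLE_TAG, DMA_FROM_DEVICE,
 *			TARGET_SCF_ACK_KREF);
 *
 * A negative return signals active I/O shutdown; everything else is
 * completed through the fabric ->write_pending()/->queue_data_in()/
 * ->queue_status() callbacks after target-core allocates the data SGLs
 * internally.
 */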
1537
1538 static void target_complete_tmr_failure(struct work_struct *work)
1539 {
1540 struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);
1541
1542 se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
1543 se_cmd->se_tfo->queue_tm_rsp(se_cmd);
1544
1545 transport_cmd_check_stop_to_fabric(se_cmd);
1546 }
1547
1548 /**
1549 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
1550 * for TMR CDBs
1551 *
1552 * @se_cmd: command descriptor to submit
1553 * @se_sess: associated se_sess for endpoint
1554 * @sense: pointer to SCSI sense buffer
1555 * @unpacked_lun: unpacked LUN to reference for struct se_lun
1556 * @fabric_context: fabric context for TMR req
1557 * @tm_type: Type of TM request
1558 * @gfp: gfp type for caller
1559 * @tag: referenced task tag for TMR_ABORT_TASK
1560 * @flags: submit cmd flags
1561 *
1562 * Callable from all contexts.
1563 **/
1564
1565 int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
1566 unsigned char *sense, u32 unpacked_lun,
1567 void *fabric_tmr_ptr, unsigned char tm_type,
1568 gfp_t gfp, unsigned int tag, int flags)
1569 {
1570 struct se_portal_group *se_tpg;
1571 int ret;
1572
1573 se_tpg = se_sess->se_tpg;
1574 BUG_ON(!se_tpg);
1575
1576 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1577 0, DMA_NONE, TCM_SIMPLE_TAG, sense);
1578 /*
1579 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
1580 * allocation failure.
1581 */
1582 ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
1583 if (ret < 0)
1584 return -ENOMEM;
1585
1586 if (tm_type == TMR_ABORT_TASK)
1587 se_cmd->se_tmr_req->ref_task_tag = tag;
1588
1589 /* See target_submit_cmd for commentary */
1590 ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
1591 if (ret) {
1592 core_tmr_release_req(se_cmd->se_tmr_req);
1593 return ret;
1594 }
1595
1596 ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
1597 if (ret) {
1598 /*
1599 * For callback during failure handling, push this work off
1600 * to process context with TMR_LUN_DOES_NOT_EXIST status.
1601 */
1602 INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
1603 schedule_work(&se_cmd->work);
1604 return 0;
1605 }
1606 transport_generic_handle_tmr(se_cmd);
1607 return 0;
1608 }
1609 EXPORT_SYMBOL(target_submit_tmr);
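/*
 * Editorial usage sketch (assumption; 'ref_tag' is the fabric supplied tag
 * of the command to abort): an ABORT TASK request would be queued as:
 *
 *	rc = target_submit_tmr(&fabric_cmd->se_cmd, se_sess, NULL,
 *			unpacked_lun, fabric_tmr_ptr, TMR_ABORT_TASK,
 *			GFP_KERNEL, ref_tag, TARGET_SCF_ACK_KREF);
 *
 * A negative return signals active I/O shutdown.  A failed LUN lookup is
 * completed asynchronously through ->queue_tm_rsp() with
 * TMR_LUN_DOES_NOT_EXIST, as implemented above.
 */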
1610
1611 /*
1612 * If the cmd is active, request it to be stopped and sleep until it
1613 * has completed.
1614 */
1615 bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
1616 __releases(&cmd->t_state_lock)
1617 __acquires(&cmd->t_state_lock)
1618 {
1619 bool was_active = false;
1620
1621 if (cmd->transport_state & CMD_T_BUSY) {
1622 cmd->transport_state |= CMD_T_REQUEST_STOP;
1623 spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
1624
1625 pr_debug("cmd %p waiting to complete\n", cmd);
1626 wait_for_completion(&cmd->task_stop_comp);
1627 pr_debug("cmd %p stopped successfully\n", cmd);
1628
1629 spin_lock_irqsave(&cmd->t_state_lock, *flags);
1630 cmd->transport_state &= ~CMD_T_REQUEST_STOP;
1631 cmd->transport_state &= ~CMD_T_BUSY;
1632 was_active = true;
1633 }
1634
1635 return was_active;
1636 }
1637
1638 /*
1639 * Handle SAM-esque emulation for generic transport request failures.
1640 */
1641 void transport_generic_request_failure(struct se_cmd *cmd,
1642 sense_reason_t sense_reason)
1643 {
1644 int ret = 0;
1645
1646 pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx"
1647 " CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]);
1648 pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n",
1649 cmd->se_tfo->get_cmd_state(cmd),
1650 cmd->t_state, sense_reason);
1651 pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
1652 (cmd->transport_state & CMD_T_ACTIVE) != 0,
1653 (cmd->transport_state & CMD_T_STOP) != 0,
1654 (cmd->transport_state & CMD_T_SENT) != 0);
1655
1656 /*
1657 * For SAM Task Attribute emulation for failed struct se_cmd
1658 */
1659 transport_complete_task_attr(cmd);
1660 /*
1661 * Handle special case for COMPARE_AND_WRITE failure, where the
1662 * callback is expected to drop the per device ->caw_sem.
1663 */
1664 if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
1665 cmd->transport_complete_callback)
1666 cmd->transport_complete_callback(cmd, false);
1667
1668 switch (sense_reason) {
1669 case TCM_NON_EXISTENT_LUN:
1670 case TCM_UNSUPPORTED_SCSI_OPCODE:
1671 case TCM_INVALID_CDB_FIELD:
1672 case TCM_INVALID_PARAMETER_LIST:
1673 case TCM_PARAMETER_LIST_LENGTH_ERROR:
1674 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
1675 case TCM_UNKNOWN_MODE_PAGE:
1676 case TCM_WRITE_PROTECTED:
1677 case TCM_ADDRESS_OUT_OF_RANGE:
1678 case TCM_CHECK_CONDITION_ABORT_CMD:
1679 case TCM_CHECK_CONDITION_UNIT_ATTENTION:
1680 case TCM_CHECK_CONDITION_NOT_READY:
1681 case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
1682 case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
1683 case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
1684 break;
1685 case TCM_OUT_OF_RESOURCES:
1686 sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1687 break;
1688 case TCM_RESERVATION_CONFLICT:
1689 /*
1690 * No SENSE Data payload for this case, set SCSI Status
1691 * and queue the response to $FABRIC_MOD.
1692 *
1693 * Uses linux/include/scsi/scsi.h SAM status codes defs
1694 */
1695 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1696 /*
1697 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
1698 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
1699 * CONFLICT STATUS.
1700 *
1701 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
1702 */
1703 if (cmd->se_sess &&
1704 cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2)
1705 core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
1706 cmd->orig_fe_lun, 0x2C,
1707 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
1708
1709 trace_target_cmd_complete(cmd);
1710 ret = cmd->se_tfo->queue_status(cmd);
1711 if (ret == -EAGAIN || ret == -ENOMEM)
1712 goto queue_full;
1713 goto check_stop;
1714 default:
1715 pr_err("Unknown transport error for CDB 0x%02x: %d\n",
1716 cmd->t_task_cdb[0], sense_reason);
1717 sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1718 break;
1719 }
1720
1721 ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
1722 if (ret == -EAGAIN || ret == -ENOMEM)
1723 goto queue_full;
1724
1725 check_stop:
1726 transport_lun_remove_cmd(cmd);
1727 if (!transport_cmd_check_stop_to_fabric(cmd))
1728 ;
1729 return;
1730
1731 queue_full:
1732 cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
1733 transport_handle_queue_full(cmd, cmd->se_dev);
1734 }
1735 EXPORT_SYMBOL(transport_generic_request_failure);
1736
1737 void __target_execute_cmd(struct se_cmd *cmd)
1738 {
1739 sense_reason_t ret;
1740
1741 if (cmd->execute_cmd) {
1742 ret = cmd->execute_cmd(cmd);
1743 if (ret) {
1744 spin_lock_irq(&cmd->t_state_lock);
1745 cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
1746 spin_unlock_irq(&cmd->t_state_lock);
1747
1748 transport_generic_request_failure(cmd, ret);
1749 }
1750 }
1751 }
1752
1753 static int target_write_prot_action(struct se_cmd *cmd)
1754 {
1755 u32 sectors;
1756 /*
1757 * Perform WRITE_INSERT of PI using software emulation when backend
1758 * device has PI enabled, if the transport has not already generated
1759 * PI using hardware WRITE_INSERT offload.
1760 */
1761 switch (cmd->prot_op) {
1762 case TARGET_PROT_DOUT_INSERT:
1763 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
1764 sbc_dif_generate(cmd);
1765 break;
1766 case TARGET_PROT_DOUT_STRIP:
1767 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP)
1768 break;
1769
1770 sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size);
1771 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
1772 sectors, 0, cmd->t_prot_sg, 0);
1773 if (unlikely(cmd->pi_err)) {
1774 spin_lock_irq(&cmd->t_state_lock);
1775 cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
1776 spin_unlock_irq(&cmd->t_state_lock);
1777 transport_generic_request_failure(cmd, cmd->pi_err);
1778 return -1;
1779 }
1780 break;
1781 default:
1782 break;
1783 }
1784
1785 return 0;
1786 }
1787
1788 static bool target_handle_task_attr(struct se_cmd *cmd)
1789 {
1790 struct se_device *dev = cmd->se_dev;
1791
1792 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
1793 return false;
1794
1795 /*
1796 * Check for the existence of HEAD_OF_QUEUE, and if set return false so
1797 * the passed struct se_cmd is executed immediately, ahead of any delayed commands.
1798 */
1799 switch (cmd->sam_task_attr) {
1800 case TCM_HEAD_TAG:
1801 pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, "
1802 "se_ordered_id: %u\n",
1803 cmd->t_task_cdb[0], cmd->se_ordered_id);
1804 return false;
1805 case TCM_ORDERED_TAG:
1806 atomic_inc_mb(&dev->dev_ordered_sync);
1807
1808 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, "
1809 " se_ordered_id: %u\n",
1810 cmd->t_task_cdb[0], cmd->se_ordered_id);
1811
1812 /*
1813 * Execute an ORDERED command if no other older commands
1814 * exist that need to be completed first.
1815 */
1816 if (!atomic_read(&dev->simple_cmds))
1817 return false;
1818 break;
1819 default:
1820 /*
1821 * For SIMPLE and UNTAGGED Task Attribute commands
1822 */
1823 atomic_inc_mb(&dev->simple_cmds);
1824 break;
1825 }
1826
1827 if (atomic_read(&dev->dev_ordered_sync) == 0)
1828 return false;
1829
1830 spin_lock(&dev->delayed_cmd_lock);
1831 list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
1832 spin_unlock(&dev->delayed_cmd_lock);
1833
1834 pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
1835 " delayed CMD list, se_ordered_id: %u\n",
1836 cmd->t_task_cdb[0], cmd->sam_task_attr,
1837 cmd->se_ordered_id);
1838 return true;
1839 }
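/*
 * Editorial note on the ordering model above (summary only, no new
 * behaviour): SIMPLE commands bump dev->simple_cmds and run immediately;
 * HEAD_OF_QUEUE commands always run immediately; an ORDERED command bumps
 * dev->dev_ordered_sync and, while older SIMPLE commands are still in
 * flight, is parked on dev->delayed_cmd_list.  While ORDERED commands are
 * outstanding (dev_ordered_sync != 0), later SIMPLE commands are delayed as
 * well, and transport_complete_task_attr() below restarts the delayed list
 * as completions drain.
 */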
1840
1841 void target_execute_cmd(struct se_cmd *cmd)
1842 {
1843 /*
1844 * If the received CDB has already been aborted, stop processing it here.
1845 */
1846 if (transport_check_aborted_status(cmd, 1))
1847 return;
1848
1849 /*
1850 * Determine if frontend context caller is requesting the stopping of
1851 * this command for frontend exceptions.
1852 */
1853 spin_lock_irq(&cmd->t_state_lock);
1854 if (cmd->transport_state & CMD_T_STOP) {
1855 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
1856 __func__, __LINE__, cmd->tag);
1857
1858 spin_unlock_irq(&cmd->t_state_lock);
1859 complete_all(&cmd->t_transport_stop_comp);
1860 return;
1861 }
1862
1863 cmd->t_state = TRANSPORT_PROCESSING;
1864 cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
1865 spin_unlock_irq(&cmd->t_state_lock);
1866
1867 if (target_write_prot_action(cmd))
1868 return;
1869
1870 if (target_handle_task_attr(cmd)) {
1871 spin_lock_irq(&cmd->t_state_lock);
1872 cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT);
1873 spin_unlock_irq(&cmd->t_state_lock);
1874 return;
1875 }
1876
1877 __target_execute_cmd(cmd);
1878 }
1879 EXPORT_SYMBOL(target_execute_cmd);
1880
1881 /*
1882 * Process all commands up to the last received ORDERED task attribute,
1883 * which requires another blocking boundary.
1884 */
1885 static void target_restart_delayed_cmds(struct se_device *dev)
1886 {
1887 for (;;) {
1888 struct se_cmd *cmd;
1889
1890 spin_lock(&dev->delayed_cmd_lock);
1891 if (list_empty(&dev->delayed_cmd_list)) {
1892 spin_unlock(&dev->delayed_cmd_lock);
1893 break;
1894 }
1895
1896 cmd = list_entry(dev->delayed_cmd_list.next,
1897 struct se_cmd, se_delayed_node);
1898 list_del(&cmd->se_delayed_node);
1899 spin_unlock(&dev->delayed_cmd_lock);
1900
1901 __target_execute_cmd(cmd);
1902
1903 if (cmd->sam_task_attr == TCM_ORDERED_TAG)
1904 break;
1905 }
1906 }
1907
1908 /*
1909 * Called from I/O completion to determine which dormant/delayed
1910 * and ordered cmds need to have their tasks added to the execution queue.
1911 */
1912 static void transport_complete_task_attr(struct se_cmd *cmd)
1913 {
1914 struct se_device *dev = cmd->se_dev;
1915
1916 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
1917 return;
1918
1919 if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
1920 atomic_dec_mb(&dev->simple_cmds);
1921 dev->dev_cur_ordered_id++;
1922 pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
1923 " SIMPLE: %u\n", dev->dev_cur_ordered_id,
1924 cmd->se_ordered_id);
1925 } else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
1926 dev->dev_cur_ordered_id++;
1927 pr_debug("Incremented dev_cur_ordered_id: %u for"
1928 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
1929 cmd->se_ordered_id);
1930 } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
1931 atomic_dec_mb(&dev->dev_ordered_sync);
1932
1933 dev->dev_cur_ordered_id++;
1934 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
1935 " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
1936 }
1937
1938 target_restart_delayed_cmds(dev);
1939 }
1940
1941 static void transport_complete_qf(struct se_cmd *cmd)
1942 {
1943 int ret = 0;
1944
1945 transport_complete_task_attr(cmd);
1946
1947 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
1948 trace_target_cmd_complete(cmd);
1949 ret = cmd->se_tfo->queue_status(cmd);
1950 goto out;
1951 }
1952
1953 switch (cmd->data_direction) {
1954 case DMA_FROM_DEVICE:
1955 trace_target_cmd_complete(cmd);
1956 ret = cmd->se_tfo->queue_data_in(cmd);
1957 break;
1958 case DMA_TO_DEVICE:
1959 if (cmd->se_cmd_flags & SCF_BIDI) {
1960 ret = cmd->se_tfo->queue_data_in(cmd);
1961 break;
1962 }
1963 /* Fall through for DMA_TO_DEVICE */
1964 case DMA_NONE:
1965 trace_target_cmd_complete(cmd);
1966 ret = cmd->se_tfo->queue_status(cmd);
1967 break;
1968 default:
1969 break;
1970 }
1971
1972 out:
1973 if (ret < 0) {
1974 transport_handle_queue_full(cmd, cmd->se_dev);
1975 return;
1976 }
1977 transport_lun_remove_cmd(cmd);
1978 transport_cmd_check_stop_to_fabric(cmd);
1979 }
1980
1981 static void transport_handle_queue_full(
1982 struct se_cmd *cmd,
1983 struct se_device *dev)
1984 {
1985 spin_lock_irq(&dev->qf_cmd_lock);
1986 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
1987 atomic_inc_mb(&dev->dev_qf_count);
1988 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
1989
1990 schedule_work(&cmd->se_dev->qf_work_queue);
1991 }
1992
1993 static bool target_read_prot_action(struct se_cmd *cmd)
1994 {
1995 switch (cmd->prot_op) {
1996 case TARGET_PROT_DIN_STRIP:
1997 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
1998 u32 sectors = cmd->data_length >>
1999 ilog2(cmd->se_dev->dev_attrib.block_size);
2000
2001 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
2002 sectors, 0, cmd->t_prot_sg,
2003 0);
2004 if (cmd->pi_err)
2005 return true;
2006 }
2007 break;
2008 case TARGET_PROT_DIN_INSERT:
2009 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT)
2010 break;
2011
2012 sbc_dif_generate(cmd);
2013 break;
2014 default:
2015 break;
2016 }
2017
2018 return false;
2019 }
2020
2021 static void target_complete_ok_work(struct work_struct *work)
2022 {
2023 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
2024 int ret;
2025
2026 /*
2027 * Check if we need to move delayed/dormant tasks from cmds on the
2028 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
2029 * Attribute.
2030 */
2031 transport_complete_task_attr(cmd);
2032
2033 /*
2034 * If earlier responses hit QUEUE_FULL on this device, kick its
2035 * queue-full work so they are retried.
2036 */
2037 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
2038 schedule_work(&cmd->se_dev->qf_work_queue);
2039
2040 /*
2041 * Check if we need to send a sense buffer from
2042 * the struct se_cmd in question.
2043 */
2044 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
2045 WARN_ON(!cmd->scsi_status);
2046 ret = transport_send_check_condition_and_sense(
2047 cmd, 0, 1);
2048 if (ret == -EAGAIN || ret == -ENOMEM)
2049 goto queue_full;
2050
2051 transport_lun_remove_cmd(cmd);
2052 transport_cmd_check_stop_to_fabric(cmd);
2053 return;
2054 }
2055 /*
2056 * Check for a callback, used by amongst other things
2057 * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation.
2058 */
2059 if (cmd->transport_complete_callback) {
2060 sense_reason_t rc;
2061
2062 rc = cmd->transport_complete_callback(cmd, true);
2063 if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) {
2064 if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
2065 !cmd->data_length)
2066 goto queue_rsp;
2067
2068 return;
2069 } else if (rc) {
2070 ret = transport_send_check_condition_and_sense(cmd,
2071 rc, 0);
2072 if (ret == -EAGAIN || ret == -ENOMEM)
2073 goto queue_full;
2074
2075 transport_lun_remove_cmd(cmd);
2076 transport_cmd_check_stop_to_fabric(cmd);
2077 return;
2078 }
2079 }
2080
2081 queue_rsp:
2082 switch (cmd->data_direction) {
2083 case DMA_FROM_DEVICE:
2084 spin_lock(&cmd->se_lun->lun_sep_lock);
2085 if (cmd->se_lun->lun_sep) {
2086 cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
2087 cmd->data_length;
2088 }
2089 spin_unlock(&cmd->se_lun->lun_sep_lock);
2090 /*
2091 * Perform READ_STRIP of PI using software emulation when
2092 * backend has PI enabled, if the transport will not be
2093 * performing hardware READ_STRIP offload.
2094 */
2095 if (target_read_prot_action(cmd)) {
2096 ret = transport_send_check_condition_and_sense(cmd,
2097 cmd->pi_err, 0);
2098 if (ret == -EAGAIN || ret == -ENOMEM)
2099 goto queue_full;
2100
2101 transport_lun_remove_cmd(cmd);
2102 transport_cmd_check_stop_to_fabric(cmd);
2103 return;
2104 }
2105
2106 trace_target_cmd_complete(cmd);
2107 ret = cmd->se_tfo->queue_data_in(cmd);
2108 if (ret == -EAGAIN || ret == -ENOMEM)
2109 goto queue_full;
2110 break;
2111 case DMA_TO_DEVICE:
2112 spin_lock(&cmd->se_lun->lun_sep_lock);
2113 if (cmd->se_lun->lun_sep) {
2114 cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
2115 cmd->data_length;
2116 }
2117 spin_unlock(&cmd->se_lun->lun_sep_lock);
2118 /*
2119 * Check if we need to send READ payload for BIDI-COMMAND
2120 */
2121 if (cmd->se_cmd_flags & SCF_BIDI) {
2122 spin_lock(&cmd->se_lun->lun_sep_lock);
2123 if (cmd->se_lun->lun_sep) {
2124 cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
2125 cmd->data_length;
2126 }
2127 spin_unlock(&cmd->se_lun->lun_sep_lock);
2128 ret = cmd->se_tfo->queue_data_in(cmd);
2129 if (ret == -EAGAIN || ret == -ENOMEM)
2130 goto queue_full;
2131 break;
2132 }
2133 /* Fall through for DMA_TO_DEVICE */
2134 case DMA_NONE:
2135 trace_target_cmd_complete(cmd);
2136 ret = cmd->se_tfo->queue_status(cmd);
2137 if (ret == -EAGAIN || ret == -ENOMEM)
2138 goto queue_full;
2139 break;
2140 default:
2141 break;
2142 }
2143
2144 transport_lun_remove_cmd(cmd);
2145 transport_cmd_check_stop_to_fabric(cmd);
2146 return;
2147
2148 queue_full:
2149 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
2150 " data_direction: %d\n", cmd, cmd->data_direction);
2151 cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
2152 transport_handle_queue_full(cmd, cmd->se_dev);
2153 }
2154
2155 static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
2156 {
2157 struct scatterlist *sg;
2158 int count;
2159
2160 for_each_sg(sgl, sg, nents, count)
2161 __free_page(sg_page(sg));
2162
2163 kfree(sgl);
2164 }
2165
2166 static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
2167 {
2168 /*
2169 * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE
2170 * emulation, and free + reset pointers if necessary.
2171 */
2172 if (!cmd->t_data_sg_orig)
2173 return;
2174
2175 kfree(cmd->t_data_sg);
2176 cmd->t_data_sg = cmd->t_data_sg_orig;
2177 cmd->t_data_sg_orig = NULL;
2178 cmd->t_data_nents = cmd->t_data_nents_orig;
2179 cmd->t_data_nents_orig = 0;
2180 }
2181
2182 static inline void transport_free_pages(struct se_cmd *cmd)
2183 {
2184 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
2185 transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
2186 cmd->t_prot_sg = NULL;
2187 cmd->t_prot_nents = 0;
2188 }
2189
2190 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
2191 /*
2192 * Release special case READ buffer payload required for
2193 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
2194 */
2195 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
2196 transport_free_sgl(cmd->t_bidi_data_sg,
2197 cmd->t_bidi_data_nents);
2198 cmd->t_bidi_data_sg = NULL;
2199 cmd->t_bidi_data_nents = 0;
2200 }
2201 transport_reset_sgl_orig(cmd);
2202 return;
2203 }
2204 transport_reset_sgl_orig(cmd);
2205
2206 transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
2207 cmd->t_data_sg = NULL;
2208 cmd->t_data_nents = 0;
2209
2210 transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
2211 cmd->t_bidi_data_sg = NULL;
2212 cmd->t_bidi_data_nents = 0;
2213 }
2214
2215 /**
2216 * transport_release_cmd - free a command
2217 * @cmd: command to free
2218 *
2219 * This routine releases the fabric descriptor resources and drops the
2220 * command's session kref; the final put invokes ->release_cmd().
2221 */
2222 static int transport_release_cmd(struct se_cmd *cmd)
2223 {
2224 BUG_ON(!cmd->se_tfo);
2225
2226 if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
2227 core_tmr_release_req(cmd->se_tmr_req);
2228 if (cmd->t_task_cdb != cmd->__t_task_cdb)
2229 kfree(cmd->t_task_cdb);
2230 /*
2231 * If this cmd has been setup with target_get_sess_cmd(), drop
2232 * the kref and call ->release_cmd() in kref callback.
2233 */
2234 return target_put_sess_cmd(cmd);
2235 }
2236
2237 /**
2238 * transport_put_cmd - release a reference to a command
2239 * @cmd: command to release
2240 *
2241 * This routine releases our reference to the command and frees it if possible.
2242 */
2243 static int transport_put_cmd(struct se_cmd *cmd)
2244 {
2245 transport_free_pages(cmd);
2246 return transport_release_cmd(cmd);
2247 }
2248
2249 void *transport_kmap_data_sg(struct se_cmd *cmd)
2250 {
2251 struct scatterlist *sg = cmd->t_data_sg;
2252 struct page **pages;
2253 int i;
2254
2255 /*
2256 * We need to take into account a possible offset here for fabrics like
2257 * tcm_loop, which may be using a contiguous buffer from the SCSI midlayer for
2258 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
2259 */
2260 if (!cmd->t_data_nents)
2261 return NULL;
2262
2263 BUG_ON(!sg);
2264 if (cmd->t_data_nents == 1)
2265 return kmap(sg_page(sg)) + sg->offset;
2266
2267 /* >1 page. use vmap */
2268 pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
2269 if (!pages)
2270 return NULL;
2271
2272 /* convert sg[] to pages[] */
2273 for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
2274 pages[i] = sg_page(sg);
2275 }
2276
2277 cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
2278 kfree(pages);
2279 if (!cmd->t_data_vmap)
2280 return NULL;
2281
2282 return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
2283 }
2284 EXPORT_SYMBOL(transport_kmap_data_sg);
2285
2286 void transport_kunmap_data_sg(struct se_cmd *cmd)
2287 {
2288 if (!cmd->t_data_nents) {
2289 return;
2290 } else if (cmd->t_data_nents == 1) {
2291 kunmap(sg_page(cmd->t_data_sg));
2292 return;
2293 }
2294
2295 vunmap(cmd->t_data_vmap);
2296 cmd->t_data_vmap = NULL;
2297 }
2298 EXPORT_SYMBOL(transport_kunmap_data_sg);
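
/*
 * Illustrative sketch only, not part of this driver: a CDB emulation path
 * using the kmap helpers above to touch the payload linearly. The
 * my_emulate_* name and the byte written are hypothetical placeholders.
 */
static sense_reason_t my_emulate_payload_fill(struct se_cmd *cmd)
{
	unsigned char *buf;

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	buf[0] = 0x00;	/* fill the linearly mapped payload here */

	transport_kunmap_data_sg(cmd);
	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}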
2299
2300 int
2301 target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
2302 bool zero_page)
2303 {
2304 struct scatterlist *sg;
2305 struct page *page;
2306 gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0;
2307 unsigned int nent;
2308 int i = 0;
2309
2310 nent = DIV_ROUND_UP(length, PAGE_SIZE);
2311 sg = kmalloc(sizeof(struct scatterlist) * nent, GFP_KERNEL);
2312 if (!sg)
2313 return -ENOMEM;
2314
2315 sg_init_table(sg, nent);
2316
2317 while (length) {
2318 u32 page_len = min_t(u32, length, PAGE_SIZE);
2319 page = alloc_page(GFP_KERNEL | zero_flag);
2320 if (!page)
2321 goto out;
2322
2323 sg_set_page(&sg[i], page, page_len, 0);
2324 length -= page_len;
2325 i++;
2326 }
2327 *sgl = sg;
2328 *nents = nent;
2329 return 0;
2330
2331 out:
2332 while (i > 0) {
2333 i--;
2334 __free_page(sg_page(&sg[i]));
2335 }
2336 kfree(sg);
2337 return -ENOMEM;
2338 }
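
/*
 * Illustrative sketch only, not part of this driver: allocating and then
 * releasing a scratch scatterlist with target_alloc_sgl() above; the free
 * loop mirrors transport_free_sgl(). The my_* name is hypothetical.
 */
static int my_scratch_sgl_demo(u32 length)
{
	struct scatterlist *sgl, *sg;
	unsigned int nents;
	int i, ret;

	ret = target_alloc_sgl(&sgl, &nents, length, true);	/* zeroed pages */
	if (ret < 0)
		return ret;

	/* ... use the scatterlist for a temporary payload ... */

	for_each_sg(sgl, sg, nents, i)
		__free_page(sg_page(sg));
	kfree(sgl);
	return 0;
}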
2339
2340 /*
2341 * Allocate any required resources to execute the command. For writes we
2342 * might not have the payload yet, so notify the fabric via a call to
2343 * ->write_pending instead. Otherwise place it on the execution queue.
2344 */
2345 sense_reason_t
2346 transport_generic_new_cmd(struct se_cmd *cmd)
2347 {
2348 int ret = 0;
2349 bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
2350
2351 if (cmd->prot_op != TARGET_PROT_NORMAL &&
2352 !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
2353 ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents,
2354 cmd->prot_length, true);
2355 if (ret < 0)
2356 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2357 }
2358
2359 /*
2360 * Determine if the TCM fabric module has already allocated physical
2361 * memory, and is directly calling transport_generic_map_mem_to_cmd()
2362 * beforehand.
2363 */
2364 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
2365 cmd->data_length) {
2366
2367 if ((cmd->se_cmd_flags & SCF_BIDI) ||
2368 (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
2369 u32 bidi_length;
2370
2371 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)
2372 bidi_length = cmd->t_task_nolb *
2373 cmd->se_dev->dev_attrib.block_size;
2374 else
2375 bidi_length = cmd->data_length;
2376
2377 ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
2378 &cmd->t_bidi_data_nents,
2379 bidi_length, zero_flag);
2380 if (ret < 0)
2381 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2382 }
2383
2384 ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
2385 cmd->data_length, zero_flag);
2386 if (ret < 0)
2387 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2388 } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
2389 cmd->data_length) {
2390 /*
2391 * Special case for COMPARE_AND_WRITE with fabrics
2392 * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC.
2393 */
2394 u32 caw_length = cmd->t_task_nolb *
2395 cmd->se_dev->dev_attrib.block_size;
2396
2397 ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
2398 &cmd->t_bidi_data_nents,
2399 caw_length, zero_flag);
2400 if (ret < 0)
2401 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2402 }
2403 /*
2404 * If this command is not a write we can execute it right here;
2405 * for write buffers we need to notify the fabric driver first
2406 * and let it call back once the write buffers are ready.
2407 */
2408 target_add_to_state_list(cmd);
2409 if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) {
2410 target_execute_cmd(cmd);
2411 return 0;
2412 }
2413 transport_cmd_check_stop(cmd, false, true);
2414
2415 ret = cmd->se_tfo->write_pending(cmd);
2416 if (ret == -EAGAIN || ret == -ENOMEM)
2417 goto queue_full;
2418
2419 /* fabric drivers should only return -EAGAIN or -ENOMEM as error */
2420 WARN_ON(ret);
2421
2422 return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2423
2424 queue_full:
2425 pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
2426 cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
2427 transport_handle_queue_full(cmd, cmd->se_dev);
2428 return 0;
2429 }
2430 EXPORT_SYMBOL(transport_generic_new_cmd);
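
/*
 * Illustrative sketch only, not part of this driver: the deferred WRITE
 * flow described above, seen from a fabric driver. The my_fabric_* names
 * are hypothetical; the callback shape matches the ->write_pending() hook
 * invoked by transport_generic_new_cmd().
 */
static int my_fabric_write_pending(struct se_cmd *se_cmd)
{
	/*
	 * Solicit the WRITE payload from the initiator here (transport
	 * specific). Only -EAGAIN/-ENOMEM are treated as retryable
	 * queue-full conditions by the caller.
	 */
	return 0;
}

/* Later, once the solicited data has landed in se_cmd's buffers: */
static void my_fabric_data_out_done(struct se_cmd *se_cmd)
{
	target_execute_cmd(se_cmd);	/* hand the buffered WRITE to the core */
}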
2431
2432 static void transport_write_pending_qf(struct se_cmd *cmd)
2433 {
2434 int ret;
2435
2436 ret = cmd->se_tfo->write_pending(cmd);
2437 if (ret == -EAGAIN || ret == -ENOMEM) {
2438 pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
2439 cmd);
2440 transport_handle_queue_full(cmd, cmd->se_dev);
2441 }
2442 }
2443
2444 int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
2445 {
2446 unsigned long flags;
2447 int ret = 0;
2448
2449 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
2450 if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
2451 transport_wait_for_tasks(cmd);
2452
2453 ret = transport_release_cmd(cmd);
2454 } else {
2455 if (wait_for_tasks)
2456 transport_wait_for_tasks(cmd);
2457 /*
2458 * Handle WRITE failure case where transport_generic_new_cmd()
2459 * has already added se_cmd to state_list, but fabric has
2460 * failed command before I/O submission.
2461 */
2462 if (cmd->state_active) {
2463 spin_lock_irqsave(&cmd->t_state_lock, flags);
2464 target_remove_from_state_list(cmd);
2465 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2466 }
2467
2468 if (cmd->se_lun)
2469 transport_lun_remove_cmd(cmd);
2470
2471 ret = transport_put_cmd(cmd);
2472 }
2473 return ret;
2474 }
2475 EXPORT_SYMBOL(transport_generic_free_cmd);
2476
2477 /* target_get_sess_cmd - Add command to active ->sess_cmd_list
2478 * @se_cmd: command descriptor to add
2479 * @ack_kref: Signal that the fabric will drop an extra acknowledgement reference via target_put_sess_cmd()
2480 */
2481 int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
2482 {
2483 struct se_session *se_sess = se_cmd->se_sess;
2484 unsigned long flags;
2485 int ret = 0;
2486
2487 /*
2488 * Add a second kref if the fabric caller is expecting to handle
2489 * fabric acknowledgement that requires two target_put_sess_cmd()
2490 * invocations before se_cmd descriptor release.
2491 */
2492 if (ack_kref)
2493 kref_get(&se_cmd->cmd_kref);
2494
2495 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2496 if (se_sess->sess_tearing_down) {
2497 ret = -ESHUTDOWN;
2498 goto out;
2499 }
2500 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
2501 out:
2502 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2503
2504 if (ret && ack_kref)
2505 target_put_sess_cmd(se_cmd);
2506
2507 return ret;
2508 }
2509 EXPORT_SYMBOL(target_get_sess_cmd);
2510
2511 static void target_release_cmd_kref(struct kref *kref)
2512 __releases(&se_cmd->se_sess->sess_cmd_lock)
2513 {
2514 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
2515 struct se_session *se_sess = se_cmd->se_sess;
2516
2517 if (list_empty(&se_cmd->se_cmd_list)) {
2518 spin_unlock(&se_sess->sess_cmd_lock);
2519 se_cmd->se_tfo->release_cmd(se_cmd);
2520 return;
2521 }
2522 if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
2523 spin_unlock(&se_sess->sess_cmd_lock);
2524 complete(&se_cmd->cmd_wait_comp);
2525 return;
2526 }
2527 list_del(&se_cmd->se_cmd_list);
2528 spin_unlock(&se_sess->sess_cmd_lock);
2529
2530 se_cmd->se_tfo->release_cmd(se_cmd);
2531 }
2532
2533 /* target_put_sess_cmd - Check for active I/O shutdown via kref_put
2534 * @se_cmd: command descriptor to drop
2535 */
2536 int target_put_sess_cmd(struct se_cmd *se_cmd)
2537 {
2538 struct se_session *se_sess = se_cmd->se_sess;
2539
2540 if (!se_sess) {
2541 se_cmd->se_tfo->release_cmd(se_cmd);
2542 return 1;
2543 }
2544 return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref,
2545 &se_sess->sess_cmd_lock);
2546 }
2547 EXPORT_SYMBOL(target_put_sess_cmd);
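
/*
 * Illustrative sketch only, not part of this driver: typical pairing of
 * the two helpers above in a fabric driver. The my_fabric_* names are
 * hypothetical.
 */
static int my_fabric_track_cmd(struct se_cmd *se_cmd)
{
	int ret;

	/* ack_kref=true: the fabric holds an extra reference until ACK. */
	ret = target_get_sess_cmd(se_cmd, true);
	if (ret)	/* -ESHUTDOWN: the session is being torn down */
		return ret;

	/* ... hand the command to the core for execution ... */
	return 0;
}

/* Dropped once the initiator acknowledges the response: */
static void my_fabric_cmd_acked(struct se_cmd *se_cmd)
{
	target_put_sess_cmd(se_cmd);
}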
2548
2549 /* target_sess_cmd_list_set_waiting - Flag all commands in
2550 * sess_cmd_list to complete cmd_wait_comp. Set
2551 * sess_tearing_down so no more commands are queued.
2552 * @se_sess: session to flag
2553 */
2554 void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
2555 {
2556 struct se_cmd *se_cmd;
2557 unsigned long flags;
2558
2559 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2560 if (se_sess->sess_tearing_down) {
2561 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2562 return;
2563 }
2564 se_sess->sess_tearing_down = 1;
2565 list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
2566
2567 list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
2568 se_cmd->cmd_wait_set = 1;
2569
2570 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2571 }
2572 EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
2573
2574 /* target_wait_for_sess_cmds - Wait for outstanding descriptors
2575 * @se_sess: session to wait for active I/O
2576 */
2577 void target_wait_for_sess_cmds(struct se_session *se_sess)
2578 {
2579 struct se_cmd *se_cmd, *tmp_cmd;
2580 unsigned long flags;
2581
2582 list_for_each_entry_safe(se_cmd, tmp_cmd,
2583 &se_sess->sess_wait_list, se_cmd_list) {
2584 list_del(&se_cmd->se_cmd_list);
2585
2586 pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
2587 " %d\n", se_cmd, se_cmd->t_state,
2588 se_cmd->se_tfo->get_cmd_state(se_cmd));
2589
2590 wait_for_completion(&se_cmd->cmd_wait_comp);
2591 pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
2592 " fabric state: %d\n", se_cmd, se_cmd->t_state,
2593 se_cmd->se_tfo->get_cmd_state(se_cmd));
2594
2595 se_cmd->se_tfo->release_cmd(se_cmd);
2596 }
2597
2598 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2599 WARN_ON(!list_empty(&se_sess->sess_cmd_list));
2600 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2601
2602 }
2603 EXPORT_SYMBOL(target_wait_for_sess_cmds);
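
/*
 * Illustrative sketch only, not part of this driver: the usual session
 * shutdown sequence built on the two helpers above. The
 * my_fabric_close_session() name is hypothetical.
 */
static void my_fabric_close_session(struct se_session *se_sess)
{
	/* Refuse new commands and flag outstanding ones for completion. */
	target_sess_cmd_list_set_waiting(se_sess);

	/* Block until every outstanding descriptor has been released. */
	target_wait_for_sess_cmds(se_sess);

	/* Finally release the session itself. */
	transport_deregister_session(se_sess);
}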
2604
2605 static int transport_clear_lun_ref_thread(void *p)
2606 {
2607 struct se_lun *lun = p;
2608
2609 percpu_ref_kill(&lun->lun_ref);
2610
2611 wait_for_completion(&lun->lun_ref_comp);
2612 complete(&lun->lun_shutdown_comp);
2613
2614 return 0;
2615 }
2616
2617 int transport_clear_lun_ref(struct se_lun *lun)
2618 {
2619 struct task_struct *kt;
2620
2621 kt = kthread_run(transport_clear_lun_ref_thread, lun,
2622 "tcm_cl_%u", lun->unpacked_lun);
2623 if (IS_ERR(kt)) {
2624 pr_err("Unable to start clear_lun thread\n");
2625 return PTR_ERR(kt);
2626 }
2627 wait_for_completion(&lun->lun_shutdown_comp);
2628
2629 return 0;
2630 }
2631
2632 /**
2633 * transport_wait_for_tasks - wait for completion to occur
2634 * @cmd: command to wait on
2635 *
2636 * Called from frontend fabric context to wait for storage engine
2637 * to pause and/or release frontend generated struct se_cmd.
2638 */
2639 bool transport_wait_for_tasks(struct se_cmd *cmd)
2640 {
2641 unsigned long flags;
2642
2643 spin_lock_irqsave(&cmd->t_state_lock, flags);
2644 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
2645 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
2646 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2647 return false;
2648 }
2649
2650 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
2651 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
2652 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2653 return false;
2654 }
2655
2656 if (!(cmd->transport_state & CMD_T_ACTIVE)) {
2657 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2658 return false;
2659 }
2660
2661 cmd->transport_state |= CMD_T_STOP;
2662
2663 pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d, t_state: %d, CMD_T_STOP\n",
2664 cmd, cmd->tag, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
2665
2666 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2667
2668 wait_for_completion(&cmd->t_transport_stop_comp);
2669
2670 spin_lock_irqsave(&cmd->t_state_lock, flags);
2671 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
2672
2673 pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->t_transport_stop_comp) for ITT: 0x%08llx\n",
2674 cmd->tag);
2675
2676 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2677
2678 return true;
2679 }
2680 EXPORT_SYMBOL(transport_wait_for_tasks);
2681
2682 static int transport_get_sense_codes(
2683 struct se_cmd *cmd,
2684 u8 *asc,
2685 u8 *ascq)
2686 {
2687 *asc = cmd->scsi_asc;
2688 *ascq = cmd->scsi_ascq;
2689
2690 return 0;
2691 }
2692
2693 static
2694 void transport_err_sector_info(unsigned char *buffer, sector_t bad_sector)
2695 {
2696 /* Place failed LBA in sense data information descriptor 0. */
2697 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 0xc;
2698 buffer[SPC_DESC_TYPE_OFFSET] = 0; /* Information */
2699 buffer[SPC_ADDITIONAL_DESC_LEN_OFFSET] = 0xa;
2700 buffer[SPC_VALIDITY_OFFSET] = 0x80;
2701
2702 /* Descriptor Information: failing sector */
2703 put_unaligned_be64(bad_sector, &buffer[12]);
2704 }
2705
2706 int
2707 transport_send_check_condition_and_sense(struct se_cmd *cmd,
2708 sense_reason_t reason, int from_transport)
2709 {
2710 unsigned char *buffer = cmd->sense_buffer;
2711 unsigned long flags;
2712 u8 asc = 0, ascq = 0;
2713
2714 spin_lock_irqsave(&cmd->t_state_lock, flags);
2715 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
2716 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2717 return 0;
2718 }
2719 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
2720 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2721
2722 if (!reason && from_transport)
2723 goto after_reason;
2724
2725 if (!from_transport)
2726 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
2727
2728 /*
2729 * Actual SENSE DATA, see SPC-3 7.23.2. SPC_SENSE_KEY_OFFSET uses
2730 * SENSE KEY values from include/scsi/scsi.h
2731 */
2732 switch (reason) {
2733 case TCM_NO_SENSE:
2734 /* CURRENT ERROR */
2735 buffer[0] = 0x70;
2736 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2737 /* Not Ready */
2738 buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
2739 /* NO ADDITIONAL SENSE INFORMATION */
2740 buffer[SPC_ASC_KEY_OFFSET] = 0;
2741 buffer[SPC_ASCQ_KEY_OFFSET] = 0;
2742 break;
2743 case TCM_NON_EXISTENT_LUN:
2744 /* CURRENT ERROR */
2745 buffer[0] = 0x70;
2746 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2747 /* ILLEGAL REQUEST */
2748 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
2749 /* LOGICAL UNIT NOT SUPPORTED */
2750 buffer[SPC_ASC_KEY_OFFSET] = 0x25;
2751 break;
2752 case TCM_UNSUPPORTED_SCSI_OPCODE:
2753 case TCM_SECTOR_COUNT_TOO_MANY:
2754 /* CURRENT ERROR */
2755 buffer[0] = 0x70;
2756 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2757 /* ILLEGAL REQUEST */
2758 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
2759 /* INVALID COMMAND OPERATION CODE */
2760 buffer[SPC_ASC_KEY_OFFSET] = 0x20;
2761 break;
2762 case TCM_UNKNOWN_MODE_PAGE:
2763 /* CURRENT ERROR */
2764 buffer[0] = 0x70;
2765 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2766 /* ILLEGAL REQUEST */
2767 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
2768 /* INVALID FIELD IN CDB */
2769 buffer[SPC_ASC_KEY_OFFSET] = 0x24;
2770 break;
2771 case TCM_CHECK_CONDITION_ABORT_CMD:
2772 /* CURRENT ERROR */
2773 buffer[0] = 0x70;
2774 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2775 /* ABORTED COMMAND */
2776 buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
2777 /* BUS DEVICE RESET FUNCTION OCCURRED */
2778 buffer[SPC_ASC_KEY_OFFSET] = 0x29;
2779 buffer[SPC_ASCQ_KEY_OFFSET] = 0x03;
2780 break;
2781 case TCM_INCORRECT_AMOUNT_OF_DATA:
2782 /* CURRENT ERROR */
2783 buffer[0] = 0x70;
2784 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2785 /* ABORTED COMMAND */
2786 buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
2787 /* WRITE ERROR */
2788 buffer[SPC_ASC_KEY_OFFSET] = 0x0c;
2789 /* NOT ENOUGH UNSOLICITED DATA */
2790 buffer[SPC_ASCQ_KEY_OFFSET] = 0x0d;
2791 break;
2792 case TCM_INVALID_CDB_FIELD:
2793 /* CURRENT ERROR */
2794 buffer[0] = 0x70;
2795 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2796 /* ILLEGAL REQUEST */
2797 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
2798 /* INVALID FIELD IN CDB */
2799 buffer[SPC_ASC_KEY_OFFSET] = 0x24;
2800 break;
2801 case TCM_INVALID_PARAMETER_LIST:
2802 /* CURRENT ERROR */
2803 buffer[0] = 0x70;
2804 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2805 /* ILLEGAL REQUEST */
2806 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
2807 /* INVALID FIELD IN PARAMETER LIST */
2808 buffer[SPC_ASC_KEY_OFFSET] = 0x26;
2809 break;
2810 case TCM_PARAMETER_LIST_LENGTH_ERROR:
2811 /* CURRENT ERROR */
2812 buffer[0] = 0x70;
2813 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2814 /* ILLEGAL REQUEST */
2815 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
2816 /* PARAMETER LIST LENGTH ERROR */
2817 buffer[SPC_ASC_KEY_OFFSET] = 0x1a;
2818 break;
2819 case TCM_UNEXPECTED_UNSOLICITED_DATA:
2820 /* CURRENT ERROR */
2821 buffer[0] = 0x70;
2822 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2823 /* ABORTED COMMAND */
2824 buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
2825 /* WRITE ERROR */
2826 buffer[SPC_ASC_KEY_OFFSET] = 0x0c;
2827 /* UNEXPECTED_UNSOLICITED_DATA */
2828 buffer[SPC_ASCQ_KEY_OFFSET] = 0x0c;
2829 break;
2830 case TCM_SERVICE_CRC_ERROR:
2831 /* CURRENT ERROR */
2832 buffer[0] = 0x70;
2833 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2834 /* ABORTED COMMAND */
2835 buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
2836 /* PROTOCOL SERVICE CRC ERROR */
2837 buffer[SPC_ASC_KEY_OFFSET] = 0x47;
2838 /* N/A */
2839 buffer[SPC_ASCQ_KEY_OFFSET] = 0x05;
2840 break;
2841 case TCM_SNACK_REJECTED:
2842 /* CURRENT ERROR */
2843 buffer[0] = 0x70;
2844 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2845 /* ABORTED COMMAND */
2846 buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
2847 /* READ ERROR */
2848 buffer[SPC_ASC_KEY_OFFSET] = 0x11;
2849 /* FAILED RETRANSMISSION REQUEST */
2850 buffer[SPC_ASCQ_KEY_OFFSET] = 0x13;
2851 break;
2852 case TCM_WRITE_PROTECTED:
2853 /* CURRENT ERROR */
2854 buffer[0] = 0x70;
2855 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2856 /* DATA PROTECT */
2857 buffer[SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
2858 /* WRITE PROTECTED */
2859 buffer[SPC_ASC_KEY_OFFSET] = 0x27;
2860 break;
2861 case TCM_ADDRESS_OUT_OF_RANGE:
2862 /* CURRENT ERROR */
2863 buffer[0] = 0x70;
2864 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2865 /* ILLEGAL REQUEST */
2866 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
2867 /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
2868 buffer[SPC_ASC_KEY_OFFSET] = 0x21;
2869 break;
2870 case TCM_CHECK_CONDITION_UNIT_ATTENTION:
2871 /* CURRENT ERROR */
2872 buffer[0] = 0x70;
2873 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2874 /* UNIT ATTENTION */
2875 buffer[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
2876 core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
2877 buffer[SPC_ASC_KEY_OFFSET] = asc;
2878 buffer[SPC_ASCQ_KEY_OFFSET] = ascq;
2879 break;
2880 case TCM_CHECK_CONDITION_NOT_READY:
2881 /* CURRENT ERROR */
2882 buffer[0] = 0x70;
2883 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2884 /* Not Ready */
2885 buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
2886 transport_get_sense_codes(cmd, &asc, &ascq);
2887 buffer[SPC_ASC_KEY_OFFSET] = asc;
2888 buffer[SPC_ASCQ_KEY_OFFSET] = ascq;
2889 break;
2890 case TCM_MISCOMPARE_VERIFY:
2891 /* CURRENT ERROR */
2892 buffer[0] = 0x70;
2893 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2894 buffer[SPC_SENSE_KEY_OFFSET] = MISCOMPARE;
2895 /* MISCOMPARE DURING VERIFY OPERATION */
2896 buffer[SPC_ASC_KEY_OFFSET] = 0x1d;
2897 buffer[SPC_ASCQ_KEY_OFFSET] = 0x00;
2898 break;
2899 case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
2900 /* CURRENT ERROR */
2901 buffer[0] = 0x70;
2902 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2903 /* ILLEGAL REQUEST */
2904 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
2905 /* LOGICAL BLOCK GUARD CHECK FAILED */
2906 buffer[SPC_ASC_KEY_OFFSET] = 0x10;
2907 buffer[SPC_ASCQ_KEY_OFFSET] = 0x01;
2908 transport_err_sector_info(buffer, cmd->bad_sector);
2909 break;
2910 case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
2911 /* CURRENT ERROR */
2912 buffer[0] = 0x70;
2913 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2914 /* ILLEGAL REQUEST */
2915 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
2916 /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
2917 buffer[SPC_ASC_KEY_OFFSET] = 0x10;
2918 buffer[SPC_ASCQ_KEY_OFFSET] = 0x02;
2919 transport_err_sector_info(buffer, cmd->bad_sector);
2920 break;
2921 case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
2922 /* CURRENT ERROR */
2923 buffer[0] = 0x70;
2924 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2925 /* ILLEGAL REQUEST */
2926 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
2927 /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
2928 buffer[SPC_ASC_KEY_OFFSET] = 0x10;
2929 buffer[SPC_ASCQ_KEY_OFFSET] = 0x03;
2930 transport_err_sector_info(buffer, cmd->bad_sector);
2931 break;
2932 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
2933 default:
2934 /* CURRENT ERROR */
2935 buffer[0] = 0x70;
2936 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
2937 /*
2938 * Returning ILLEGAL REQUEST would cause immediate IO errors on
2939 * Solaris initiators. Returning NOT READY instead means the
2940 * operations will be retried a finite number of times and we
2941 * can survive intermittent errors.
2942 */
2943 buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
2944 /* LOGICAL UNIT COMMUNICATION FAILURE */
2945 buffer[SPC_ASC_KEY_OFFSET] = 0x08;
2946 break;
2947 }
2948 /*
2949 * This code uses linux/include/scsi/scsi.h SAM status codes!
2950 */
2951 cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
2952 /*
2953 * Automatically padded, this value is encoded in the fabric's
2954 * data_length response PDU containing the SCSI defined sense data.
2955 */
2956 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
2957
2958 after_reason:
2959 trace_target_cmd_complete(cmd);
2960 return cmd->se_tfo->queue_status(cmd);
2961 }
2962 EXPORT_SYMBOL(transport_send_check_condition_and_sense);
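
/*
 * Illustrative sketch only, not part of this driver: what a fabric's
 * ->queue_status() consumes once the CHECK CONDITION state above has been
 * built. The my_fabric_* name is hypothetical; the se_cmd fields are the
 * real ones filled in by transport_send_check_condition_and_sense().
 */
static int my_fabric_queue_status(struct se_cmd *se_cmd)
{
	/*
	 * se_cmd->scsi_status holds SAM_STAT_CHECK_CONDITION,
	 * se_cmd->sense_buffer holds fixed-format (0x70) sense data, and
	 * se_cmd->scsi_sense_length is TRANSPORT_SENSE_BUFFER. Ship these
	 * in the transport's response PDU, returning -EAGAIN/-ENOMEM only
	 * for retryable queue-full conditions.
	 */
	return 0;
}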
2963
2964 int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
2965 {
2966 if (!(cmd->transport_state & CMD_T_ABORTED))
2967 return 0;
2968
2969 /*
2970 * If cmd has been aborted but either no status is to be sent or it has
2971 * already been sent, just return
2972 */
2973 if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS))
2974 return 1;
2975
2976 pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08llx\n",
2977 cmd->t_task_cdb[0], cmd->tag);
2978
2979 cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
2980 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
2981 trace_target_cmd_complete(cmd);
2982 cmd->se_tfo->queue_status(cmd);
2983
2984 return 1;
2985 }
2986 EXPORT_SYMBOL(transport_check_aborted_status);
2987
2988 void transport_send_task_abort(struct se_cmd *cmd)
2989 {
2990 unsigned long flags;
2991
2992 spin_lock_irqsave(&cmd->t_state_lock, flags);
2993 if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
2994 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2995 return;
2996 }
2997 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2998
2999 /*
3000 * If there are still expected incoming fabric WRITEs, we wait
3001 * until they have completed before sending a TASK_ABORTED
3002 * response. This response with TASK_ABORTED status will be
3003 * queued back to fabric module by transport_check_aborted_status().
3004 */
3005 if (cmd->data_direction == DMA_TO_DEVICE) {
3006 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
3007 cmd->transport_state |= CMD_T_ABORTED;
3008 cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
3009 return;
3010 }
3011 }
3012 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
3013
3014 transport_lun_remove_cmd(cmd);
3015
3016 pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
3017 cmd->t_task_cdb[0], cmd->tag);
3018
3019 trace_target_cmd_complete(cmd);
3020 cmd->se_tfo->queue_status(cmd);
3021 }
3022
3023 static void target_tmr_work(struct work_struct *work)
3024 {
3025 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
3026 struct se_device *dev = cmd->se_dev;
3027 struct se_tmr_req *tmr = cmd->se_tmr_req;
3028 int ret;
3029
3030 switch (tmr->function) {
3031 case TMR_ABORT_TASK:
3032 core_tmr_abort_task(dev, tmr, cmd->se_sess);
3033 break;
3034 case TMR_ABORT_TASK_SET:
3035 case TMR_CLEAR_ACA:
3036 case TMR_CLEAR_TASK_SET:
3037 tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
3038 break;
3039 case TMR_LUN_RESET:
3040 ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
3041 tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
3042 TMR_FUNCTION_REJECTED;
3043 break;
3044 case TMR_TARGET_WARM_RESET:
3045 tmr->response = TMR_FUNCTION_REJECTED;
3046 break;
3047 case TMR_TARGET_COLD_RESET:
3048 tmr->response = TMR_FUNCTION_REJECTED;
3049 break;
3050 default:
3051 pr_err("Unknown TMR function: 0x%02x.\n",
3052 tmr->function);
3053 tmr->response = TMR_FUNCTION_REJECTED;
3054 break;
3055 }
3056
3057 cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
3058 cmd->se_tfo->queue_tm_rsp(cmd);
3059
3060 transport_cmd_check_stop_to_fabric(cmd);
3061 }
3062
3063 int transport_generic_handle_tmr(
3064 struct se_cmd *cmd)
3065 {
3066 unsigned long flags;
3067
3068 spin_lock_irqsave(&cmd->t_state_lock, flags);
3069 cmd->transport_state |= CMD_T_ACTIVE;
3070 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3071
3072 INIT_WORK(&cmd->work, target_tmr_work);
3073 queue_work(cmd->se_dev->tmr_wq, &cmd->work);
3074 return 0;
3075 }
3076 EXPORT_SYMBOL(transport_generic_handle_tmr);
3077
3078 bool
3079 target_check_wce(struct se_device *dev)
3080 {
3081 bool wce = false;
3082
3083 if (dev->transport->get_write_cache)
3084 wce = dev->transport->get_write_cache(dev);
3085 else if (dev->dev_attrib.emulate_write_cache > 0)
3086 wce = true;
3087
3088 return wce;
3089 }
3090
3091 bool
3092 target_check_fua(struct se_device *dev)
3093 {
3094 return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0;
3095 }
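
/*
 * Illustrative sketch only, not part of this driver: a backend consulting
 * the helpers above to decide whether a WRITE must be forced to stable
 * media. The my_backend_use_fua() name is hypothetical; SCF_FUA is the
 * existing se_cmd flag.
 */
static bool my_backend_use_fua(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/* Honour the FUA bit only when the device advertises FUA WRITE. */
	return (cmd->se_cmd_flags & SCF_FUA) && target_check_fua(dev);
}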