/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

DEFINE_MUTEX(g_device_mutex);
LIST_HEAD(g_device_list);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

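/*
 * Map the fabric-supplied unpacked LUN onto this session's se_dev_entry,
 * enforce read-only access, and take a percpu lun_ref on the resolved
 * se_lun.  Falls back to the TPG's virtual LUN 0 so REPORT_LUNS, et al
 * still succeed when no MappedLUN=0 exists for the initiator port.
 */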
sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_device *dev;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return TCM_NON_EXISTENT_LUN;

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
	if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		struct se_dev_entry *deve = se_cmd->se_deve;

		deve->total_cmds++;

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
			return TCM_WRITE_PROTECTED;
		}

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			deve->write_bytes += se_cmd->data_length;
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			deve->read_bytes += se_cmd->data_length;

		se_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return TCM_NON_EXISTENT_LUN;
		}
		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE))
			return TCM_WRITE_PROTECTED;

		se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;

	dev = se_lun->lun_se_dev;
	atomic_long_inc(&dev->num_cmds);
	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length, &dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length, &dev->read_bytes);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);

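/*
 * LUN lookup variant for task management requests: resolves the LUN the
 * same way as transport_lookup_cmd_lun(), then queues the TMR on the
 * backend device's dev_tmr_list.
 */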
int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return -ENODEV;

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
	deve = se_cmd->se_deve;

	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		se_tmr->tmr_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08x\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		return -ENODEV;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;
	se_tmr->tmr_dev = se_lun->lun_se_dev;

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_port *port;
	struct se_portal_group *tpg = nacl->se_tpg;
	u32 i;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		lun = deve->se_lun;
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		port = lun->lun_sep;
		if (!port) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (port->sep_rtpi != rtpi)
			continue;

		atomic_inc_mb(&deve->pr_ref_count);
		spin_unlock_irq(&nacl->device_list_lock);

		return deve;
	}
	spin_unlock_irq(&nacl->device_list_lock);

	return NULL;
}

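/*
 * Tear down every MappedLUN entry for this NodeACL and release the
 * device_list array itself; called when the NodeACL is being freed.
 */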
int core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	u32 i;

	if (!nacl->device_list)
		return 0;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		lun = deve->se_lun;

		spin_unlock_irq(&nacl->device_list_lock);
		core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);

	array_free(nacl->device_list, TRANSPORT_MAX_LUNS_PER_TPG);
	nacl->device_list = NULL;

	return 0;
}

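/*
 * Flip an existing se_dev_entry between READ_WRITE and READ_ONLY under
 * device_list_lock; used when a MappedLUN's write protect setting is
 * changed.
 */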
void core_update_device_list_access(
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);
	deve = nacl->device_list[mapped_lun];
	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}
	spin_unlock_irq(&nacl->device_list_lock);
}

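/*
 * Install (or update, for the demo mode -> explicit LUN ACL transition)
 * the se_dev_entry mapping of mapped_lun to the passed se_lun for this
 * NodeACL, and link the entry onto the port's ALUA list.
 */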
/*      core_enable_device_list_for_node():
 *
 *
 */
int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);

	deve = nacl->device_list[mapped_lun];

	/*
	 * Check if the call is handling demo mode -> explicit LUN ACL
	 * transition.  This transition must be for the same struct se_lun
	 * + mapped_lun that was setup in demo mode..
	 */
	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		if (deve->se_lun_acl != NULL) {
			pr_err("struct se_dev_entry->se_lun_acl"
			       " already set for demo mode -> explicit"
			       " LUN ACL transition\n");
			spin_unlock_irq(&nacl->device_list_lock);
			return -EINVAL;
		}
		if (deve->se_lun != lun) {
			pr_err("struct se_dev_entry->se_lun does"
			       " not match passed struct se_lun for demo mode"
			       " -> explicit LUN ACL transition\n");
			spin_unlock_irq(&nacl->device_list_lock);
			return -EINVAL;
		}
		deve->se_lun_acl = lun_acl;

		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
		}

		spin_unlock_irq(&nacl->device_list_lock);
		return 0;
	}

	deve->se_lun = lun;
	deve->se_lun_acl = lun_acl;
	deve->mapped_lun = mapped_lun;
	deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;

	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}

	deve->creation_time = get_jiffies_64();
	deve->attach_count++;
	spin_unlock_irq(&nacl->device_list_lock);

	spin_lock_bh(&port->sep_alua_lock);
	list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
	spin_unlock_bh(&port->sep_alua_lock);

	return 0;
}

/*      core_disable_device_list_for_node():
 *
 *
 */
int core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve = nacl->device_list[mapped_lun];

	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * port->sep_alua_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->alua_port_list from
	 * port->sep_alua_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock_bh(&port->sep_alua_lock);
	list_del(&deve->alua_port_list);
	spin_unlock_bh(&port->sep_alua_lock);
	/*
	 * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
	 * PR operation to complete.
	 */
	while (atomic_read(&deve->pr_ref_count) != 0)
		cpu_relax();

	spin_lock_irq(&nacl->device_list_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(deve);
	deve->se_lun = NULL;
	deve->se_lun_acl = NULL;
	deve->lun_flags = 0;
	deve->creation_time = 0;
	deve->attach_count--;
	spin_unlock_irq(&nacl->device_list_lock);

	core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
	return 0;
}

/*      core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;
	u32 i;

	spin_lock_irq(&tpg->acl_node_lock);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
		spin_unlock_irq(&tpg->acl_node_lock);

		spin_lock_irq(&nacl->device_list_lock);
		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
			deve = nacl->device_list[i];
			if (lun != deve->se_lun)
				continue;
			spin_unlock_irq(&nacl->device_list_lock);

			core_disable_device_list_for_node(lun, NULL,
				deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
				nacl, tpg);

			spin_lock_irq(&nacl->device_list_lock);
		}
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_irq(&tpg->acl_node_lock);
	}
	spin_unlock_irq(&tpg->acl_node_lock);
}

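/*
 * Allocate a struct se_port and assign it the next free RELATIVE TARGET
 * PORT IDENTIFIER, retrying on 16-bit wrap until a value unique across
 * dev->dev_sep_list is found.
 */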
static struct se_port *core_alloc_port(struct se_device *dev)
{
	struct se_port *port, *port_tmp;

	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
	if (!port) {
		pr_err("Unable to allocate struct se_port\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&port->sep_alua_list);
	INIT_LIST_HEAD(&port->sep_list);
	atomic_set(&port->sep_tg_pt_secondary_offline, 0);
	spin_lock_init(&port->sep_alua_lock);
	mutex_init(&port->sep_tg_pt_md_mutex);

	spin_lock(&dev->se_port_lock);
	if (dev->dev_port_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
				" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return ERR_PTR(-ENOSPC);
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code      Description
	 * 0h        Reserved
	 * 1h        Relative port 1, historically known as port A
	 * 2h        Relative port 2, historically known as port B
	 * 3h to FFFFh    Relative port 3 through 65 535
	 */
	port->sep_rtpi = dev->dev_rpti_counter++;
	if (!port->sep_rtpi)
		goto again;

	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (port->sep_rtpi == port_tmp->sep_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return port;
}

static void core_export_port(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_port *port,
	struct se_lun *lun)
{
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

	spin_lock(&dev->se_port_lock);
	spin_lock(&lun->lun_sep_lock);
	port->sep_tpg = tpg;
	port->sep_lun = lun;
	lun->lun_sep = port;
	spin_unlock(&lun->lun_sep_lock);

	list_add_tail(&port->sep_list, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
			pr_err("Unable to allocate t10_alua_tg_pt"
					"_gp_member_t\n");
			return;
		}
		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
				dev->t10_alua.default_tg_pt_gp);
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		pr_debug("%s/%s: Adding to default ALUA Target Port"
			" Group: alua/default_tg_pt_gp\n",
			dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
	}

	dev->dev_port_count++;
	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}

/*
 *	Called with struct se_device->se_port_lock spinlock held.
 */
static void core_release_port(struct se_device *dev, struct se_port *port)
	__releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
{
	/*
	 * Wait for any port reference for PR ALL_TG_PT=1 operation
	 * to complete in __core_scsi3_alloc_registration()
	 */
	spin_unlock(&dev->se_port_lock);
	if (atomic_read(&port->sep_tg_pt_ref_cnt))
		cpu_relax();
	spin_lock(&dev->se_port_lock);

	core_alua_free_tg_pt_gp_mem(port);

	list_del(&port->sep_list);
	dev->dev_port_count--;
	kfree(port);
}

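/*
 * Export a device as a LUN of the given TPG: allocate an se_port,
 * bump export_count, and attach the port to the default ALUA target
 * port group via core_export_port().
 */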
int core_dev_export(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_hba *hba = dev->se_hba;
	struct se_port *port;

	port = core_alloc_port(dev);
	if (IS_ERR(port))
		return PTR_ERR(port);

	lun->lun_se_dev = dev;

	spin_lock(&hba->device_lock);
	dev->export_count++;
	spin_unlock(&hba->device_lock);

	core_export_port(dev, tpg, port, lun);
	return 0;
}

void core_dev_unexport(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_hba *hba = dev->se_hba;
	struct se_port *port = lun->lun_sep;

	spin_lock(&lun->lun_sep_lock);
	if (lun->lun_se_dev == NULL) {
		spin_unlock(&lun->lun_sep_lock);
		return;
	}
	spin_unlock(&lun->lun_sep_lock);

	spin_lock(&dev->se_port_lock);
	core_release_port(dev, port);
	spin_unlock(&dev->se_port_lock);

	spin_lock(&hba->device_lock);
	dev->export_count--;
	spin_unlock(&hba->device_lock);

	lun->lun_sep = NULL;
	lun->lun_se_dev = NULL;
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

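/*
 * Round max_sectors down to a multiple of (PAGE_SIZE / block_size).
 * For example (illustrative values): with 4k pages and block_size=512,
 * the alignment is 8 sectors, so max_sectors=16385 rounds down to 16384.
 */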
static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}

int se_dev_set_max_unmap_lba_count(
	struct se_device *dev,
	u32 max_unmap_lba_count)
{
	dev->dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
	pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
			dev, dev->dev_attrib.max_unmap_lba_count);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_max_unmap_lba_count);

int se_dev_set_max_unmap_block_desc_count(
	struct se_device *dev,
	u32 max_unmap_block_desc_count)
{
	dev->dev_attrib.max_unmap_block_desc_count =
		max_unmap_block_desc_count;
	pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
			dev, dev->dev_attrib.max_unmap_block_desc_count);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_max_unmap_block_desc_count);

int se_dev_set_unmap_granularity(
	struct se_device *dev,
	u32 unmap_granularity)
{
	dev->dev_attrib.unmap_granularity = unmap_granularity;
	pr_debug("dev[%p]: Set unmap_granularity: %u\n",
			dev, dev->dev_attrib.unmap_granularity);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_unmap_granularity);

int se_dev_set_unmap_granularity_alignment(
	struct se_device *dev,
	u32 unmap_granularity_alignment)
{
	dev->dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
	pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
			dev, dev->dev_attrib.unmap_granularity_alignment);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_unmap_granularity_alignment);

int se_dev_set_max_write_same_len(
	struct se_device *dev,
	u32 max_write_same_len)
{
	dev->dev_attrib.max_write_same_len = max_write_same_len;
	pr_debug("dev[%p]: Set max_write_same_len: %u\n",
			dev, dev->dev_attrib.max_write_same_len);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_max_write_same_len);

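/*
 * Copy the backstore's configfs item name into the INQUIRY model field,
 * truncating it to the 16 bytes that field allows.
 */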
static void dev_set_t10_wwn_model_alias(struct se_device *dev)
{
	const char *configname;

	configname = config_item_name(&dev->dev_group.cg_item);
	if (strlen(configname) >= 16) {
		pr_warn("dev[%p]: Backstore name '%s' is too long for "
			"INQUIRY_MODEL, truncating to 16 bytes\n", dev,
			configname);
	}
	snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
}

int se_dev_set_emulate_model_alias(struct se_device *dev, int flag)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change model alias"
			" while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}

	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag)
		dev_set_t10_wwn_model_alias(dev);
	else
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
	dev->dev_attrib.emulate_model_alias = flag;

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_model_alias);

int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		pr_err("dpo_emulated not supported\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_dpo);

int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (flag &&
	    dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("emulate_fua_write not supported for pSCSI\n");
		return -EINVAL;
	}
	dev->dev_attrib.emulate_fua_write = flag;
	pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
			dev, dev->dev_attrib.emulate_fua_write);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_fua_write);

int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		pr_err("fua read emulation not supported\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_fua_read);

int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (flag &&
	    dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("emulate_write_cache not supported for pSCSI\n");
		return -EINVAL;
	}
	if (flag &&
	    dev->transport->get_write_cache) {
		pr_err("emulate_write_cache not supported for this device\n");
		return -EINVAL;
	}

	dev->dev_attrib.emulate_write_cache = flag;
	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
			dev, dev->dev_attrib.emulate_write_cache);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_write_cache);

int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1) && (flag != 2)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" UA_INTRLCK_CTRL while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_ua_intlck_ctrl = flag;
	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		dev, dev->dev_attrib.emulate_ua_intlck_ctrl);

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_ua_intlck_ctrl);

int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TAS while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_tas = flag;
	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		dev, (dev->dev_attrib.emulate_tas) ? "Enabled" : "Disabled");

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_tas);

int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->dev_attrib.emulate_tpu = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
				dev, flag);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_tpu);

int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->dev_attrib.emulate_tpws = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
				dev, flag);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_tpws);

int se_dev_set_emulate_caw(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_caw = flag;
	pr_debug("dev[%p]: SE Device CompareAndWrite (AtomicTestandSet): %d\n",
		 dev, flag);

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_caw);

int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_3pc = flag;
	pr_debug("dev[%p]: SE Device 3rd Party Copy (EXTENDED_COPY): %d\n",
		dev, flag);

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_3pc);

int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
{
	int rc, old_prot = dev->dev_attrib.pi_prot_type;

	if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
		pr_err("Illegal value %d for pi_prot_type\n", flag);
		return -EINVAL;
	}
	if (flag == 2) {
		pr_err("DIF TYPE2 protection currently not supported\n");
		return -ENOSYS;
	}
	if (dev->dev_attrib.hw_pi_prot_type) {
		pr_warn("DIF protection enabled on underlying hardware,"
			" ignoring\n");
		return 0;
	}
	if (!dev->transport->init_prot || !dev->transport->free_prot) {
		/* 0 is only allowed value for non-supporting backends */
		if (flag == 0)
			return 0;

		pr_err("DIF protection not supported by backend: %s\n",
		       dev->transport->name);
		return -ENOSYS;
	}
	if (!(dev->dev_flags & DF_CONFIGURED)) {
		pr_err("DIF protection requires device to be configured\n");
		return -ENODEV;
	}
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device PROT type while"
		       " export_count is %d\n", dev, dev->export_count);
		return -EINVAL;
	}

	dev->dev_attrib.pi_prot_type = flag;

	if (flag && !old_prot) {
		rc = dev->transport->init_prot(dev);
		if (rc) {
			dev->dev_attrib.pi_prot_type = old_prot;
			return rc;
		}

	} else if (!flag && old_prot) {
		dev->transport->free_prot(dev);
	}
	pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);

	return 0;
}
EXPORT_SYMBOL(se_dev_set_pi_prot_type);

int se_dev_set_pi_prot_format(struct se_device *dev, int flag)
{
	int rc;

	if (flag != 1) {
		pr_err("Illegal value %d for pi_prot_format\n", flag);
		return -EINVAL;
	}
	if (!dev->transport->format_prot) {
		pr_err("DIF protection format not supported by backend %s\n",
		       dev->transport->name);
		return -ENOSYS;
	}
	if (!(dev->dev_flags & DF_CONFIGURED)) {
		pr_err("DIF protection format requires device to be configured\n");
		return -ENODEV;
	}
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to format SE Device PROT type while"
		       " export_count is %d\n", dev, dev->export_count);
		return -EINVAL;
	}

	rc = dev->transport->format_prot(dev);
	if (rc)
		return rc;

	pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);

	return 0;
}
EXPORT_SYMBOL(se_dev_set_pi_prot_format);

int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.enforce_pr_isids = flag;
	pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
		(dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
	return 0;
}
EXPORT_SYMBOL(se_dev_set_enforce_pr_isids);

int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to set force_pr_aptpl while"
		       " export_count is %d\n", dev, dev->export_count);
		return -EINVAL;
	}

	dev->dev_attrib.force_pr_aptpl = flag;
	pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", dev, flag);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_force_pr_aptpl);

int se_dev_set_is_nonrot(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.is_nonrot = flag;
	pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
	       dev, flag);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_is_nonrot);

int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
{
	if (flag != 0) {
		pr_err("dev[%p]: SE Device emulation of restricted"
			" reordering not implemented\n", dev);
		return -ENOSYS;
	}
	dev->dev_attrib.emulate_rest_reord = flag;
	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_rest_reord);

/*
 * Note, this can only be called on unexported SE Device Object.
 */
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TCQ while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (!queue_depth) {
		pr_err("dev[%p]: Illegal ZERO value for queue"
			"_depth\n", dev);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (queue_depth > dev->dev_attrib.hw_queue_depth) {
			pr_err("dev[%p]: Passed queue_depth: %u"
				" exceeds TCM/SE_Device TCQ: %u\n",
				dev, queue_depth,
				dev->dev_attrib.hw_queue_depth);
			return -EINVAL;
		}
	} else {
		if (queue_depth > dev->dev_attrib.queue_depth) {
			if (queue_depth > dev->dev_attrib.hw_queue_depth) {
				pr_err("dev[%p]: Passed queue_depth:"
					" %u exceeds TCM/SE_Device MAX"
					" TCQ: %u\n", dev, queue_depth,
					dev->dev_attrib.hw_queue_depth);
				return -EINVAL;
			}
		}
	}

	dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
			dev, queue_depth);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_queue_depth);

int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
{
	int block_size = dev->dev_attrib.block_size;

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" fabric_max_sectors while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (!fabric_max_sectors) {
		pr_err("dev[%p]: Illegal ZERO value for"
			" fabric_max_sectors\n", dev);
		return -EINVAL;
	}
	if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
		pr_err("dev[%p]: Passed fabric_max_sectors: %u less than"
			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors,
			DA_STATUS_MAX_SECTORS_MIN);
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (fabric_max_sectors > dev->dev_attrib.hw_max_sectors) {
			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
				" greater than TCM/SE_Device max_sectors:"
				" %u\n", dev, fabric_max_sectors,
				dev->dev_attrib.hw_max_sectors);
			return -EINVAL;
		}
	} else {
		if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
				" greater than DA_STATUS_MAX_SECTORS_MAX:"
				" %u\n", dev, fabric_max_sectors,
				DA_STATUS_MAX_SECTORS_MAX);
			return -EINVAL;
		}
	}
	/*
	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
	 */
	if (!block_size) {
		block_size = 512;
		pr_warn("Defaulting to 512 for zero block_size\n");
	}
	fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
						      block_size);

	dev->dev_attrib.fabric_max_sectors = fabric_max_sectors;
	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
			dev, fabric_max_sectors);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_fabric_max_sectors);

int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" optimal_sectors while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Passed optimal_sectors cannot be"
				" changed for TCM/pSCSI\n", dev);
		return -EINVAL;
	}
	if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
			" greater than fabric_max_sectors: %u\n", dev,
			optimal_sectors, dev->dev_attrib.fabric_max_sectors);
		return -EINVAL;
	}

	dev->dev_attrib.optimal_sectors = optimal_sectors;
	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
			dev, optimal_sectors);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_optimal_sectors);

int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device block_size"
			" while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}

	if ((block_size != 512) &&
	    (block_size != 1024) &&
	    (block_size != 2048) &&
	    (block_size != 4096)) {
		pr_err("dev[%p]: Illegal value for block_size: %u"
			" for SE device, must be 512, 1024, 2048 or 4096\n",
			dev, block_size);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Not allowed to change block_size for"
			" Physical Device, use for Linux/SCSI to change"
			" block_size for underlying hardware\n", dev);
		return -EINVAL;
	}

	dev->dev_attrib.block_size = block_size;
	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
			dev, block_size);

	if (dev->dev_attrib.max_bytes_per_io)
		dev->dev_attrib.hw_max_sectors =
			dev->dev_attrib.max_bytes_per_io / block_size;

	return 0;
}
EXPORT_SYMBOL(se_dev_set_block_size);

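/*
 * Allocate and activate a new LUN in the TPG for the given device, then
 * refresh LUN mappings for any dynamically generated (demo mode)
 * NodeACLs so they can see the new LUN.
 */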
struct se_lun *core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	u32 unpacked_lun)
{
	struct se_lun *lun;
	int rc;

	lun = core_tpg_alloc_lun(tpg, unpacked_lun);
	if (IS_ERR(lun))
		return lun;

	rc = core_tpg_add_lun(tpg, lun,
				TRANSPORT_LUNFLAGS_READ_WRITE, dev);
	if (rc < 0)
		return ERR_PTR(rc);

	pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;
		spin_lock_irq(&tpg->acl_node_lock);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				spin_unlock_irq(&tpg->acl_node_lock);
				core_tpg_add_node_to_devs(acl, tpg);
				spin_lock_irq(&tpg->acl_node_lock);
			}
		}
		spin_unlock_irq(&tpg->acl_node_lock);
	}

	return lun;
}

1295 void core_dev_del_lun(
1296 struct se_portal_group
*tpg
,
1299 pr_debug("%s_TPG[%u]_LUN[%u] - Deactivating %s Logical Unit from"
1300 " device object\n", tpg
->se_tpg_tfo
->get_fabric_name(),
1301 tpg
->se_tpg_tfo
->tpg_get_tag(tpg
), lun
->unpacked_lun
,
1302 tpg
->se_tpg_tfo
->get_fabric_name());
1304 core_tpg_remove_lun(tpg
, lun
);
1307 struct se_lun
*core_get_lun_from_tpg(struct se_portal_group
*tpg
, u32 unpacked_lun
)
1311 spin_lock(&tpg
->tpg_lun_lock
);
1312 if (unpacked_lun
> (TRANSPORT_MAX_LUNS_PER_TPG
-1)) {
1313 pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
1314 "_PER_TPG-1: %u for Target Portal Group: %hu\n",
1315 tpg
->se_tpg_tfo
->get_fabric_name(), unpacked_lun
,
1316 TRANSPORT_MAX_LUNS_PER_TPG
-1,
1317 tpg
->se_tpg_tfo
->tpg_get_tag(tpg
));
1318 spin_unlock(&tpg
->tpg_lun_lock
);
1321 lun
= tpg
->tpg_lun_list
[unpacked_lun
];
1323 if (lun
->lun_status
!= TRANSPORT_LUN_STATUS_FREE
) {
1324 pr_err("%s Logical Unit Number: %u is not free on"
1325 " Target Portal Group: %hu, ignoring request.\n",
1326 tpg
->se_tpg_tfo
->get_fabric_name(), unpacked_lun
,
1327 tpg
->se_tpg_tfo
->tpg_get_tag(tpg
));
1328 spin_unlock(&tpg
->tpg_lun_lock
);
1331 spin_unlock(&tpg
->tpg_lun_lock
);
/*      core_dev_get_lun():
 *
 *
 */
static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
			"_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u32 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	INIT_LIST_HEAD(&lacl->lacl_list);
	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;
	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
		 nacl->initiatorname);

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	u32 unpacked_lun,
	u32 lun_access)
{
	struct se_lun *lun;
	struct se_node_acl *nacl;

	lun = core_dev_get_lun(tpg, unpacked_lun);
	if (!lun) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return -EINVAL;
	}

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access, nacl, tpg) < 0)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
	atomic_inc_mb(&lun->lun_acl_count);
	spin_unlock(&lun->lun_acl_lock);

	pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
		lacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, nacl,
					    lacl->mapped_lun);
	return 0;
}

/*      core_dev_del_initiator_node_lun_acl():
 *
 *
 */
int core_dev_del_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_node_acl *nacl;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_del(&lacl->lacl_list);
	atomic_dec_mb(&lun->lun_acl_count);
	spin_unlock(&lun->lun_acl_lock);

	core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
		TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);

	lacl->se_lun = NULL;

	pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %u\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		lacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->get_fabric_name(),
		lacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

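/*
 * Emit the Vendor/Model/Revision/Type INQUIRY fields in Linux/SCSI style
 * to the kernel log; non-printable bytes are rendered as spaces.
 */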
static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
}

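/*
 * Allocate a backend se_device via the HBA's transport and initialize all
 * generic lists, locks and default attributes, including the embedded
 * xcopy_lun used by EXTENDED_COPY.
 */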
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;

	dev = hba->transport->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->dev_link_magic = SE_DEV_LINK_MAGIC;
	dev->se_hba = hba;
	dev->transport = hba->transport;
	dev->prot_length = sizeof(struct se_dif_v1_tuple);

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	INIT_LIST_HEAD(&dev->g_dev_node);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	atomic_set(&dev->dev_ordered_id, 0);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	dev->t10_wwn.t10_dev = dev;
	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO;
	dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
	dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
	dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
	dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;

	xcopy_lun = &dev->xcopy_lun;
	xcopy_lun->lun_se_dev = dev;
	init_completion(&xcopy_lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_acl_list);
	spin_lock_init(&xcopy_lun->lun_acl_lock);
	spin_lock_init(&xcopy_lun->lun_sep_lock);
	init_completion(&xcopy_lun->lun_ref_comp);

	return dev;
}

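/*
 * Second-stage device setup once configfs attributes have been written:
 * let the backend configure itself, seed block_size/queue_depth from the
 * hardware values, set up ALUA and the tmr workqueue, and publish the
 * device on g_device_list.
 */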
int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret;

	if (dev->dev_flags & DF_CONFIGURED) {
		pr_err("se_dev->se_dev_ptr already set for storage"
				" object\n");
		return -EEXIST;
	}

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out;
	dev->dev_flags |= DF_CONFIGURED;

	/*
	 * XXX: there is not much point to have two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
				      dev->transport->name);
	if (!dev->tmr_wq) {
		pr_err("Unable to create tmr workqueue for %s\n",
			dev->transport->name);
		ret = -ENOMEM;
		goto out_free_alua;
	}

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 */
	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
		strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
		strncpy(&dev->t10_wwn.revision[0],
			dev->transport->inquiry_rev, 4);
	}

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	mutex_lock(&g_device_mutex);
	list_add_tail(&dev->g_dev_node, &g_device_list);
	mutex_unlock(&g_device_mutex);

	return 0;

out_free_alua:
	core_alua_free_lu_gp_mem(dev);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}

void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (dev->dev_flags & DF_CONFIGURED) {
		destroy_workqueue(dev->tmr_wq);

		mutex_lock(&g_device_mutex);
		list_del(&dev->g_dev_node);
		mutex_unlock(&g_device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	dev->transport->free_device(dev);
}

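/*
 * Create the global rd_mcp backed device that backs each TPG's virtual
 * LUN 0 (see transport_lookup_cmd_lun() above).
 */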
int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}

void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}