/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
extern struct se_device *g_lun0_dev;

static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);
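/*
 * tpg_lock protects tpg_list, the file-local registry of every
 * struct se_portal_group added via core_tpg_register() below.
 */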
/*	core_clear_initiator_node_from_tpg():
 *
 *
 */
static void core_clear_initiator_node_from_tpg(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	int i;
	struct se_dev_entry *deve;
	struct se_lun *lun;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}

		lun = deve->se_lun;
		spin_unlock_irq(&nacl->device_list_lock);
		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);

		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);
}
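/*
 * Note the unlock/relock dance above: core_update_device_list_for_node()
 * takes nacl->device_list_lock itself, so the lock is dropped around the
 * call; the fixed-size device_list array makes it safe to retake the lock
 * and continue the walk from the same index.
 */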
/*	__core_tpg_get_initiator_node_acl():
 *
 *	spin_lock_bh(&tpg->acl_node_lock); must be held when calling
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}
/*	core_tpg_get_initiator_node_acl():
 *
 *
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	spin_lock_irq(&tpg->acl_node_lock);
	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname) &&
		    !acl->dynamic_node_acl) {
			spin_unlock_irq(&tpg->acl_node_lock);
			return acl;
		}
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	return NULL;
}
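/*
 * Unlike the __core_tpg_get_initiator_node_acl() variant above, this
 * locked lookup deliberately skips dynamic (demo-mode generated) NodeACLs,
 * so callers only ever see explicitly configured entries.
 */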
/*	core_tpg_add_node_to_devs():
 *
 *
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg)
{
	int i = 0;
	u32 lun_access = 0;
	struct se_lun *lun;
	struct se_device *dev;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = tpg->tpg_lun_list[i];
		if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
			continue;

		spin_unlock(&tpg->tpg_lun_lock);

		dev = lun->lun_se_dev;
		/*
		 * By default in LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, or READ_ONLY;
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
			"READ-WRITE" : "READ-ONLY");

		core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
				lun_access, acl, tpg, 1);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}
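/*
 * The demo-mode access rule above reduces to: with
 * tpg_check_demo_mode_write_protect() disabled, every active LUN is mapped
 * READ-WRITE; with it enabled, TYPE_DISK devices are mapped READ-ONLY and
 * only non-disk (e.g. optical) devices keep READ-WRITE access.
 */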
/*	core_set_queue_depth_for_node():
 *
 *
 */
static int core_set_queue_depth_for_node(
	struct se_portal_group *tpg,
	struct se_node_acl *acl)
{
	if (!acl->queue_depth) {
		pr_err("Queue depth for %s Initiator Node: %s is 0,"
			" defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
			acl->initiatorname);
		acl->queue_depth = 1;
	}

	return 0;
}
void array_free(void *array, int n)
{
	void **a = array;
	int i;

	for (i = 0; i < n; i++)
		kfree(a[i]);
	kfree(a);
}
static void *array_zalloc(int n, size_t size, gfp_t flags)
{
	void **a;
	int i;

	a = kzalloc(n * sizeof(void *), flags);
	if (!a)
		return NULL;
	for (i = 0; i < n; i++) {
		a[i] = kzalloc(size, flags);
		if (!a[i]) {
			array_free(a, n);
			return NULL;
		}
	}
	return a;
}
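/*
 * array_zalloc() returns an array of n pointers, each to its own zeroed
 * element, instead of one flat n * size allocation; this keeps every
 * kzalloc() small enough to avoid higher-order allocation failures for
 * large TRANSPORT_MAX_LUNS_PER_TPG. array_free() above is its exact
 * inverse.
 */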
/*	core_create_device_list_for_node():
 *
 *
 */
static int core_create_device_list_for_node(struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;
	int i;

	nacl->device_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
			sizeof(struct se_dev_entry), GFP_KERNEL);
	if (!nacl->device_list) {
		pr_err("Unable to allocate memory for"
			" struct se_node_acl->device_list\n");
		return -ENOMEM;
	}
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		atomic_set(&deve->ua_count, 0);
		atomic_set(&deve->pr_ref_count, 0);
		spin_lock_init(&deve->ua_lock);
		INIT_LIST_HEAD(&deve->alua_port_list);
		INIT_LIST_HEAD(&deve->ua_list);
	}

	return 0;
}
/*	core_tpg_check_initiator_node_acl()
 *
 *
 */
struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);
	acl->dynamic_node_acl = 1;

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}
	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg);

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

	pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
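/*
 * A fabric module typically calls core_tpg_check_initiator_node_acl() from
 * its login path once the initiator name is known, falling back to a
 * dynamically generated ACL when demo mode is enabled. A minimal sketch,
 * with the fabric-side names being hypothetical:
 *
 *	nacl = core_tpg_check_initiator_node_acl(&my_tpg->se_tpg,
 *						 login->initiator_name);
 *	if (!nacl)
 *		return -EACCES;	(no NodeACL, and demo mode disabled)
 */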
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}
void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
	int i;
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = tpg->tpg_lun_list[i];

		if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
		    (lun->lun_se_dev == NULL))
			continue;

		spin_unlock(&tpg->tpg_lun_lock);
		core_dev_del_lun(tpg, lun->unpacked_lun);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}
EXPORT_SYMBOL(core_tpg_clear_object_luns);
/*	core_tpg_add_initiator_node_acl():
 *
 *
 */
struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *se_nacl,
	const char *initiatorname,
	u32 queue_depth)
{
	struct se_node_acl *acl = NULL;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			spin_unlock_irq(&tpg->acl_node_lock);
			/*
			 * Release the locally allocated struct se_node_acl
			 * because core_tpg_add_initiator_node_acl() returned
			 * a pointer to an existing demo mode node ACL.
			 */
			if (se_nacl)
				tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
							se_nacl);
			goto done;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return ERR_PTR(-EEXIST);
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	if (!se_nacl) {
		pr_err("struct se_node_acl pointer is NULL\n");
		return ERR_PTR(-EINVAL);
	}
	/*
	 * For v4.x logic the se_node_acl_s is hanging off a fabric
	 * dependent structure allocated via
	 * struct target_core_fabric_ops->fabric_make_nodeacl()
	 */
	acl = se_nacl;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = queue_depth;
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-ENOMEM);
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

done:
	pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
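/*
 * core_tpg_add_initiator_node_acl() is the explicit counterpart to
 * core_tpg_check_initiator_node_acl(): it runs when userspace creates a
 * NodeACL through configfs via the fabric's ->fabric_make_nodeacl(), and
 * either promotes a matching dynamic demo-mode ACL in place or registers
 * the newly allocated one.
 */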
/*	core_tpg_del_initiator_node_acl():
 *
 *
 */
int core_tpg_del_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	int force)
{
	LIST_HEAD(sess_list);
	struct se_session *sess, *sess_tmp;
	unsigned long flags;
	int rc;

	spin_lock_irq(&tpg->acl_node_lock);
	if (acl->dynamic_node_acl)
		acl->dynamic_node_acl = 0;
	list_del(&acl->acl_list);
	tpg->num_node_acls--;
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	acl->acl_stop = 1;

	list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
				sess_acl_list) {
		if (sess->sess_tearing_down != 0)
			continue;

		target_get_session(sess);
		list_move(&sess->sess_acl_list, &sess_list);
	}
	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

	list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) {
		list_del(&sess->sess_acl_list);

		rc = tpg->se_tpg_tfo->shutdown_session(sess);
		target_put_session(sess);
		if (!rc)
			continue;
		target_put_session(sess);
	}
	target_put_nacl(acl);
	/*
	 * Wait for last target_put_nacl() to complete in target_complete_nacl()
	 * for active fabric session transport_deregister_session() callbacks.
	 */
	wait_for_completion(&acl->acl_free_comp);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_clear_initiator_node_from_tpg(acl, tpg);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

	return 0;
}
EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);
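/*
 * The two target_put_session() calls in the loop above are intentional:
 * the first balances the target_get_session() taken while moving the
 * session onto the local sess_list; the second only runs when
 * ->shutdown_session() returned non-zero, and drops the session's own
 * reference so it can actually be released.
 */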
/*	core_tpg_set_initiator_node_queue_depth():
 *
 *
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_portal_group *tpg,
	unsigned char *initiatorname,
	u32 queue_depth,
	int force)
{
	struct se_session *sess, *init_sess = NULL;
	struct se_node_acl *acl;
	unsigned long flags;
	int dynamic_acl = 0;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!acl) {
		pr_err("Access Control List entry for %s Initiator"
			" Node %s does not exist for TPG %hu, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return -ENODEV;
	}
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_irqsave(&tpg->session_lock, flags);
	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;

		if (!force) {
			pr_err("Unable to change queue depth for %s"
				" Initiator Node: %s while session is"
				" operational. To forcefully change the queue"
				" depth and force session reinstatement"
				" use the \"force=1\" parameter.\n",
				tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
			spin_unlock_irqrestore(&tpg->session_lock, flags);

			spin_lock_irq(&tpg->acl_node_lock);
			if (dynamic_acl)
				acl->dynamic_node_acl = 1;
			spin_unlock_irq(&tpg->acl_node_lock);
			return -EEXIST;
		}
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!tpg->se_tpg_tfo->shutdown_session(sess))
			continue;

		init_sess = sess;
		break;
	}

	/*
	 * User has requested to change the queue depth for an Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * core_set_queue_depth_for_node() to set the requested queue depth.
	 *
	 * Finally call tpg->se_tpg_tfo->close_session() to force session
	 * reinstatement to occur if there is an active session for the
	 * $FABRIC_MOD Initiator Node in question.
	 */
	acl->queue_depth = queue_depth;

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		spin_unlock_irqrestore(&tpg->session_lock, flags);
		/*
		 * Force session reinstatement if
		 * core_set_queue_depth_for_node() failed, because we assume
		 * the $FABRIC_MOD has already set the session reinstatement
		 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
		 */
		if (init_sess)
			tpg->se_tpg_tfo->close_session(init_sess);

		spin_lock_irq(&tpg->acl_node_lock);
		if (dynamic_acl)
			acl->dynamic_node_acl = 1;
		spin_unlock_irq(&tpg->acl_node_lock);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&tpg->session_lock, flags);
	/*
	 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
	 * forcefully shutdown the $FABRIC_MOD session/nexus.
	 */
	if (init_sess)
		tpg->se_tpg_tfo->close_session(init_sess);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
		initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	spin_lock_irq(&tpg->acl_node_lock);
	if (dynamic_acl)
		acl->dynamic_node_acl = 1;
	spin_unlock_irq(&tpg->acl_node_lock);

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
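/*
 * Userspace reaches this through the fabric's configfs NodeACL attributes;
 * with the iSCSI fabric, for example, writing a new value to an ACL's
 * cmdsn_depth attribute is the kind of operation that ends up here, with
 * force=1 requesting session reinstatement for a live nexus.
 */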
static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
	/* Set in core_dev_setup_virtual_lun0() */
	struct se_device *dev = g_lun0_dev;
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;
	u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	int ret;

	lun->unpacked_lun = 0;
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&lun->lun_acl_list);
	INIT_LIST_HEAD(&lun->lun_cmd_list);
	spin_lock_init(&lun->lun_acl_lock);
	spin_lock_init(&lun->lun_cmd_lock);
	spin_lock_init(&lun->lun_sep_lock);

	ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
	if (ret < 0)
		return ret;

	return 0;
}
static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
{
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;

	core_tpg_post_dellun(se_tpg, lun);
}
int core_tpg_register(
	struct target_core_fabric_ops *tfo,
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	void *tpg_fabric_ptr,
	int se_tpg_type)
{
	struct se_lun *lun;
	u32 i;

	se_tpg->tpg_lun_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
			sizeof(struct se_lun), GFP_KERNEL);
	if (!se_tpg->tpg_lun_list) {
		pr_err("Unable to allocate struct se_portal_group->"
				"tpg_lun_list\n");
		return -ENOMEM;
	}

	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = se_tpg->tpg_lun_list[i];
		lun->unpacked_lun = i;
		lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
		atomic_set(&lun->lun_acl_count, 0);
		init_completion(&lun->lun_shutdown_comp);
		INIT_LIST_HEAD(&lun->lun_acl_list);
		INIT_LIST_HEAD(&lun->lun_cmd_list);
		spin_lock_init(&lun->lun_acl_lock);
		spin_lock_init(&lun->lun_cmd_lock);
		spin_lock_init(&lun->lun_sep_lock);
	}

	se_tpg->se_tpg_type = se_tpg_type;
	se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
	se_tpg->se_tpg_tfo = tfo;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->acl_node_lock);
	spin_lock_init(&se_tpg->session_lock);
	spin_lock_init(&se_tpg->tpg_lun_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
		if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
			array_free(se_tpg->tpg_lun_list,
				   TRANSPORT_MAX_LUNS_PER_TPG);
			return -ENOMEM;
		}
	}

	spin_lock_bh(&tpg_lock);
	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
	spin_unlock_bh(&tpg_lock);

	pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
		" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
		"None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);
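/*
 * A fabric module would normally call core_tpg_register() from its
 * configfs ->fabric_make_tpg() callback after allocating the new TPG.
 * A minimal sketch, with my_fabric_ops/my_tpg being hypothetical
 * fabric-side names:
 *
 *	ret = core_tpg_register(&my_fabric_ops, wwn, &my_tpg->se_tpg,
 *				my_tpg, TRANSPORT_TPG_TYPE_NORMAL);
 *	if (ret < 0)
 *		return ERR_PTR(ret);
 *
 * core_tpg_deregister() below is the matching teardown, usually from
 * ->fabric_drop_tpg().
 */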
int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	struct se_node_acl *nacl, *nacl_tmp;

	pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
		" for endpoint: %s Portal Tag %u\n",
		se_tpg->se_tpg_tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery",
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
		se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	spin_lock_bh(&tpg_lock);
	list_del(&se_tpg->se_tpg_node);
	spin_unlock_bh(&tpg_lock);

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	spin_lock_irq(&se_tpg->acl_node_lock);
	list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
			acl_list) {
		list_del(&nacl->acl_list);
		se_tpg->num_node_acls--;
		spin_unlock_irq(&se_tpg->acl_node_lock);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);

		spin_lock_irq(&se_tpg->acl_node_lock);
	}
	spin_unlock_irq(&se_tpg->acl_node_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
		core_tpg_release_virtual_lun0(se_tpg);

	se_tpg->se_tpg_fabric_ptr = NULL;
	array_free(se_tpg->tpg_lun_list, TRANSPORT_MAX_LUNS_PER_TPG);
	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);
struct se_lun *core_tpg_pre_addlun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("TPG Logical Unit Number: %u is already active"
			" on %s Target Portal Group: %u, ignoring request.\n",
			unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-EINVAL);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}
int core_tpg_post_addlun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	u32 lun_access,
	void *lun_ptr)
{
	int ret;

	ret = core_dev_export(lun_ptr, tpg, lun);
	if (ret < 0)
		return ret;

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_access = lun_access;
	lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}
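/*
 * LUN setup is split into a pre/post pair: core_tpg_pre_addlun() validates
 * the index and claims a free slot, the caller wires up the backend device,
 * and core_tpg_post_addlun() exports the port and flips lun_status to
 * ACTIVE. core_tpg_pre_dellun()/core_tpg_post_dellun() below mirror the
 * same pattern for removal.
 */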
static void core_tpg_shutdown_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_clear_lun_from_tpg(lun, tpg);
	transport_clear_lun_from_sessions(lun);
}
struct se_lun *core_tpg_pre_dellun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %u, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-ENODEV);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}
int core_tpg_post_dellun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_tpg_shutdown_lun(tpg, lun);

	core_dev_unexport(lun->lun_se_dev, tpg, lun);

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}