/*******************************************************************************
 * Filename:  target_core_configfs.c
 *
 * This file contains ConfigFS logic for the Generic Target Engine project.
 *
 * (c) Copyright 2008-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * based on configfs Copyright (C) 2005 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 ****************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/unistd.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/syscalls.h>
#include <linux/configfs.h>
#include <linux/spinlock.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/configfs_macros.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_rd.h"
#include "target_core_xcopy.h"
#define TB_CIT_SETUP(_name, _item_ops, _group_ops, _attrs)		\
static void target_core_setup_##_name##_cit(struct target_backend *tb)	\
{									\
	struct config_item_type *cit = &tb->tb_##_name##_cit;		\
									\
	cit->ct_item_ops = _item_ops;					\
	cit->ct_group_ops = _group_ops;					\
	cit->ct_attrs = _attrs;						\
	cit->ct_owner = tb->ops->owner;					\
	pr_debug("Setup generic %s\n", __stringify(_name));		\
}

#define TB_CIT_SETUP_DRV(_name, _item_ops, _group_ops)			\
static void target_core_setup_##_name##_cit(struct target_backend *tb)	\
{									\
	struct config_item_type *cit = &tb->tb_##_name##_cit;		\
									\
	cit->ct_item_ops = _item_ops;					\
	cit->ct_group_ops = _group_ops;					\
	cit->ct_attrs = tb->ops->tb_##_name##_attrs;			\
	cit->ct_owner = tb->ops->owner;					\
	pr_debug("Setup generic %s\n", __stringify(_name));		\
}
extern struct t10_alua_lu_gp *default_lu_gp;

static LIST_HEAD(g_tf_list);
static DEFINE_MUTEX(g_tf_lock);
struct target_core_configfs_attribute {
	struct configfs_attribute attr;
	ssize_t (*show)(void *, char *);
	ssize_t (*store)(void *, const char *, size_t);
};

static struct config_group target_core_hbagroup;
static struct config_group alua_group;
static struct config_group alua_lu_gps_group;
static inline struct se_hba *
item_to_hba(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_hba, hba_group);
}
/*
 * Attributes for /sys/kernel/config/target/
 */
static ssize_t target_core_attr_show(struct config_item *item,
				      struct configfs_attribute *attr,
				      char *page)
{
	return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s"
		" on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_VERSION,
		utsname()->sysname, utsname()->machine);
}

static struct configfs_item_operations target_core_fabric_item_ops = {
	.show_attribute = target_core_attr_show,
};

static struct configfs_attribute target_core_item_attr_version = {
	.ca_owner	= THIS_MODULE,
	.ca_name	= "version",
	.ca_mode	= S_IRUGO,
};
static struct target_fabric_configfs *target_core_get_fabric(
	const char *name)
{
	struct target_fabric_configfs *tf;

	if (!name)
		return NULL;

	mutex_lock(&g_tf_lock);
	list_for_each_entry(tf, &g_tf_list, tf_list) {
		if (!strcmp(tf->tf_ops->name, name)) {
			atomic_inc(&tf->tf_access_cnt);
			mutex_unlock(&g_tf_lock);
			return tf;
		}
	}
	mutex_unlock(&g_tf_lock);

	return NULL;
}
/*
 * Called from struct target_core_group_ops->make_group()
 */
static struct config_group *target_core_register_fabric(
	struct config_group *group,
	const char *name)
{
	struct target_fabric_configfs *tf;
	int ret;

	pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:"
			" %s\n", group, name);

	tf = target_core_get_fabric(name);
	if (!tf) {
		pr_debug("target_core_register_fabric() trying autoload for %s\n",
			 name);

		/*
		 * Below are some hardcoded request_module() calls to
		 * automatically load fabric modules when the following
		 * is called:
		 *
		 * mkdir -p /sys/kernel/config/target/$MODULE_NAME
		 *
		 * Note that this does not limit which TCM fabric module can be
		 * registered, but simply provides auto loading logic for modules
		 * with mkdir(2) system calls with known TCM fabric modules.
		 */

		if (!strncmp(name, "iscsi", 5)) {
			/*
			 * Automatically load the LIO Target fabric module when
			 * the following is called:
			 *
			 * mkdir -p $CONFIGFS/target/iscsi
			 */
			ret = request_module("iscsi_target_mod");
			if (ret < 0) {
				pr_debug("request_module() failed for"
					" iscsi_target_mod.ko: %d\n", ret);
				return ERR_PTR(-EINVAL);
			}
		} else if (!strncmp(name, "loopback", 8)) {
			/*
			 * Automatically load the tcm_loop fabric module when
			 * the following is called:
			 *
			 * mkdir -p $CONFIGFS/target/loopback
			 */
			ret = request_module("tcm_loop");
			if (ret < 0) {
				pr_debug("request_module() failed for"
					" tcm_loop.ko: %d\n", ret);
				return ERR_PTR(-EINVAL);
			}
		}

		tf = target_core_get_fabric(name);
	}

	if (!tf) {
		pr_debug("target_core_get_fabric() failed for %s\n",
			 name);
		return ERR_PTR(-EINVAL);
	}
	pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
			" %s\n", tf->tf_ops->name);
	/*
	 * On a successful target_core_get_fabric() lookup, the returned
	 * struct target_fabric_configfs *tf will contain a usage reference.
	 */
	pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
			&tf->tf_wwn_cit);

	tf->tf_group.default_groups = tf->tf_default_groups;
	tf->tf_group.default_groups[0] = &tf->tf_disc_group;
	tf->tf_group.default_groups[1] = NULL;

	config_group_init_type_name(&tf->tf_group, name, &tf->tf_wwn_cit);
	config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
			&tf->tf_discovery_cit);

	pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
			" %s\n", tf->tf_group.cg_item.ci_name);
	return &tf->tf_group;
}
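/*
 * Example (a sketch): the ->make_group() callback above is driven entirely
 * from userspace, e.g.:
 *
 *	mkdir -p /sys/kernel/config/target/iscsi
 *
 * which auto-loads iscsi_target_mod via the request_module() call above and
 * then populates the fabric's WWN and discovery_auth groups.
 */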
/*
 * Called from struct target_core_group_ops->drop_item()
 */
static void target_core_deregister_fabric(
	struct config_group *group,
	struct config_item *item)
{
	struct target_fabric_configfs *tf = container_of(
		to_config_group(item), struct target_fabric_configfs, tf_group);
	struct config_group *tf_group;
	struct config_item *df_item;
	int i;

	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
		" tf list\n", config_item_name(item));

	pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:"
			" %s\n", tf->tf_ops->name);
	atomic_dec(&tf->tf_access_cnt);

	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
			" %s\n", config_item_name(item));

	tf_group = &tf->tf_group;
	for (i = 0; tf_group->default_groups[i]; i++) {
		df_item = &tf_group->default_groups[i]->cg_item;
		tf_group->default_groups[i] = NULL;
		config_item_put(df_item);
	}
	config_item_put(item);
}
static struct configfs_group_operations target_core_fabric_group_ops = {
	.make_group	= &target_core_register_fabric,
	.drop_item	= &target_core_deregister_fabric,
};

/*
 * All item attributes appearing in /sys/kernel/config/target/ appear here.
 */
static struct configfs_attribute *target_core_fabric_item_attrs[] = {
	&target_core_item_attr_version,
	NULL,
};
/*
 * Provides Fabrics Groups and Item Attributes for /sys/kernel/config/target/
 */
static struct config_item_type target_core_fabrics_item = {
	.ct_item_ops	= &target_core_fabric_item_ops,
	.ct_group_ops	= &target_core_fabric_group_ops,
	.ct_attrs	= target_core_fabric_item_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct configfs_subsystem target_core_fabrics = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "target",
			.ci_type = &target_core_fabrics_item,
		},
	},
};
int target_depend_item(struct config_item *item)
{
	return configfs_depend_item(&target_core_fabrics, item);
}
EXPORT_SYMBOL(target_depend_item);

void target_undepend_item(struct config_item *item)
{
	return configfs_undepend_item(&target_core_fabrics, item);
}
EXPORT_SYMBOL(target_undepend_item);

/*##############################################################################
// Start functions called by external Target Fabrics Modules
//############################################################################*/
static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
{
	if (!tfo->name) {
		pr_err("Missing tfo->name\n");
		return -EINVAL;
	}
	if (strlen(tfo->name) >= TARGET_FABRIC_NAME_SIZE) {
		pr_err("Passed name: %s exceeds TARGET_FABRIC"
			"_NAME_SIZE\n", tfo->name);
		return -EINVAL;
	}
	if (!tfo->get_fabric_name) {
		pr_err("Missing tfo->get_fabric_name()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_get_wwn) {
		pr_err("Missing tfo->tpg_get_wwn()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_get_tag) {
		pr_err("Missing tfo->tpg_get_tag()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_check_demo_mode) {
		pr_err("Missing tfo->tpg_check_demo_mode()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_check_demo_mode_cache) {
		pr_err("Missing tfo->tpg_check_demo_mode_cache()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_check_demo_mode_write_protect) {
		pr_err("Missing tfo->tpg_check_demo_mode_write_protect()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_check_prod_mode_write_protect) {
		pr_err("Missing tfo->tpg_check_prod_mode_write_protect()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_get_inst_index) {
		pr_err("Missing tfo->tpg_get_inst_index()\n");
		return -EINVAL;
	}
	if (!tfo->release_cmd) {
		pr_err("Missing tfo->release_cmd()\n");
		return -EINVAL;
	}
	if (!tfo->shutdown_session) {
		pr_err("Missing tfo->shutdown_session()\n");
		return -EINVAL;
	}
	if (!tfo->close_session) {
		pr_err("Missing tfo->close_session()\n");
		return -EINVAL;
	}
	if (!tfo->sess_get_index) {
		pr_err("Missing tfo->sess_get_index()\n");
		return -EINVAL;
	}
	if (!tfo->write_pending) {
		pr_err("Missing tfo->write_pending()\n");
		return -EINVAL;
	}
	if (!tfo->write_pending_status) {
		pr_err("Missing tfo->write_pending_status()\n");
		return -EINVAL;
	}
	if (!tfo->set_default_node_attributes) {
		pr_err("Missing tfo->set_default_node_attributes()\n");
		return -EINVAL;
	}
	if (!tfo->get_cmd_state) {
		pr_err("Missing tfo->get_cmd_state()\n");
		return -EINVAL;
	}
	if (!tfo->queue_data_in) {
		pr_err("Missing tfo->queue_data_in()\n");
		return -EINVAL;
	}
	if (!tfo->queue_status) {
		pr_err("Missing tfo->queue_status()\n");
		return -EINVAL;
	}
	if (!tfo->queue_tm_rsp) {
		pr_err("Missing tfo->queue_tm_rsp()\n");
		return -EINVAL;
	}
	if (!tfo->aborted_task) {
		pr_err("Missing tfo->aborted_task()\n");
		return -EINVAL;
	}
	/*
	 * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
	 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
	 * target_core_fabric_configfs.c WWN+TPG group context code.
	 */
	if (!tfo->fabric_make_wwn) {
		pr_err("Missing tfo->fabric_make_wwn()\n");
		return -EINVAL;
	}
	if (!tfo->fabric_drop_wwn) {
		pr_err("Missing tfo->fabric_drop_wwn()\n");
		return -EINVAL;
	}
	if (!tfo->fabric_make_tpg) {
		pr_err("Missing tfo->fabric_make_tpg()\n");
		return -EINVAL;
	}
	if (!tfo->fabric_drop_tpg) {
		pr_err("Missing tfo->fabric_drop_tpg()\n");
		return -EINVAL;
	}

	return 0;
}
int target_register_template(const struct target_core_fabric_ops *fo)
{
	struct target_fabric_configfs *tf;
	int ret;

	ret = target_fabric_tf_ops_check(fo);
	if (ret)
		return ret;

	tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
	if (!tf) {
		pr_err("%s: could not allocate memory!\n", __func__);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&tf->tf_list);
	atomic_set(&tf->tf_access_cnt, 0);
	tf->tf_ops = fo;
	target_fabric_setup_cits(tf);

	mutex_lock(&g_tf_lock);
	list_add_tail(&tf->tf_list, &g_tf_list);
	mutex_unlock(&g_tf_lock);

	return 0;
}
EXPORT_SYMBOL(target_register_template);
void target_unregister_template(const struct target_core_fabric_ops *fo)
{
	struct target_fabric_configfs *t;

	mutex_lock(&g_tf_lock);
	list_for_each_entry(t, &g_tf_list, tf_list) {
		if (!strcmp(t->tf_ops->name, fo->name)) {
			BUG_ON(atomic_read(&t->tf_access_cnt));
			list_del(&t->tf_list);
			kfree(t);
			break;
		}
	}
	mutex_unlock(&g_tf_lock);
}
EXPORT_SYMBOL(target_unregister_template);
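/*
 * Typical usage from a fabric module (a sketch; "my_fabric" is a hypothetical
 * example, not an in-tree fabric):
 *
 *	static const struct target_core_fabric_ops my_fabric_ops = {
 *		.module	= THIS_MODULE,
 *		.name	= "my_fabric",
 *		...
 *	};
 *
 * with target_register_template(&my_fabric_ops) called from module_init()
 * and target_unregister_template(&my_fabric_ops) from module_exit().
 */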
/*##############################################################################
// Stop functions called by external Target Fabrics Modules
//############################################################################*/
/* Start functions for struct config_item_type tb_dev_attrib_cit */

#define DEF_TB_DEV_ATTRIB_SHOW(_name)					\
static ssize_t show_##_name(struct se_dev_attrib *da, char *page)	\
{									\
	return snprintf(page, PAGE_SIZE, "%u\n", da->_name);		\
}

DEF_TB_DEV_ATTRIB_SHOW(emulate_model_alias);
DEF_TB_DEV_ATTRIB_SHOW(emulate_dpo);
DEF_TB_DEV_ATTRIB_SHOW(emulate_fua_write);
DEF_TB_DEV_ATTRIB_SHOW(emulate_fua_read);
DEF_TB_DEV_ATTRIB_SHOW(emulate_write_cache);
DEF_TB_DEV_ATTRIB_SHOW(emulate_ua_intlck_ctrl);
DEF_TB_DEV_ATTRIB_SHOW(emulate_tas);
DEF_TB_DEV_ATTRIB_SHOW(emulate_tpu);
DEF_TB_DEV_ATTRIB_SHOW(emulate_tpws);
DEF_TB_DEV_ATTRIB_SHOW(emulate_caw);
DEF_TB_DEV_ATTRIB_SHOW(emulate_3pc);
DEF_TB_DEV_ATTRIB_SHOW(pi_prot_type);
DEF_TB_DEV_ATTRIB_SHOW(hw_pi_prot_type);
DEF_TB_DEV_ATTRIB_SHOW(pi_prot_format);
DEF_TB_DEV_ATTRIB_SHOW(enforce_pr_isids);
DEF_TB_DEV_ATTRIB_SHOW(is_nonrot);
DEF_TB_DEV_ATTRIB_SHOW(emulate_rest_reord);
DEF_TB_DEV_ATTRIB_SHOW(force_pr_aptpl);
DEF_TB_DEV_ATTRIB_SHOW(hw_block_size);
DEF_TB_DEV_ATTRIB_SHOW(block_size);
DEF_TB_DEV_ATTRIB_SHOW(hw_max_sectors);
DEF_TB_DEV_ATTRIB_SHOW(optimal_sectors);
DEF_TB_DEV_ATTRIB_SHOW(hw_queue_depth);
DEF_TB_DEV_ATTRIB_SHOW(queue_depth);
DEF_TB_DEV_ATTRIB_SHOW(max_unmap_lba_count);
DEF_TB_DEV_ATTRIB_SHOW(max_unmap_block_desc_count);
DEF_TB_DEV_ATTRIB_SHOW(unmap_granularity);
DEF_TB_DEV_ATTRIB_SHOW(unmap_granularity_alignment);
DEF_TB_DEV_ATTRIB_SHOW(max_write_same_len);

#define DEF_TB_DEV_ATTRIB_STORE_U32(_name)				\
static ssize_t store_##_name(struct se_dev_attrib *da, const char *page,\
		size_t count)						\
{									\
	u32 val;							\
	int ret;							\
									\
	ret = kstrtou32(page, 0, &val);					\
	if (ret < 0)							\
		return ret;						\
									\
	da->_name = val;						\
	return count;							\
}

DEF_TB_DEV_ATTRIB_STORE_U32(max_unmap_lba_count);
DEF_TB_DEV_ATTRIB_STORE_U32(max_unmap_block_desc_count);
DEF_TB_DEV_ATTRIB_STORE_U32(unmap_granularity);
DEF_TB_DEV_ATTRIB_STORE_U32(unmap_granularity_alignment);
DEF_TB_DEV_ATTRIB_STORE_U32(max_write_same_len);

#define DEF_TB_DEV_ATTRIB_STORE_BOOL(_name)				\
static ssize_t store_##_name(struct se_dev_attrib *da, const char *page,\
		size_t count)						\
{									\
	bool flag;							\
	int ret;							\
									\
	ret = strtobool(page, &flag);					\
	if (ret < 0)							\
		return ret;						\
									\
	da->_name = flag;						\
	return count;							\
}

DEF_TB_DEV_ATTRIB_STORE_BOOL(emulate_fua_write);
DEF_TB_DEV_ATTRIB_STORE_BOOL(emulate_caw);
DEF_TB_DEV_ATTRIB_STORE_BOOL(emulate_3pc);
DEF_TB_DEV_ATTRIB_STORE_BOOL(enforce_pr_isids);
DEF_TB_DEV_ATTRIB_STORE_BOOL(is_nonrot);

#define DEF_TB_DEV_ATTRIB_STORE_STUB(_name)				\
static ssize_t store_##_name(struct se_dev_attrib *da, const char *page,\
		size_t count)						\
{									\
	printk_once(KERN_WARNING					\
		"ignoring deprecated %s attribute\n",			\
		__stringify(_name));					\
	return count;							\
}

DEF_TB_DEV_ATTRIB_STORE_STUB(emulate_dpo);
DEF_TB_DEV_ATTRIB_STORE_STUB(emulate_fua_read);
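/*
 * The DEF_TB_DEV_ATTRIB_* generators above pair a show_$NAME() routine with a
 * store_$NAME() routine; the TB_DEV_ATTR() wrappers further below bind both
 * into a single configfs attribute.  For example (a sketch of the expansion),
 * DEF_TB_DEV_ATTRIB_STORE_BOOL(is_nonrot) produces roughly:
 *
 *	static ssize_t store_is_nonrot(struct se_dev_attrib *da,
 *			const char *page, size_t count)
 *	{
 *		bool flag;
 *		int ret = strtobool(page, &flag);
 *
 *		if (ret < 0)
 *			return ret;
 *		da->is_nonrot = flag;
 *		return count;
 *	}
 */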
static void dev_set_t10_wwn_model_alias(struct se_device *dev)
{
	const char *configname;

	configname = config_item_name(&dev->dev_group.cg_item);
	if (strlen(configname) >= 16) {
		pr_warn("dev[%p]: Backstore name '%s' is too long for "
			"INQUIRY_MODEL, truncating to 16 bytes\n", dev,
			configname);
	}
	snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
}
static ssize_t store_emulate_model_alias(struct se_dev_attrib *da,
		const char *page, size_t count)
{
	struct se_device *dev = da->da_dev;
	bool flag;
	int ret;

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change model alias"
			" while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (flag) {
		dev_set_t10_wwn_model_alias(dev);
	} else {
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
	}
	da->emulate_model_alias = flag;
	return count;
}
static ssize_t store_emulate_write_cache(struct se_dev_attrib *da,
		const char *page, size_t count)
{
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (flag && da->da_dev->transport->get_write_cache) {
		pr_err("emulate_write_cache not supported for this device\n");
		return -EINVAL;
	}

	da->emulate_write_cache = flag;
	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
			da->da_dev, flag);
	return count;
}
static ssize_t store_emulate_ua_intlck_ctrl(struct se_dev_attrib *da,
		const char *page, size_t count)
{
	u32 val;
	int ret;

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1 && val != 2) {
		pr_err("Illegal value %d\n", val);
		return -EINVAL;
	}

	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" UA_INTRLCK_CTRL while export_count is %d\n",
			da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}
	da->emulate_ua_intlck_ctrl = val;
	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		da->da_dev, val);
	return count;
}
static ssize_t store_emulate_tas(struct se_dev_attrib *da,
		const char *page, size_t count)
{
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TAS while"
			" export_count is %d\n",
			da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}
	da->emulate_tas = flag;
	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		da->da_dev, flag ? "Enabled" : "Disabled");

	return count;
}
static ssize_t store_emulate_tpu(struct se_dev_attrib *da,
		const char *page, size_t count)
{
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !da->max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	da->emulate_tpu = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
		da->da_dev, flag);
	return count;
}
static ssize_t store_emulate_tpws(struct se_dev_attrib *da,
		const char *page, size_t count)
{
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !da->max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	da->emulate_tpws = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
		da->da_dev, flag);
	return count;
}
static ssize_t store_pi_prot_type(struct se_dev_attrib *da,
		const char *page, size_t count)
{
	int old_prot = da->pi_prot_type, ret;
	struct se_device *dev = da->da_dev;
	u32 flag;

	ret = kstrtou32(page, 0, &flag);
	if (ret < 0)
		return ret;

	if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
		pr_err("Illegal value %d for pi_prot_type\n", flag);
		return -EINVAL;
	}
	if (flag == 2) {
		pr_err("DIF TYPE2 protection currently not supported\n");
		return -ENOSYS;
	}
	if (da->hw_pi_prot_type) {
		pr_warn("DIF protection enabled on underlying hardware,"
			" ignoring\n");
		return count;
	}
	if (!dev->transport->init_prot || !dev->transport->free_prot) {
		/* 0 is only allowed value for non-supporting backends */
		if (flag == 0)
			return count;

		pr_err("DIF protection not supported by backend: %s\n",
		       dev->transport->name);
		return -ENOSYS;
	}
	if (!(dev->dev_flags & DF_CONFIGURED)) {
		pr_err("DIF protection requires device to be configured\n");
		return -ENODEV;
	}
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device PROT type while"
		       " export_count is %d\n", dev, dev->export_count);
		return -EINVAL;
	}

	da->pi_prot_type = flag;

	if (flag && !old_prot) {
		ret = dev->transport->init_prot(dev);
		if (ret) {
			da->pi_prot_type = old_prot;
			return ret;
		}
	} else if (!flag && old_prot) {
		dev->transport->free_prot(dev);
	}

	pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);
	return count;
}
static ssize_t store_pi_prot_format(struct se_dev_attrib *da,
		const char *page, size_t count)
{
	struct se_device *dev = da->da_dev;
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (!flag)
		return count;

	if (!dev->transport->format_prot) {
		pr_err("DIF protection format not supported by backend %s\n",
		       dev->transport->name);
		return -ENOSYS;
	}
	if (!(dev->dev_flags & DF_CONFIGURED)) {
		pr_err("DIF protection format requires device to be configured\n");
		return -ENODEV;
	}
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to format SE Device PROT type while"
		       " export_count is %d\n", dev, dev->export_count);
		return -EINVAL;
	}

	ret = dev->transport->format_prot(dev);
	if (ret)
		return ret;

	pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);
	return count;
}
static ssize_t store_force_pr_aptpl(struct se_dev_attrib *da,
		const char *page, size_t count)
{
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;
	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to set force_pr_aptpl while"
		       " export_count is %d\n",
		       da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}

	da->force_pr_aptpl = flag;
	pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", da->da_dev, flag);
	return count;
}
static ssize_t store_emulate_rest_reord(struct se_dev_attrib *da,
		const char *page, size_t count)
{
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (flag != 0) {
		printk(KERN_ERR "dev[%p]: SE Device emulation of restricted"
			" reordering not implemented\n", da->da_dev);
		return -ENOSYS;
	}
	da->emulate_rest_reord = flag;
	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n",
		da->da_dev, flag);
	return count;
}
/*
 * Note, this can only be called on unexported SE Device Object.
 */
static ssize_t store_queue_depth(struct se_dev_attrib *da,
		const char *page, size_t count)
{
	struct se_device *dev = da->da_dev;
	u32 val;
	int ret;

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TCQ while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (!val) {
		pr_err("dev[%p]: Illegal ZERO value for queue_depth\n", dev);
		return -EINVAL;
	}

	if (val > dev->dev_attrib.queue_depth) {
		if (val > dev->dev_attrib.hw_queue_depth) {
			pr_err("dev[%p]: Passed queue_depth:"
				" %u exceeds TCM/SE_Device MAX"
				" TCQ: %u\n", dev, val,
				dev->dev_attrib.hw_queue_depth);
			return -EINVAL;
		}
	}
	da->queue_depth = dev->queue_depth = val;
	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", dev, val);
	return count;
}
static ssize_t store_optimal_sectors(struct se_dev_attrib *da,
		const char *page, size_t count)
{
	u32 val;
	int ret;

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" optimal_sectors while export_count is %d\n",
			da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}
	if (val > da->hw_max_sectors) {
		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
			" greater than hw_max_sectors: %u\n",
			da->da_dev, val, da->hw_max_sectors);
		return -EINVAL;
	}

	da->optimal_sectors = val;
	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
			da->da_dev, val);
	return count;
}
static ssize_t store_block_size(struct se_dev_attrib *da,
		const char *page, size_t count)
{
	u32 val;
	int ret;

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device block_size"
			" while export_count is %d\n",
			da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}

	if (val != 512 && val != 1024 && val != 2048 && val != 4096) {
		pr_err("dev[%p]: Illegal value for block_device: %u"
			" for SE device, must be 512, 1024, 2048 or 4096\n",
			da->da_dev, val);
		return -EINVAL;
	}

	da->block_size = val;
	if (da->max_bytes_per_io)
		da->hw_max_sectors = da->max_bytes_per_io / val;

	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
			da->da_dev, val);
	return count;
}
CONFIGFS_EATTR_STRUCT(target_backend_dev_attrib, se_dev_attrib);

#define TB_DEV_ATTR(_backend, _name, _mode)				\
static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \
	__CONFIGFS_EATTR(_name, _mode,					\
		show_##_name,						\
		store_##_name);

#define TB_DEV_ATTR_RO(_backend, _name)					\
static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \
	__CONFIGFS_EATTR_RO(_name,					\
		show_##_name);
TB_DEV_ATTR(target_core, emulate_model_alias, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, emulate_dpo, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, emulate_fua_write, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, emulate_fua_read, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, emulate_write_cache, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, emulate_tas, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, emulate_tpu, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, emulate_tpws, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, emulate_caw, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, emulate_3pc, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, pi_prot_type, S_IRUGO | S_IWUSR);
TB_DEV_ATTR_RO(target_core, hw_pi_prot_type);
TB_DEV_ATTR(target_core, pi_prot_format, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, enforce_pr_isids, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, is_nonrot, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, emulate_rest_reord, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, force_pr_aptpl, S_IRUGO | S_IWUSR);
TB_DEV_ATTR_RO(target_core, hw_block_size);
TB_DEV_ATTR(target_core, block_size, S_IRUGO | S_IWUSR);
TB_DEV_ATTR_RO(target_core, hw_max_sectors);
TB_DEV_ATTR(target_core, optimal_sectors, S_IRUGO | S_IWUSR);
TB_DEV_ATTR_RO(target_core, hw_queue_depth);
TB_DEV_ATTR(target_core, queue_depth, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, max_unmap_lba_count, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, max_unmap_block_desc_count, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, unmap_granularity, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, unmap_granularity_alignment, S_IRUGO | S_IWUSR);
TB_DEV_ATTR(target_core, max_write_same_len, S_IRUGO | S_IWUSR);
CONFIGFS_EATTR_STRUCT(target_core_dev_attrib, se_dev_attrib);
CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group);

/*
 * dev_attrib attributes for devices using the target core SBC/SPC
 * interpreter.  Any backend using spc_parse_cdb should be using
 * these.
 */
struct configfs_attribute *sbc_attrib_attrs[] = {
	&target_core_dev_attrib_emulate_model_alias.attr,
	&target_core_dev_attrib_emulate_dpo.attr,
	&target_core_dev_attrib_emulate_fua_write.attr,
	&target_core_dev_attrib_emulate_fua_read.attr,
	&target_core_dev_attrib_emulate_write_cache.attr,
	&target_core_dev_attrib_emulate_ua_intlck_ctrl.attr,
	&target_core_dev_attrib_emulate_tas.attr,
	&target_core_dev_attrib_emulate_tpu.attr,
	&target_core_dev_attrib_emulate_tpws.attr,
	&target_core_dev_attrib_emulate_caw.attr,
	&target_core_dev_attrib_emulate_3pc.attr,
	&target_core_dev_attrib_pi_prot_type.attr,
	&target_core_dev_attrib_hw_pi_prot_type.attr,
	&target_core_dev_attrib_pi_prot_format.attr,
	&target_core_dev_attrib_enforce_pr_isids.attr,
	&target_core_dev_attrib_is_nonrot.attr,
	&target_core_dev_attrib_emulate_rest_reord.attr,
	&target_core_dev_attrib_force_pr_aptpl.attr,
	&target_core_dev_attrib_hw_block_size.attr,
	&target_core_dev_attrib_block_size.attr,
	&target_core_dev_attrib_hw_max_sectors.attr,
	&target_core_dev_attrib_optimal_sectors.attr,
	&target_core_dev_attrib_hw_queue_depth.attr,
	&target_core_dev_attrib_queue_depth.attr,
	&target_core_dev_attrib_max_unmap_lba_count.attr,
	&target_core_dev_attrib_max_unmap_block_desc_count.attr,
	&target_core_dev_attrib_unmap_granularity.attr,
	&target_core_dev_attrib_unmap_granularity_alignment.attr,
	&target_core_dev_attrib_max_write_same_len.attr,
	NULL,
};
EXPORT_SYMBOL(sbc_attrib_attrs);
TB_DEV_ATTR_RO(target_pt, hw_pi_prot_type);
TB_DEV_ATTR_RO(target_pt, hw_block_size);
TB_DEV_ATTR_RO(target_pt, hw_max_sectors);
TB_DEV_ATTR_RO(target_pt, hw_queue_depth);

/*
 * Minimal dev_attrib attributes for devices passing through CDBs.
 * In this case we only provide a few read-only attributes for
 * backwards compatibility.
 */
struct configfs_attribute *passthrough_attrib_attrs[] = {
	&target_pt_dev_attrib_hw_pi_prot_type.attr,
	&target_pt_dev_attrib_hw_block_size.attr,
	&target_pt_dev_attrib_hw_max_sectors.attr,
	&target_pt_dev_attrib_hw_queue_depth.attr,
	NULL,
};
EXPORT_SYMBOL(passthrough_attrib_attrs);
static struct configfs_item_operations target_core_dev_attrib_ops = {
	.show_attribute		= target_core_dev_attrib_attr_show,
	.store_attribute	= target_core_dev_attrib_attr_store,
};

TB_CIT_SETUP_DRV(dev_attrib, &target_core_dev_attrib_ops, NULL);

/* End functions for struct config_item_type tb_dev_attrib_cit */
/* Start functions for struct config_item_type tb_dev_wwn_cit */

CONFIGFS_EATTR_STRUCT(target_core_dev_wwn, t10_wwn);

#define SE_DEV_WWN_ATTR(_name, _mode)					\
static struct target_core_dev_wwn_attribute target_core_dev_wwn_##_name = \
	__CONFIGFS_EATTR(_name, _mode,					\
		target_core_dev_wwn_show_attr_##_name,			\
		target_core_dev_wwn_store_attr_##_name);

#define SE_DEV_WWN_ATTR_RO(_name)					\
static struct target_core_dev_wwn_attribute				\
	target_core_dev_wwn_##_name =					\
	__CONFIGFS_EATTR_RO(_name,					\
		target_core_dev_wwn_show_attr_##_name);
/*
 * VPD page 0x80 Unit serial
 */
static ssize_t target_core_dev_wwn_show_attr_vpd_unit_serial(
	struct t10_wwn *t10_wwn,
	char *page)
{
	return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
		&t10_wwn->unit_serial[0]);
}
static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
	struct t10_wwn *t10_wwn,
	const char *page,
	size_t count)
{
	struct se_device *dev = t10_wwn->t10_dev;
	unsigned char buf[INQUIRY_VPD_SERIAL_LEN];

	/*
	 * If Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial
	 * from the struct scsi_device level firmware, do not allow
	 * VPD Unit Serial to be emulated.
	 *
	 * Note this struct scsi_device could also be emulating VPD
	 * information from its drivers/scsi LLD.  But for now we assume
	 * it is doing 'the right thing' wrt a world wide unique
	 * VPD Unit Serial Number that OS dependent multipath can depend on.
	 */
	if (dev->dev_flags & DF_FIRMWARE_VPD_UNIT_SERIAL) {
		pr_err("Underlying SCSI device firmware provided VPD"
			" Unit Serial, ignoring request\n");
		return -EOPNOTSUPP;
	}

	if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) {
		pr_err("Emulated VPD Unit Serial exceeds"
		" INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
		return -EOVERFLOW;
	}
	/*
	 * Check to see if any active $FABRIC_MOD exports exist.  If they
	 * do exist, fail here as changing this information on the fly
	 * (underneath the initiator side OS dependent multipath code)
	 * could cause negative effects.
	 */
	if (dev->export_count) {
		pr_err("Unable to set VPD Unit Serial while"
			" active %d $FABRIC_MOD exports exist\n",
			dev->export_count);
		return -EINVAL;
	}

	/*
	 * This currently assumes ASCII encoding for emulated VPD Unit Serial.
	 *
	 * Also, strip any newline added from the userspace
	 * echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial
	 */
	memset(buf, 0, INQUIRY_VPD_SERIAL_LEN);
	snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
	snprintf(dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
			"%s", strstrip(buf));
	dev->dev_flags |= DF_EMULATED_VPD_UNIT_SERIAL;

	pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
			" %s\n", dev->t10_wwn.unit_serial);

	return count;
}

SE_DEV_WWN_ATTR(vpd_unit_serial, S_IRUGO | S_IWUSR);
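/*
 * Example usage from userspace (a sketch), matching the comment above; the
 * trailing newline from echo is stripped by strstrip():
 *
 *	uuidgen > /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
 *	cat /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
 */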
/*
 * VPD page 0x83 Protocol Identifier
 */
static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier(
	struct t10_wwn *t10_wwn,
	char *page)
{
	struct t10_vpd *vpd;
	unsigned char buf[VPD_TMP_BUF_SIZE];
	ssize_t len = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	spin_lock(&t10_wwn->t10_vpd_lock);
	list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {
		if (!vpd->protocol_identifier_set)
			continue;

		transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);

		if (len + strlen(buf) >= PAGE_SIZE)
			break;

		len += sprintf(page+len, "%s", buf);
	}
	spin_unlock(&t10_wwn->t10_vpd_lock);

	return len;
}

static ssize_t target_core_dev_wwn_store_attr_vpd_protocol_identifier(
	struct t10_wwn *t10_wwn,
	const char *page,
	size_t count)
{
	return -ENOSYS;
}

SE_DEV_WWN_ATTR(vpd_protocol_identifier, S_IRUGO | S_IWUSR);
/*
 * Generic wrapper for dumping VPD identifiers by association.
 */
#define DEF_DEV_WWN_ASSOC_SHOW(_name, _assoc)				\
static ssize_t target_core_dev_wwn_show_attr_##_name(			\
	struct t10_wwn *t10_wwn,					\
	char *page)							\
{									\
	struct t10_vpd *vpd;						\
	unsigned char buf[VPD_TMP_BUF_SIZE];				\
	ssize_t len = 0;						\
									\
	spin_lock(&t10_wwn->t10_vpd_lock);				\
	list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {	\
		if (vpd->association != _assoc)				\
			continue;					\
									\
		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
		transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE);	\
		if (len + strlen(buf) >= PAGE_SIZE)			\
			break;						\
		len += sprintf(page+len, "%s", buf);			\
									\
		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
		transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
		if (len + strlen(buf) >= PAGE_SIZE)			\
			break;						\
		len += sprintf(page+len, "%s", buf);			\
									\
		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
		transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE);	\
		if (len + strlen(buf) >= PAGE_SIZE)			\
			break;						\
		len += sprintf(page+len, "%s", buf);			\
	}								\
	spin_unlock(&t10_wwn->t10_vpd_lock);				\
									\
	return len;							\
}
/*
 * VPD page 0x83 Association: Logical Unit
 */
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00);

static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_logical_unit(
	struct t10_wwn *t10_wwn,
	const char *page,
	size_t count)
{
	return -ENOSYS;
}

SE_DEV_WWN_ATTR(vpd_assoc_logical_unit, S_IRUGO | S_IWUSR);

/*
 * VPD page 0x83 Association: Target Port
 */
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10);

static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_target_port(
	struct t10_wwn *t10_wwn,
	const char *page,
	size_t count)
{
	return -ENOSYS;
}

SE_DEV_WWN_ATTR(vpd_assoc_target_port, S_IRUGO | S_IWUSR);

/*
 * VPD page 0x83 Association: SCSI Target Device
 */
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20);

static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_scsi_target_device(
	struct t10_wwn *t10_wwn,
	const char *page,
	size_t count)
{
	return -ENOSYS;
}

SE_DEV_WWN_ATTR(vpd_assoc_scsi_target_device, S_IRUGO | S_IWUSR);
CONFIGFS_EATTR_OPS(target_core_dev_wwn, t10_wwn, t10_wwn_group);

static struct configfs_attribute *target_core_dev_wwn_attrs[] = {
	&target_core_dev_wwn_vpd_unit_serial.attr,
	&target_core_dev_wwn_vpd_protocol_identifier.attr,
	&target_core_dev_wwn_vpd_assoc_logical_unit.attr,
	&target_core_dev_wwn_vpd_assoc_target_port.attr,
	&target_core_dev_wwn_vpd_assoc_scsi_target_device.attr,
	NULL,
};

static struct configfs_item_operations target_core_dev_wwn_ops = {
	.show_attribute		= target_core_dev_wwn_attr_show,
	.store_attribute	= target_core_dev_wwn_attr_store,
};

TB_CIT_SETUP(dev_wwn, &target_core_dev_wwn_ops, NULL, target_core_dev_wwn_attrs);

/* End functions for struct config_item_type tb_dev_wwn_cit */
/* Start functions for struct config_item_type tb_dev_pr_cit */

CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_device);

#define SE_DEV_PR_ATTR(_name, _mode)					\
static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \
	__CONFIGFS_EATTR(_name, _mode,					\
		target_core_dev_pr_show_attr_##_name,			\
		target_core_dev_pr_store_attr_##_name);

#define SE_DEV_PR_ATTR_RO(_name)					\
static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \
	__CONFIGFS_EATTR_RO(_name,					\
		target_core_dev_pr_show_attr_##_name);
static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev,
		char *page)
{
	struct se_node_acl *se_nacl;
	struct t10_pr_registration *pr_reg;
	char i_buf[PR_REG_ISID_ID_LEN];

	memset(i_buf, 0, PR_REG_ISID_ID_LEN);

	pr_reg = dev->dev_pr_res_holder;
	if (!pr_reg)
		return sprintf(page, "No SPC-3 Reservation holder\n");

	se_nacl = pr_reg->pr_reg_nacl;
	core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);

	return sprintf(page, "SPC-3 Reservation: %s Initiator: %s%s\n",
		se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
		se_nacl->initiatorname, i_buf);
}
static ssize_t target_core_dev_pr_show_spc2_res(struct se_device *dev,
		char *page)
{
	struct se_node_acl *se_nacl;
	ssize_t len;

	se_nacl = dev->dev_reserved_node_acl;
	if (se_nacl) {
		len = sprintf(page,
			      "SPC-2 Reservation: %s Initiator: %s\n",
			      se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
			      se_nacl->initiatorname);
	} else {
		len = sprintf(page, "No SPC-2 Reservation holder\n");
	}
	return len;
}
static ssize_t target_core_dev_pr_show_attr_res_holder(struct se_device *dev,
		char *page)
{
	int ret;

	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return sprintf(page, "Passthrough\n");

	spin_lock(&dev->dev_reservation_lock);
	if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
		ret = target_core_dev_pr_show_spc2_res(dev, page);
	else
		ret = target_core_dev_pr_show_spc3_res(dev, page);
	spin_unlock(&dev->dev_reservation_lock);
	return ret;
}

SE_DEV_PR_ATTR_RO(res_holder);
static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts(
		struct se_device *dev, char *page)
{
	ssize_t len = 0;

	spin_lock(&dev->dev_reservation_lock);
	if (!dev->dev_pr_res_holder) {
		len = sprintf(page, "No SPC-3 Reservation holder\n");
	} else if (dev->dev_pr_res_holder->pr_reg_all_tg_pt) {
		len = sprintf(page, "SPC-3 Reservation: All Target"
			" Ports registration\n");
	} else {
		len = sprintf(page, "SPC-3 Reservation: Single"
			" Target Port registration\n");
	}

	spin_unlock(&dev->dev_reservation_lock);
	return len;
}

SE_DEV_PR_ATTR_RO(res_pr_all_tgt_pts);
static ssize_t target_core_dev_pr_show_attr_res_pr_generation(
		struct se_device *dev, char *page)
{
	return sprintf(page, "0x%08x\n", dev->t10_pr.pr_generation);
}

SE_DEV_PR_ATTR_RO(res_pr_generation);
/*
 * res_pr_holder_tg_port
 */
static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
		struct se_device *dev, char *page)
{
	struct se_node_acl *se_nacl;
	struct se_portal_group *se_tpg;
	struct t10_pr_registration *pr_reg;
	const struct target_core_fabric_ops *tfo;
	ssize_t len = 0;

	spin_lock(&dev->dev_reservation_lock);
	pr_reg = dev->dev_pr_res_holder;
	if (!pr_reg) {
		len = sprintf(page, "No SPC-3 Reservation holder\n");
		goto out_unlock;
	}

	se_nacl = pr_reg->pr_reg_nacl;
	se_tpg = se_nacl->se_tpg;
	tfo = se_tpg->se_tpg_tfo;

	len += sprintf(page+len, "SPC-3 Reservation: %s"
		" Target Node Endpoint: %s\n", tfo->get_fabric_name(),
		tfo->tpg_get_wwn(se_tpg));
	len += sprintf(page+len, "SPC-3 Reservation: Relative Port"
		" Identifier Tag: %hu %s Portal Group Tag: %hu"
		" %s Logical Unit: %llu\n", pr_reg->tg_pt_sep_rtpi,
		tfo->get_fabric_name(), tfo->tpg_get_tag(se_tpg),
		tfo->get_fabric_name(), pr_reg->pr_aptpl_target_lun);

out_unlock:
	spin_unlock(&dev->dev_reservation_lock);
	return len;
}

SE_DEV_PR_ATTR_RO(res_pr_holder_tg_port);
static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
		struct se_device *dev, char *page)
{
	const struct target_core_fabric_ops *tfo;
	struct t10_pr_registration *pr_reg;
	unsigned char buf[384];
	char i_buf[PR_REG_ISID_ID_LEN];
	ssize_t len = 0;
	int reg_count = 0;

	len += sprintf(page+len, "SPC-3 PR Registrations:\n");

	spin_lock(&dev->t10_pr.registration_lock);
	list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
			pr_reg_list) {

		memset(buf, 0, 384);
		memset(i_buf, 0, PR_REG_ISID_ID_LEN);
		tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
		core_pr_dump_initiator_port(pr_reg, i_buf,
					PR_REG_ISID_ID_LEN);
		sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n",
			tfo->get_fabric_name(),
			pr_reg->pr_reg_nacl->initiatorname, i_buf, pr_reg->pr_res_key,
			pr_reg->pr_res_generation);

		if (len + strlen(buf) >= PAGE_SIZE)
			break;

		len += sprintf(page+len, "%s", buf);
		reg_count++;
	}
	spin_unlock(&dev->t10_pr.registration_lock);

	if (!reg_count)
		len += sprintf(page+len, "None\n");

	return len;
}

SE_DEV_PR_ATTR_RO(res_pr_registered_i_pts);
static ssize_t target_core_dev_pr_show_attr_res_pr_type(
		struct se_device *dev, char *page)
{
	struct t10_pr_registration *pr_reg;
	ssize_t len = 0;

	spin_lock(&dev->dev_reservation_lock);
	pr_reg = dev->dev_pr_res_holder;
	if (pr_reg) {
		len = sprintf(page, "SPC-3 Reservation Type: %s\n",
			core_scsi3_pr_dump_type(pr_reg->pr_res_type));
	} else {
		len = sprintf(page, "No SPC-3 Reservation holder\n");
	}

	spin_unlock(&dev->dev_reservation_lock);
	return len;
}

SE_DEV_PR_ATTR_RO(res_pr_type);
static ssize_t target_core_dev_pr_show_attr_res_type(
		struct se_device *dev, char *page)
{
	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return sprintf(page, "SPC_PASSTHROUGH\n");
	else if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
		return sprintf(page, "SPC2_RESERVATIONS\n");
	else
		return sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
}

SE_DEV_PR_ATTR_RO(res_type);
static ssize_t target_core_dev_pr_show_attr_res_aptpl_active(
		struct se_device *dev, char *page)
{
	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return 0;

	return sprintf(page, "APTPL Bit Status: %s\n",
		(dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled");
}

SE_DEV_PR_ATTR_RO(res_aptpl_active);
/*
 * res_aptpl_metadata
 */
static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata(
		struct se_device *dev, char *page)
{
	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return 0;

	return sprintf(page, "Ready to process PR APTPL metadata..\n");
}
enum {
	Opt_initiator_fabric, Opt_initiator_node, Opt_initiator_sid,
	Opt_sa_res_key, Opt_res_holder, Opt_res_type, Opt_res_scope,
	Opt_res_all_tg_pt, Opt_mapped_lun, Opt_target_fabric,
	Opt_target_node, Opt_tpgt, Opt_port_rtpi, Opt_target_lun, Opt_err
};
static match_table_t tokens = {
	{Opt_initiator_fabric, "initiator_fabric=%s"},
	{Opt_initiator_node, "initiator_node=%s"},
	{Opt_initiator_sid, "initiator_sid=%s"},
	{Opt_sa_res_key, "sa_res_key=%s"},
	{Opt_res_holder, "res_holder=%d"},
	{Opt_res_type, "res_type=%d"},
	{Opt_res_scope, "res_scope=%d"},
	{Opt_res_all_tg_pt, "res_all_tg_pt=%d"},
	{Opt_mapped_lun, "mapped_lun=%lld"},
	{Opt_target_fabric, "target_fabric=%s"},
	{Opt_target_node, "target_node=%s"},
	{Opt_tpgt, "tpgt=%d"},
	{Opt_port_rtpi, "port_rtpi=%d"},
	{Opt_target_lun, "target_lun=%lld"},
	{Opt_err, NULL}
};
static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
	struct se_device *dev,
	const char *page,
	size_t count)
{
	unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL;
	unsigned char *t_fabric = NULL, *t_port = NULL;
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	unsigned long long tmp_ll;
	u64 sa_res_key = 0;
	u64 mapped_lun = 0, target_lun = 0;
	int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token;
	u16 tpgt = 0;
	u8 type = 0;

	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return 0;
	if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
		return 0;

	if (dev->export_count) {
		pr_debug("Unable to process APTPL metadata while"
			" active fabric exports exist\n");
		return -EINVAL;
	}

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;
	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_initiator_fabric:
			i_fabric = match_strdup(args);
			if (!i_fabric) {
				ret = -ENOMEM;
				goto out;
			}
			break;
		case Opt_initiator_node:
			i_port = match_strdup(args);
			if (!i_port) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) {
				pr_err("APTPL metadata initiator_node="
					" exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
					PR_APTPL_MAX_IPORT_LEN);
				ret = -EINVAL;
				break;
			}
			break;
		case Opt_initiator_sid:
			isid = match_strdup(args);
			if (!isid) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(isid) >= PR_REG_ISID_LEN) {
				pr_err("APTPL metadata initiator_isid"
					"= exceeds PR_REG_ISID_LEN: %d\n",
					PR_REG_ISID_LEN);
				ret = -EINVAL;
				break;
			}
			break;
		case Opt_sa_res_key:
			ret = kstrtoull(args->from, 0, &tmp_ll);
			if (ret < 0) {
				pr_err("kstrtoull() failed for sa_res_key=\n");
				goto out;
			}
			sa_res_key = (u64)tmp_ll;
			break;
		/*
		 * PR APTPL Metadata for Reservation
		 */
		case Opt_res_holder:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			res_holder = arg;
			break;
		case Opt_res_type:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			type = (u8)arg;
			break;
		case Opt_res_scope:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			break;
		case Opt_res_all_tg_pt:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			all_tg_pt = (int)arg;
			break;
		case Opt_mapped_lun:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			mapped_lun = (u64)arg;
			break;
		/*
		 * PR APTPL Metadata for Target Port
		 */
		case Opt_target_fabric:
			t_fabric = match_strdup(args);
			if (!t_fabric) {
				ret = -ENOMEM;
				goto out;
			}
			break;
		case Opt_target_node:
			t_port = match_strdup(args);
			if (!t_port) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) {
				pr_err("APTPL metadata target_node="
					" exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
					PR_APTPL_MAX_TPORT_LEN);
				ret = -EINVAL;
				break;
			}
			break;
		case Opt_tpgt:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			tpgt = (u16)arg;
			break;
		case Opt_port_rtpi:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			break;
		case Opt_target_lun:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			target_lun = (u64)arg;
			break;
		default:
			break;
		}
	}

	if (!i_port || !t_port || !sa_res_key) {
		pr_err("Illegal parameters for APTPL registration\n");
		ret = -EINVAL;
		goto out;
	}

	if (res_holder && !(type)) {
		pr_err("Illegal PR type: 0x%02x for reservation"
				" holder\n", type);
		ret = -EINVAL;
		goto out;
	}

	ret = core_scsi3_alloc_aptpl_registration(&dev->t10_pr, sa_res_key,
			i_port, isid, mapped_lun, t_port, tpgt, target_lun,
			res_holder, all_tg_pt, type);
out:
	kfree(i_fabric);
	kfree(t_fabric);
	kfree(isid);
	kfree(i_port);
	kfree(t_port);
	kfree(orig);
	return (ret == 0) ? count : ret;
}

SE_DEV_PR_ATTR(res_aptpl_metadata, S_IRUGO | S_IWUSR);
CONFIGFS_EATTR_OPS(target_core_dev_pr, se_device, dev_pr_group);

static struct configfs_attribute *target_core_dev_pr_attrs[] = {
	&target_core_dev_pr_res_holder.attr,
	&target_core_dev_pr_res_pr_all_tgt_pts.attr,
	&target_core_dev_pr_res_pr_generation.attr,
	&target_core_dev_pr_res_pr_holder_tg_port.attr,
	&target_core_dev_pr_res_pr_registered_i_pts.attr,
	&target_core_dev_pr_res_pr_type.attr,
	&target_core_dev_pr_res_type.attr,
	&target_core_dev_pr_res_aptpl_active.attr,
	&target_core_dev_pr_res_aptpl_metadata.attr,
	NULL,
};

static struct configfs_item_operations target_core_dev_pr_ops = {
	.show_attribute		= target_core_dev_pr_attr_show,
	.store_attribute	= target_core_dev_pr_attr_store,
};

TB_CIT_SETUP(dev_pr, &target_core_dev_pr_ops, NULL, target_core_dev_pr_attrs);

/* End functions for struct config_item_type tb_dev_pr_cit */
/* Start functions for struct config_item_type tb_dev_cit */

static ssize_t target_core_show_dev_info(void *p, char *page)
{
	struct se_device *dev = p;
	int bl = 0;
	ssize_t read_bytes = 0;

	transport_dump_dev_state(dev, page, &bl);
	read_bytes += bl;
	read_bytes += dev->transport->show_configfs_dev_params(dev,
			page+read_bytes);
	return read_bytes;
}

static struct target_core_configfs_attribute target_core_attr_dev_info = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "info",
		    .ca_mode = S_IRUGO },
	.show	= target_core_show_dev_info,
	.store	= NULL,
};
static ssize_t target_core_store_dev_control(
	void *p,
	const char *page,
	size_t count)
{
	struct se_device *dev = p;

	return dev->transport->set_configfs_dev_params(dev, page, count);
}

static struct target_core_configfs_attribute target_core_attr_dev_control = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "control",
		    .ca_mode = S_IWUSR },
	.show	= NULL,
	.store	= target_core_store_dev_control,
};
static ssize_t target_core_show_dev_alias(void *p, char *page)
{
	struct se_device *dev = p;

	if (!(dev->dev_flags & DF_USING_ALIAS))
		return 0;

	return snprintf(page, PAGE_SIZE, "%s\n", dev->dev_alias);
}

static ssize_t target_core_store_dev_alias(
	void *p,
	const char *page,
	size_t count)
{
	struct se_device *dev = p;
	struct se_hba *hba = dev->se_hba;
	ssize_t read_bytes;

	if (count > (SE_DEV_ALIAS_LEN-1)) {
		pr_err("alias count: %d exceeds"
			" SE_DEV_ALIAS_LEN-1: %u\n", (int)count,
			SE_DEV_ALIAS_LEN-1);
		return -EINVAL;
	}

	read_bytes = snprintf(&dev->dev_alias[0], SE_DEV_ALIAS_LEN, "%s", page);
	if (!read_bytes)
		return -EINVAL;
	if (dev->dev_alias[read_bytes - 1] == '\n')
		dev->dev_alias[read_bytes - 1] = '\0';

	dev->dev_flags |= DF_USING_ALIAS;

	pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
		config_item_name(&hba->hba_group.cg_item),
		config_item_name(&dev->dev_group.cg_item),
		dev->dev_alias);

	return read_bytes;
}

static struct target_core_configfs_attribute target_core_attr_dev_alias = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "alias",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= target_core_show_dev_alias,
	.store	= target_core_store_dev_alias,
};
static ssize_t target_core_show_dev_udev_path(void *p, char *page)
{
	struct se_device *dev = p;

	if (!(dev->dev_flags & DF_USING_UDEV_PATH))
		return 0;

	return snprintf(page, PAGE_SIZE, "%s\n", dev->udev_path);
}

static ssize_t target_core_store_dev_udev_path(
	void *p,
	const char *page,
	size_t count)
{
	struct se_device *dev = p;
	struct se_hba *hba = dev->se_hba;
	ssize_t read_bytes;

	if (count > (SE_UDEV_PATH_LEN-1)) {
		pr_err("udev_path count: %d exceeds"
			" SE_UDEV_PATH_LEN-1: %u\n", (int)count,
			SE_UDEV_PATH_LEN-1);
		return -EINVAL;
	}

	read_bytes = snprintf(&dev->udev_path[0], SE_UDEV_PATH_LEN,
			"%s", page);
	if (!read_bytes)
		return -EINVAL;
	if (dev->udev_path[read_bytes - 1] == '\n')
		dev->udev_path[read_bytes - 1] = '\0';

	dev->dev_flags |= DF_USING_UDEV_PATH;

	pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
		config_item_name(&hba->hba_group.cg_item),
		config_item_name(&dev->dev_group.cg_item),
		dev->udev_path);

	return read_bytes;
}

static struct target_core_configfs_attribute target_core_attr_dev_udev_path = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "udev_path",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= target_core_show_dev_udev_path,
	.store	= target_core_store_dev_udev_path,
};
static ssize_t target_core_show_dev_enable(void *p, char *page)
{
	struct se_device *dev = p;

	return snprintf(page, PAGE_SIZE, "%d\n", !!(dev->dev_flags & DF_CONFIGURED));
}

static ssize_t target_core_store_dev_enable(
	void *p,
	const char *page,
	size_t count)
{
	struct se_device *dev = p;
	char *ptr;
	int ret;

	ptr = strstr(page, "1");
	if (!ptr) {
		pr_err("For dev_enable ops, only valid value"
				" is \"1\"\n");
		return -EINVAL;
	}

	ret = target_configure_device(dev);
	if (ret)
		return ret;
	return count;
}

static struct target_core_configfs_attribute target_core_attr_dev_enable = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "enable",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= target_core_show_dev_enable,
	.store	= target_core_store_dev_enable,
};
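/*
 * Example usage from userspace (a sketch): once a backstore has been set up
 * via the control attribute, writing "1" here invokes
 * target_configure_device():
 *
 *	echo 1 > /sys/kernel/config/target/core/$HBA/$DEV/enable
 */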
static ssize_t target_core_show_alua_lu_gp(void *p, char *page)
{
	struct se_device *dev = p;
	struct config_item *lu_ci;
	struct t10_alua_lu_gp *lu_gp;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	ssize_t len = 0;

	lu_gp_mem = dev->dev_alua_lu_gp_mem;
	if (!lu_gp_mem)
		return 0;

	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
	lu_gp = lu_gp_mem->lu_gp;
	if (lu_gp) {
		lu_ci = &lu_gp->lu_gp_group.cg_item;
		len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n",
			config_item_name(lu_ci), lu_gp->lu_gp_id);
	}
	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

	return len;
}
static ssize_t target_core_store_alua_lu_gp(
	void *p,
	const char *page,
	size_t count)
{
	struct se_device *dev = p;
	struct se_hba *hba = dev->se_hba;
	struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	unsigned char buf[LU_GROUP_NAME_BUF];
	int move = 0;

	lu_gp_mem = dev->dev_alua_lu_gp_mem;
	if (!lu_gp_mem)
		return 0;

	if (count > LU_GROUP_NAME_BUF) {
		pr_err("ALUA LU Group Alias too large!\n");
		return -EINVAL;
	}
	memset(buf, 0, LU_GROUP_NAME_BUF);
	memcpy(buf, page, count);
	/*
	 * Any ALUA logical unit alias besides "NULL" means we will be
	 * making a new group association.
	 */
	if (strcmp(strstrip(buf), "NULL")) {
		/*
		 * core_alua_get_lu_gp_by_name() will increment reference to
		 * struct t10_alua_lu_gp.  This reference is released with
		 * core_alua_put_lu_gp_from_name() below.
		 */
		lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf));
		if (!lu_gp_new)
			return -ENODEV;
	}

	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
	lu_gp = lu_gp_mem->lu_gp;
	if (lu_gp) {
		/*
		 * Clearing an existing lu_gp association, and replacing
		 * with NULL
		 */
		if (!lu_gp_new) {
			pr_debug("Target_Core_ConfigFS: Releasing %s/%s"
				" from ALUA LU Group: core/alua/lu_gps/%s, ID:"
				" %hu\n",
				config_item_name(&hba->hba_group.cg_item),
				config_item_name(&dev->dev_group.cg_item),
				config_item_name(&lu_gp->lu_gp_group.cg_item),
				lu_gp->lu_gp_id);

			__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
			spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

			return count;
		}
		/*
		 * Removing existing association of lu_gp_mem with lu_gp
		 */
		__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
		move = 1;
	}
	/*
	 * Associate lu_gp_mem with lu_gp_new.
	 */
	__core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new);
	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

	pr_debug("Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:"
		" core/alua/lu_gps/%s, ID: %hu\n",
		(move) ? "Moving" : "Adding",
		config_item_name(&hba->hba_group.cg_item),
		config_item_name(&dev->dev_group.cg_item),
		config_item_name(&lu_gp_new->lu_gp_group.cg_item),
		lu_gp_new->lu_gp_id);

	core_alua_put_lu_gp_from_name(lu_gp_new);
	return count;
}

static struct target_core_configfs_attribute target_core_attr_dev_alua_lu_gp = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "alua_lu_gp",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= target_core_show_alua_lu_gp,
	.store	= target_core_store_alua_lu_gp,
};
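/*
 * Example usage from userspace (a sketch): associate a device with an
 * existing ALUA logical unit group, or write "NULL" to clear the
 * association, per the strcmp() check above:
 *
 *	echo default_lu_gp > /sys/kernel/config/target/core/$HBA/$DEV/alua_lu_gp
 *	echo NULL > /sys/kernel/config/target/core/$HBA/$DEV/alua_lu_gp
 */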
static ssize_t target_core_show_dev_lba_map(void *p, char *page)
{
	struct se_device *dev = p;
	struct t10_alua_lba_map *map;
	struct t10_alua_lba_map_member *mem;
	char *b = page;
	int bl = 0;
	char state;

	spin_lock(&dev->t10_alua.lba_map_lock);
	if (!list_empty(&dev->t10_alua.lba_map_list))
		bl += sprintf(b + bl, "%u %u\n",
			      dev->t10_alua.lba_map_segment_size,
			      dev->t10_alua.lba_map_segment_multiplier);
	list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) {
		bl += sprintf(b + bl, "%llu %llu",
			      map->lba_map_first_lba, map->lba_map_last_lba);
		list_for_each_entry(mem, &map->lba_map_mem_list,
				    lba_map_mem_list) {
			switch (mem->lba_map_mem_alua_state) {
			case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
				state = 'O';
				break;
			case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
				state = 'A';
				break;
			case ALUA_ACCESS_STATE_STANDBY:
				state = 'S';
				break;
			case ALUA_ACCESS_STATE_UNAVAILABLE:
				state = 'U';
				break;
			default:
				state = '.';
				break;
			}
			bl += sprintf(b + bl, " %d:%c",
				      mem->lba_map_mem_alua_pg_id, state);
		}
		bl += sprintf(b + bl, "\n");
	}
	spin_unlock(&dev->t10_alua.lba_map_lock);
	return bl;
}
static ssize_t target_core_store_dev_lba_map(
	void *p,
	const char *page,
	size_t count)
{
	struct se_device *dev = p;
	struct t10_alua_lba_map *lba_map = NULL;
	struct list_head lba_list;
	char *map_entries, *ptr;
	char state;
	int pg_num = -1, pg;
	int ret = 0, num = 0, pg_id, alua_state;
	unsigned long start_lba = -1, end_lba = -1;
	unsigned long segment_size = -1, segment_mult = -1;

	map_entries = kstrdup(page, GFP_KERNEL);
	if (!map_entries)
		return -ENOMEM;

	INIT_LIST_HEAD(&lba_list);
	while ((ptr = strsep(&map_entries, "\n")) != NULL) {
		if (!*ptr)
			continue;

		if (num == 0) {
			if (sscanf(ptr, "%lu %lu\n",
				   &segment_size, &segment_mult) != 2) {
				pr_err("Invalid line %d\n", num);
				break;
			}
			num++;
			continue;
		}
		if (sscanf(ptr, "%lu %lu", &start_lba, &end_lba) != 2) {
			pr_err("Invalid line %d\n", num);
			ret = -EINVAL;
			break;
		}
		ptr = strchr(ptr, ' ');
		if (!ptr) {
			pr_err("Invalid line %d, missing end lba\n", num);
			ret = -EINVAL;
			break;
		}
		ptr++;
		ptr = strchr(ptr, ' ');
		if (!ptr) {
			pr_err("Invalid line %d, missing state definitions\n",
			       num);
			ret = -EINVAL;
			break;
		}
		ptr++;
		lba_map = core_alua_allocate_lba_map(&lba_list,
						     start_lba, end_lba);
		if (IS_ERR(lba_map)) {
			ret = PTR_ERR(lba_map);
			break;
		}
		pg = 0;
		while (sscanf(ptr, "%d:%c", &pg_id, &state) == 2) {
			switch (state) {
			case 'O':
				alua_state = ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
				break;
			case 'A':
				alua_state = ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED;
				break;
			case 'S':
				alua_state = ALUA_ACCESS_STATE_STANDBY;
				break;
			case 'U':
				alua_state = ALUA_ACCESS_STATE_UNAVAILABLE;
				break;
			default:
				pr_err("Invalid ALUA state '%c'\n", state);
				ret = -EINVAL;
				goto out;
			}

			ret = core_alua_allocate_lba_map_mem(lba_map,
							     pg_id, alua_state);
			if (ret) {
				pr_err("Invalid target descriptor %d:%c "
				       "at line %d\n",
				       pg_id, state, num);
				break;
			}
			pg++;
			ptr = strchr(ptr, ' ');
			if (ptr)
				ptr++;
			else
				break;
		}
		if (pg_num == -1)
			pg_num = pg;
		else if (pg != pg_num) {
			pr_err("Only %d from %d port groups definitions "
			       "at line %d\n", pg, pg_num, num);
			ret = -EINVAL;
			break;
		}
		num++;
	}
out:
	if (ret) {
		core_alua_free_lba_map(&lba_list);
		count = ret;
	} else
		core_alua_set_lba_map(dev, &lba_list,
				      segment_size, segment_mult);
	kfree(map_entries);
	return count;
}

static struct target_core_configfs_attribute target_core_attr_dev_lba_map = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "lba_map",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= target_core_show_dev_lba_map,
	.store	= target_core_store_dev_lba_map,
};
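/*
 * Example lba_map input (a sketch), in the format parsed above: the first
 * line carries <segment_size> <segment_multiplier>, and each further line
 * carries <first_lba> <last_lba> followed by <pg_id>:<state> pairs, where
 * state is one of O/A/S/U:
 *
 *	64 1
 *	0 127 0:O 1:A
 *	128 255 0:A 1:O
 *
 * written as a whole into /sys/kernel/config/target/core/$HBA/$DEV/lba_map.
 */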
static struct configfs_attribute *target_core_dev_attrs[] = {
	&target_core_attr_dev_info.attr,
	&target_core_attr_dev_control.attr,
	&target_core_attr_dev_alias.attr,
	&target_core_attr_dev_udev_path.attr,
	&target_core_attr_dev_enable.attr,
	&target_core_attr_dev_alua_lu_gp.attr,
	&target_core_attr_dev_lba_map.attr,
	NULL,
};
static void target_core_dev_release(struct config_item *item)
{
	struct config_group *dev_cg = to_config_group(item);
	struct se_device *dev =
		container_of(dev_cg, struct se_device, dev_group);

	kfree(dev_cg->default_groups);
	target_free_device(dev);
}
static ssize_t target_core_dev_show(struct config_item *item,
				     struct configfs_attribute *attr,
				     char *page)
{
	struct config_group *dev_cg = to_config_group(item);
	struct se_device *dev =
		container_of(dev_cg, struct se_device, dev_group);
	struct target_core_configfs_attribute *tc_attr = container_of(
			attr, struct target_core_configfs_attribute, attr);

	if (!tc_attr->show)
		return -EINVAL;

	return tc_attr->show(dev, page);
}
2287 static ssize_t
target_core_dev_store(struct config_item
*item
,
2288 struct configfs_attribute
*attr
,
2289 const char *page
, size_t count
)
2291 struct config_group
*dev_cg
= to_config_group(item
);
2292 struct se_device
*dev
=
2293 container_of(dev_cg
, struct se_device
, dev_group
);
2294 struct target_core_configfs_attribute
*tc_attr
= container_of(
2295 attr
, struct target_core_configfs_attribute
, attr
);
2297 if (!tc_attr
->store
)
2300 return tc_attr
->store(dev
, page
, count
);
2303 static struct configfs_item_operations target_core_dev_item_ops
= {
2304 .release
= target_core_dev_release
,
2305 .show_attribute
= target_core_dev_show
,
2306 .store_attribute
= target_core_dev_store
,
2309 TB_CIT_SETUP(dev
, &target_core_dev_item_ops
, NULL
, target_core_dev_attrs
);
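
/*
 * With tb_dev_cit in place, each backend device directory exposes the
 * attributes in target_core_dev_attrs[] as files, e.g. (hypothetical HBA
 * and device names):
 *
 *	/sys/kernel/config/target/core/iblock_0/my_dev/enable
 *	/sys/kernel/config/target/core/iblock_0/my_dev/lba_map
 */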
/* End functions for struct config_item_type tb_dev_cit */

/* Start functions for struct config_item_type target_core_alua_lu_gp_cit */

CONFIGFS_EATTR_STRUCT(target_core_alua_lu_gp, t10_alua_lu_gp);
#define SE_DEV_ALUA_LU_ATTR(_name, _mode)				\
static struct target_core_alua_lu_gp_attribute				\
			target_core_alua_lu_gp_##_name =		\
	__CONFIGFS_EATTR(_name, _mode,					\
	target_core_alua_lu_gp_show_attr_##_name,			\
	target_core_alua_lu_gp_store_attr_##_name);

#define SE_DEV_ALUA_LU_ATTR_RO(_name)					\
static struct target_core_alua_lu_gp_attribute				\
			target_core_alua_lu_gp_##_name =		\
	__CONFIGFS_EATTR_RO(_name,					\
	target_core_alua_lu_gp_show_attr_##_name);

/*
 * lu_gp_id
 */
static ssize_t target_core_alua_lu_gp_show_attr_lu_gp_id(
	struct t10_alua_lu_gp *lu_gp,
	char *page)
{
	if (!lu_gp->lu_gp_valid_id)
		return 0;

	return sprintf(page, "%hu\n", lu_gp->lu_gp_id);
}

static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id(
	struct t10_alua_lu_gp *lu_gp,
	const char *page,
	size_t count)
{
	struct config_group *alua_lu_gp_cg = &lu_gp->lu_gp_group;
	unsigned long lu_gp_id;
	int ret;

	ret = kstrtoul(page, 0, &lu_gp_id);
	if (ret < 0) {
		pr_err("kstrtoul() returned %d for"
			" lu_gp_id\n", ret);
		return ret;
	}
	if (lu_gp_id > 0x0000ffff) {
		pr_err("ALUA lu_gp_id: %lu exceeds maximum:"
			" 0x0000ffff\n", lu_gp_id);
		return -EINVAL;
	}

	ret = core_alua_set_lu_gp_id(lu_gp, (u16)lu_gp_id);
	if (ret < 0)
		return -EINVAL;

	pr_debug("Target_Core_ConfigFS: Set ALUA Logical Unit"
		" Group: core/alua/lu_gps/%s to ID: %hu\n",
		config_item_name(&alua_lu_gp_cg->cg_item),
		lu_gp->lu_gp_id);

	return count;
}

SE_DEV_ALUA_LU_ATTR(lu_gp_id, S_IRUGO | S_IWUSR);
/*
 * members
 */
static ssize_t target_core_alua_lu_gp_show_attr_members(
	struct t10_alua_lu_gp *lu_gp,
	char *page)
{
	struct se_device *dev;
	struct se_hba *hba;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	ssize_t len = 0, cur_len;
	unsigned char buf[LU_GROUP_NAME_BUF];

	memset(buf, 0, LU_GROUP_NAME_BUF);

	spin_lock(&lu_gp->lu_gp_lock);
	list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
		dev = lu_gp_mem->lu_gp_mem_dev;
		hba = dev->se_hba;

		cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n",
			config_item_name(&hba->hba_group.cg_item),
			config_item_name(&dev->dev_group.cg_item));
		cur_len++; /* Extra byte for NULL terminator */

		if ((cur_len + len) > PAGE_SIZE) {
			pr_warn("Ran out of lu_gp_show_attr"
				"_members buffer\n");
			break;
		}
		memcpy(page+len, buf, cur_len);
		len += cur_len;
	}
	spin_unlock(&lu_gp->lu_gp_lock);

	return len;
}

SE_DEV_ALUA_LU_ATTR_RO(members);

CONFIGFS_EATTR_OPS(target_core_alua_lu_gp, t10_alua_lu_gp, lu_gp_group);

static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = {
	&target_core_alua_lu_gp_lu_gp_id.attr,
	&target_core_alua_lu_gp_members.attr,
	NULL,
};

static void target_core_alua_lu_gp_release(struct config_item *item)
{
	struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
			struct t10_alua_lu_gp, lu_gp_group);

	core_alua_free_lu_gp(lu_gp);
}

static struct configfs_item_operations target_core_alua_lu_gp_ops = {
	.release		= target_core_alua_lu_gp_release,
	.show_attribute		= target_core_alua_lu_gp_attr_show,
	.store_attribute	= target_core_alua_lu_gp_attr_store,
};

static struct config_item_type target_core_alua_lu_gp_cit = {
	.ct_item_ops		= &target_core_alua_lu_gp_ops,
	.ct_attrs		= target_core_alua_lu_gp_attrs,
	.ct_owner		= THIS_MODULE,
};

/* End functions for struct config_item_type target_core_alua_lu_gp_cit */
/* Start functions for struct config_item_type target_core_alua_lu_gps_cit */

static struct config_group *target_core_alua_create_lu_gp(
	struct config_group *group,
	const char *name)
{
	struct t10_alua_lu_gp *lu_gp;
	struct config_group *alua_lu_gp_cg = NULL;
	struct config_item *alua_lu_gp_ci = NULL;

	lu_gp = core_alua_allocate_lu_gp(name, 0);
	if (IS_ERR(lu_gp))
		return NULL;

	alua_lu_gp_cg = &lu_gp->lu_gp_group;
	alua_lu_gp_ci = &alua_lu_gp_cg->cg_item;

	config_group_init_type_name(alua_lu_gp_cg, name,
				&target_core_alua_lu_gp_cit);

	pr_debug("Target_Core_ConfigFS: Allocated ALUA Logical Unit"
		" Group: core/alua/lu_gps/%s\n",
		config_item_name(alua_lu_gp_ci));

	return alua_lu_gp_cg;
}

static void target_core_alua_drop_lu_gp(
	struct config_group *group,
	struct config_item *item)
{
	struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
			struct t10_alua_lu_gp, lu_gp_group);

	pr_debug("Target_Core_ConfigFS: Releasing ALUA Logical Unit"
		" Group: core/alua/lu_gps/%s, ID: %hu\n",
		config_item_name(item), lu_gp->lu_gp_id);
	/*
	 * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release()
	 * -> target_core_alua_lu_gp_release()
	 */
	config_item_put(item);
}

static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
	.make_group		= &target_core_alua_create_lu_gp,
	.drop_item		= &target_core_alua_drop_lu_gp,
};

static struct config_item_type target_core_alua_lu_gps_cit = {
	.ct_item_ops		= NULL,
	.ct_group_ops		= &target_core_alua_lu_gps_group_ops,
	.ct_owner		= THIS_MODULE,
};

/* End functions for struct config_item_type target_core_alua_lu_gps_cit */
/* Start functions for struct config_item_type target_core_alua_tg_pt_gp_cit */

CONFIGFS_EATTR_STRUCT(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp);
#define SE_DEV_ALUA_TG_PT_ATTR(_name, _mode)				\
static struct target_core_alua_tg_pt_gp_attribute			\
			target_core_alua_tg_pt_gp_##_name =		\
	__CONFIGFS_EATTR(_name, _mode,					\
	target_core_alua_tg_pt_gp_show_attr_##_name,			\
	target_core_alua_tg_pt_gp_store_attr_##_name);

#define SE_DEV_ALUA_TG_PT_ATTR_RO(_name)				\
static struct target_core_alua_tg_pt_gp_attribute			\
			target_core_alua_tg_pt_gp_##_name =		\
	__CONFIGFS_EATTR_RO(_name,					\
	target_core_alua_tg_pt_gp_show_attr_##_name);

/*
 * alua_access_state
 */
static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_state(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n",
		atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state));
}

static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
	unsigned long tmp;
	int new_state, ret;

	if (!tg_pt_gp->tg_pt_gp_valid_id) {
		pr_err("Unable to do implicit ALUA on non valid"
			" tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
		return -EINVAL;
	}
	if (!(dev->dev_flags & DF_CONFIGURED)) {
		pr_err("Unable to set alua_access_state while device is"
		       " not configured\n");
		return -ENODEV;
	}

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract new ALUA access state from"
				" %s\n", page);
		return ret;
	}
	new_state = (int)tmp;

	if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) {
		pr_err("Unable to process implicit configfs ALUA"
			" transition while TPGS_IMPLICIT_ALUA is disabled\n");
		return -EINVAL;
	}
	if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA &&
	    new_state == ALUA_ACCESS_STATE_LBA_DEPENDENT) {
		/* LBA DEPENDENT is only allowed with implicit ALUA */
		pr_err("Unable to process implicit configfs ALUA transition"
		       " while explicit ALUA management is enabled\n");
		return -EINVAL;
	}

	ret = core_alua_do_port_transition(tg_pt_gp, dev,
					NULL, NULL, new_state, 0);
	return (!ret) ? count : -EINVAL;
}

SE_DEV_ALUA_TG_PT_ATTR(alua_access_state, S_IRUGO | S_IWUSR);
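
/*
 * Example of an implicit transition driven through the attribute above,
 * assuming a hypothetical group that already has a valid tg_pt_gp_id and
 * TPGS_IMPLICIT_ALUA enabled (2 being the SPC-4 Standby state value):
 *
 *	echo 2 > .../alua/tg_pt_gps/my_tg_pt_gp/alua_access_state
 */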
/*
 * alua_access_status
 */
static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_status(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%s\n",
		core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status));
}

static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int new_status, ret;

	if (!tg_pt_gp->tg_pt_gp_valid_id) {
		pr_err("Unable to do set ALUA access status on non"
			" valid tg_pt_gp ID: %hu\n",
			tg_pt_gp->tg_pt_gp_valid_id);
		return -EINVAL;
	}

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract new ALUA access status"
				" from %s\n", page);
		return ret;
	}
	new_status = (int)tmp;

	if ((new_status != ALUA_STATUS_NONE) &&
	    (new_status != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
	    (new_status != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
		pr_err("Illegal ALUA access status: 0x%02x\n",
				new_status);
		return -EINVAL;
	}

	tg_pt_gp->tg_pt_gp_alua_access_status = new_status;
	return count;
}

SE_DEV_ALUA_TG_PT_ATTR(alua_access_status, S_IRUGO | S_IWUSR);
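
/*
 * Only the three status codes checked above are accepted; e.g. resetting
 * a hypothetical group back to "None" (ALUA_STATUS_NONE is 0):
 *
 *	echo 0 > .../alua/tg_pt_gps/my_tg_pt_gp/alua_access_status
 */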
/*
 * alua_access_type
 */
static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_type(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return core_alua_show_access_type(tg_pt_gp, page);
}

static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_type(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	return core_alua_store_access_type(tg_pt_gp, page, count);
}

SE_DEV_ALUA_TG_PT_ATTR(alua_access_type, S_IRUGO | S_IWUSR);

/*
 * alua_supported_states
 */

#define SE_DEV_ALUA_SUPPORT_STATE_SHOW(_name, _var, _bit)		\
static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_support_##_name( \
	struct t10_alua_tg_pt_gp *t, char *p)				\
{									\
	return sprintf(p, "%d\n", !!(t->_var & _bit));			\
}

#define SE_DEV_ALUA_SUPPORT_STATE_STORE(_name, _var, _bit)		\
static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_support_##_name(\
	struct t10_alua_tg_pt_gp *t, const char *p, size_t c)		\
{									\
	unsigned long tmp;						\
	int ret;							\
									\
	if (!t->tg_pt_gp_valid_id) {					\
		pr_err("Unable to do set ##_name ALUA state on non"	\
		       " valid tg_pt_gp ID: %hu\n",			\
		       t->tg_pt_gp_valid_id);				\
		return -EINVAL;						\
	}								\
									\
	ret = kstrtoul(p, 0, &tmp);					\
	if (ret < 0) {							\
		pr_err("Invalid value '%s', must be '0' or '1'\n", p);	\
		return ret;						\
	}								\
	if (tmp > 1) {							\
		pr_err("Invalid value '%ld', must be '0' or '1'\n", tmp); \
		return -EINVAL;						\
	}								\
	if (tmp)							\
		t->_var |= _bit;					\
	else								\
		t->_var &= ~_bit;					\
									\
	return c;							\
}
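
/*
 * Each SHOW/STORE pair instantiated below expands the templates above for
 * one bit in tg_pt_gp_alua_supported_states, so writing '0' or '1' to,
 * e.g., alua_support_standby clears or sets ALUA_S_SUP.
 */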
SE_DEV_ALUA_SUPPORT_STATE_SHOW(transitioning,
			       tg_pt_gp_alua_supported_states, ALUA_T_SUP);
SE_DEV_ALUA_SUPPORT_STATE_STORE(transitioning,
				tg_pt_gp_alua_supported_states, ALUA_T_SUP);
SE_DEV_ALUA_TG_PT_ATTR(alua_support_transitioning, S_IRUGO | S_IWUSR);

SE_DEV_ALUA_SUPPORT_STATE_SHOW(offline,
			       tg_pt_gp_alua_supported_states, ALUA_O_SUP);
SE_DEV_ALUA_SUPPORT_STATE_STORE(offline,
				tg_pt_gp_alua_supported_states, ALUA_O_SUP);
SE_DEV_ALUA_TG_PT_ATTR(alua_support_offline, S_IRUGO | S_IWUSR);

SE_DEV_ALUA_SUPPORT_STATE_SHOW(lba_dependent,
			       tg_pt_gp_alua_supported_states, ALUA_LBD_SUP);
SE_DEV_ALUA_SUPPORT_STATE_STORE(lba_dependent,
				tg_pt_gp_alua_supported_states, ALUA_LBD_SUP);
SE_DEV_ALUA_TG_PT_ATTR(alua_support_lba_dependent, S_IRUGO);

SE_DEV_ALUA_SUPPORT_STATE_SHOW(unavailable,
			       tg_pt_gp_alua_supported_states, ALUA_U_SUP);
SE_DEV_ALUA_SUPPORT_STATE_STORE(unavailable,
				tg_pt_gp_alua_supported_states, ALUA_U_SUP);
SE_DEV_ALUA_TG_PT_ATTR(alua_support_unavailable, S_IRUGO | S_IWUSR);

SE_DEV_ALUA_SUPPORT_STATE_SHOW(standby,
			       tg_pt_gp_alua_supported_states, ALUA_S_SUP);
SE_DEV_ALUA_SUPPORT_STATE_STORE(standby,
				tg_pt_gp_alua_supported_states, ALUA_S_SUP);
SE_DEV_ALUA_TG_PT_ATTR(alua_support_standby, S_IRUGO | S_IWUSR);

SE_DEV_ALUA_SUPPORT_STATE_SHOW(active_optimized,
			       tg_pt_gp_alua_supported_states, ALUA_AO_SUP);
SE_DEV_ALUA_SUPPORT_STATE_STORE(active_optimized,
				tg_pt_gp_alua_supported_states, ALUA_AO_SUP);
SE_DEV_ALUA_TG_PT_ATTR(alua_support_active_optimized, S_IRUGO | S_IWUSR);

SE_DEV_ALUA_SUPPORT_STATE_SHOW(active_nonoptimized,
			       tg_pt_gp_alua_supported_states, ALUA_AN_SUP);
SE_DEV_ALUA_SUPPORT_STATE_STORE(active_nonoptimized,
				tg_pt_gp_alua_supported_states, ALUA_AN_SUP);
SE_DEV_ALUA_TG_PT_ATTR(alua_support_active_nonoptimized, S_IRUGO | S_IWUSR);
/*
 * alua_write_metadata
 */
static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_write_metadata(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_write_metadata);
}

static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_write_metadata(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_write_metadata\n");
		return ret;
	}

	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for alua_write_metadata:"
			" %lu\n", tmp);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_write_metadata = (int)tmp;

	return count;
}

SE_DEV_ALUA_TG_PT_ATTR(alua_write_metadata, S_IRUGO | S_IWUSR);
/*
 * nonop_delay_msecs
 */
static ssize_t target_core_alua_tg_pt_gp_show_attr_nonop_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return core_alua_show_nonop_delay_msecs(tg_pt_gp, page);
}

static ssize_t target_core_alua_tg_pt_gp_store_attr_nonop_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	return core_alua_store_nonop_delay_msecs(tg_pt_gp, page, count);
}

SE_DEV_ALUA_TG_PT_ATTR(nonop_delay_msecs, S_IRUGO | S_IWUSR);
/*
 * trans_delay_msecs
 */
static ssize_t target_core_alua_tg_pt_gp_show_attr_trans_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return core_alua_show_trans_delay_msecs(tg_pt_gp, page);
}

static ssize_t target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	return core_alua_store_trans_delay_msecs(tg_pt_gp, page, count);
}

SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR);
/*
 * implicit_trans_secs
 */
static ssize_t target_core_alua_tg_pt_gp_show_attr_implicit_trans_secs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return core_alua_show_implicit_trans_secs(tg_pt_gp, page);
}

static ssize_t target_core_alua_tg_pt_gp_store_attr_implicit_trans_secs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	return core_alua_store_implicit_trans_secs(tg_pt_gp, page, count);
}

SE_DEV_ALUA_TG_PT_ATTR(implicit_trans_secs, S_IRUGO | S_IWUSR);

/*
 * preferred
 */
static ssize_t target_core_alua_tg_pt_gp_show_attr_preferred(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return core_alua_show_preferred_bit(tg_pt_gp, page);
}

static ssize_t target_core_alua_tg_pt_gp_store_attr_preferred(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	return core_alua_store_preferred_bit(tg_pt_gp, page, count);
}

SE_DEV_ALUA_TG_PT_ATTR(preferred, S_IRUGO | S_IWUSR);
/*
 * tg_pt_gp_id
 */
static ssize_t target_core_alua_tg_pt_gp_show_attr_tg_pt_gp_id(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	if (!tg_pt_gp->tg_pt_gp_valid_id)
		return 0;

	return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id);
}

static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	struct config_group *alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
	unsigned long tg_pt_gp_id;
	int ret;

	ret = kstrtoul(page, 0, &tg_pt_gp_id);
	if (ret < 0) {
		pr_err("kstrtoul() returned %d for"
			" tg_pt_gp_id\n", ret);
		return ret;
	}
	if (tg_pt_gp_id > 0x0000ffff) {
		pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum:"
			" 0x0000ffff\n", tg_pt_gp_id);
		return -EINVAL;
	}

	ret = core_alua_set_tg_pt_gp_id(tg_pt_gp, (u16)tg_pt_gp_id);
	if (ret < 0)
		return -EINVAL;

	pr_debug("Target_Core_ConfigFS: Set ALUA Target Port Group: "
		"core/alua/tg_pt_gps/%s to ID: %hu\n",
		config_item_name(&alua_tg_pt_gp_cg->cg_item),
		tg_pt_gp->tg_pt_gp_id);

	return count;
}

SE_DEV_ALUA_TG_PT_ATTR(tg_pt_gp_id, S_IRUGO | S_IWUSR);
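
/*
 * As with lu_gp_id above, a per-device target port group is created and
 * activated from userspace (hypothetical names):
 *
 *	mkdir /sys/kernel/config/target/core/$HBA/$DEV/alua/tg_pt_gps/my_tg_pt_gp
 *	echo 16 > .../alua/tg_pt_gps/my_tg_pt_gp/tg_pt_gp_id
 */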
/*
 * members
 */
static ssize_t target_core_alua_tg_pt_gp_show_attr_members(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	struct se_lun *lun;
	ssize_t len = 0, cur_len;
	unsigned char buf[TG_PT_GROUP_NAME_BUF];

	memset(buf, 0, TG_PT_GROUP_NAME_BUF);

	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
			lun_tg_pt_gp_link) {
		struct se_portal_group *tpg = lun->lun_tpg;

		cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
			"/%s\n", tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_wwn(tpg),
			tpg->se_tpg_tfo->tpg_get_tag(tpg),
			config_item_name(&lun->lun_group.cg_item));
		cur_len++; /* Extra byte for NULL terminator */

		if ((cur_len + len) > PAGE_SIZE) {
			pr_warn("Ran out of lu_gp_show_attr"
				"_members buffer\n");
			break;
		}
		memcpy(page+len, buf, cur_len);
		len += cur_len;
	}
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

	return len;
}

SE_DEV_ALUA_TG_PT_ATTR_RO(members);
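
/*
 * The members file built above lists one LUN per line in the form
 * <fabric>/<wwn>/tpgt_<tag>/<lun>, e.g. (hypothetical iSCSI export):
 *
 *	iscsi/iqn.2003-01.org.linux-iscsi.target/tpgt_1/lun_0
 */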
CONFIGFS_EATTR_OPS(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp,
			tg_pt_gp_group);

static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
	&target_core_alua_tg_pt_gp_alua_access_state.attr,
	&target_core_alua_tg_pt_gp_alua_access_status.attr,
	&target_core_alua_tg_pt_gp_alua_access_type.attr,
	&target_core_alua_tg_pt_gp_alua_support_transitioning.attr,
	&target_core_alua_tg_pt_gp_alua_support_offline.attr,
	&target_core_alua_tg_pt_gp_alua_support_lba_dependent.attr,
	&target_core_alua_tg_pt_gp_alua_support_unavailable.attr,
	&target_core_alua_tg_pt_gp_alua_support_standby.attr,
	&target_core_alua_tg_pt_gp_alua_support_active_nonoptimized.attr,
	&target_core_alua_tg_pt_gp_alua_support_active_optimized.attr,
	&target_core_alua_tg_pt_gp_alua_write_metadata.attr,
	&target_core_alua_tg_pt_gp_nonop_delay_msecs.attr,
	&target_core_alua_tg_pt_gp_trans_delay_msecs.attr,
	&target_core_alua_tg_pt_gp_implicit_trans_secs.attr,
	&target_core_alua_tg_pt_gp_preferred.attr,
	&target_core_alua_tg_pt_gp_tg_pt_gp_id.attr,
	&target_core_alua_tg_pt_gp_members.attr,
	NULL,
};

static void target_core_alua_tg_pt_gp_release(struct config_item *item)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
			struct t10_alua_tg_pt_gp, tg_pt_gp_group);

	core_alua_free_tg_pt_gp(tg_pt_gp);
}

static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
	.release		= target_core_alua_tg_pt_gp_release,
	.show_attribute		= target_core_alua_tg_pt_gp_attr_show,
	.store_attribute	= target_core_alua_tg_pt_gp_attr_store,
};

static struct config_item_type target_core_alua_tg_pt_gp_cit = {
	.ct_item_ops		= &target_core_alua_tg_pt_gp_ops,
	.ct_attrs		= target_core_alua_tg_pt_gp_attrs,
	.ct_owner		= THIS_MODULE,
};

/* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
/* Start functions for struct config_item_type tb_alua_tg_pt_gps_cit */

static struct config_group *target_core_alua_create_tg_pt_gp(
	struct config_group *group,
	const char *name)
{
	struct t10_alua *alua = container_of(group, struct t10_alua,
					alua_tg_pt_gps_group);
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct config_group *alua_tg_pt_gp_cg = NULL;
	struct config_item *alua_tg_pt_gp_ci = NULL;

	tg_pt_gp = core_alua_allocate_tg_pt_gp(alua->t10_dev, name, 0);
	if (!tg_pt_gp)
		return NULL;

	alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
	alua_tg_pt_gp_ci = &alua_tg_pt_gp_cg->cg_item;

	config_group_init_type_name(alua_tg_pt_gp_cg, name,
			&target_core_alua_tg_pt_gp_cit);

	pr_debug("Target_Core_ConfigFS: Allocated ALUA Target Port"
		" Group: alua/tg_pt_gps/%s\n",
		config_item_name(alua_tg_pt_gp_ci));

	return alua_tg_pt_gp_cg;
}

static void target_core_alua_drop_tg_pt_gp(
	struct config_group *group,
	struct config_item *item)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
			struct t10_alua_tg_pt_gp, tg_pt_gp_group);

	pr_debug("Target_Core_ConfigFS: Releasing ALUA Target Port"
		" Group: alua/tg_pt_gps/%s, ID: %hu\n",
		config_item_name(item), tg_pt_gp->tg_pt_gp_id);
	/*
	 * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release()
	 * -> target_core_alua_tg_pt_gp_release().
	 */
	config_item_put(item);
}

static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
	.make_group		= &target_core_alua_create_tg_pt_gp,
	.drop_item		= &target_core_alua_drop_tg_pt_gp,
};

TB_CIT_SETUP(dev_alua_tg_pt_gps, NULL, &target_core_alua_tg_pt_gps_group_ops, NULL);

/* End functions for struct config_item_type tb_alua_tg_pt_gps_cit */
/* Start functions for struct config_item_type target_core_alua_cit */

/*
 * target_core_alua_cit is a ConfigFS group that lives under
 * /sys/kernel/config/target/core/alua. There are default groups
 * core/alua/lu_gps and core/alua/tg_pt_gps that are attached to
 * target_core_alua_cit in target_core_init_configfs() below.
 */
static struct config_item_type target_core_alua_cit = {
	.ct_item_ops		= NULL,
	.ct_attrs		= NULL,
	.ct_owner		= THIS_MODULE,
};

/* End functions for struct config_item_type target_core_alua_cit */
/* Start functions for struct config_item_type tb_dev_stat_cit */

static struct config_group *target_core_stat_mkdir(
	struct config_group *group,
	const char *name)
{
	return ERR_PTR(-ENOSYS);
}

static void target_core_stat_rmdir(
	struct config_group *group,
	struct config_item *item)
{
	return;
}

static struct configfs_group_operations target_core_stat_group_ops = {
	.make_group		= &target_core_stat_mkdir,
	.drop_item		= &target_core_stat_rmdir,
};

TB_CIT_SETUP(dev_stat, NULL, &target_core_stat_group_ops, NULL);

/* End functions for struct config_item_type tb_dev_stat_cit */
/* Start functions for struct config_item_type target_core_hba_cit */

static struct config_group *target_core_make_subdev(
	struct config_group *group,
	const char *name)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct config_item *hba_ci = &group->cg_item;
	struct se_hba *hba = item_to_hba(hba_ci);
	struct target_backend *tb = hba->backend;
	struct se_device *dev;
	struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL;
	struct config_group *dev_stat_grp = NULL;
	int errno = -ENOMEM, ret;

	ret = mutex_lock_interruptible(&hba->hba_access_mutex);
	if (ret)
		return ERR_PTR(ret);

	dev = target_alloc_device(hba, name);
	if (!dev)
		goto out_unlock;

	dev_cg = &dev->dev_group;

	dev_cg->default_groups = kmalloc(sizeof(struct config_group *) * 6,
			GFP_KERNEL);
	if (!dev_cg->default_groups)
		goto out_free_device;

	config_group_init_type_name(dev_cg, name, &tb->tb_dev_cit);
	config_group_init_type_name(&dev->dev_attrib.da_group, "attrib",
			&tb->tb_dev_attrib_cit);
	config_group_init_type_name(&dev->dev_pr_group, "pr",
			&tb->tb_dev_pr_cit);
	config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn",
			&tb->tb_dev_wwn_cit);
	config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group,
			"alua", &tb->tb_dev_alua_tg_pt_gps_cit);
	config_group_init_type_name(&dev->dev_stat_grps.stat_group,
			"statistics", &tb->tb_dev_stat_cit);

	dev_cg->default_groups[0] = &dev->dev_attrib.da_group;
	dev_cg->default_groups[1] = &dev->dev_pr_group;
	dev_cg->default_groups[2] = &dev->t10_wwn.t10_wwn_group;
	dev_cg->default_groups[3] = &dev->t10_alua.alua_tg_pt_gps_group;
	dev_cg->default_groups[4] = &dev->dev_stat_grps.stat_group;
	dev_cg->default_groups[5] = NULL;
	/*
	 * Add core/$HBA/$DEV/alua/default_tg_pt_gp
	 */
	tg_pt_gp = core_alua_allocate_tg_pt_gp(dev, "default_tg_pt_gp", 1);
	if (!tg_pt_gp)
		goto out_free_dev_cg_default_groups;
	dev->t10_alua.default_tg_pt_gp = tg_pt_gp;

	tg_pt_gp_cg = &dev->t10_alua.alua_tg_pt_gps_group;
	tg_pt_gp_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
			GFP_KERNEL);
	if (!tg_pt_gp_cg->default_groups) {
		pr_err("Unable to allocate tg_pt_gp_cg->"
				"default_groups\n");
		goto out_free_tg_pt_gp;
	}

	config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group,
			"default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
	tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group;
	tg_pt_gp_cg->default_groups[1] = NULL;
	/*
	 * Add core/$HBA/$DEV/statistics/ default groups
	 */
	dev_stat_grp = &dev->dev_stat_grps.stat_group;
	dev_stat_grp->default_groups = kmalloc(sizeof(struct config_group *) * 4,
				GFP_KERNEL);
	if (!dev_stat_grp->default_groups) {
		pr_err("Unable to allocate dev_stat_grp->default_groups\n");
		goto out_free_tg_pt_gp_cg_default_groups;
	}
	target_stat_setup_dev_default_groups(dev);

	mutex_unlock(&hba->hba_access_mutex);
	return dev_cg;

out_free_tg_pt_gp_cg_default_groups:
	kfree(tg_pt_gp_cg->default_groups);
out_free_tg_pt_gp:
	core_alua_free_tg_pt_gp(tg_pt_gp);
out_free_dev_cg_default_groups:
	kfree(dev_cg->default_groups);
out_free_device:
	target_free_device(dev);
out_unlock:
	mutex_unlock(&hba->hba_access_mutex);
	return ERR_PTR(errno);
}
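
/*
 * Note that the error labels above unwind in the reverse order of the
 * allocations: the tg_pt_gp_cg default_groups array, then the default
 * tg_pt_gp, then the dev_cg default_groups array, then the device itself,
 * with hba_access_mutex always released last.
 */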
static void target_core_drop_subdev(
	struct config_group *group,
	struct config_item *item)
{
	struct config_group *dev_cg = to_config_group(item);
	struct se_device *dev =
		container_of(dev_cg, struct se_device, dev_group);
	struct se_hba *hba;
	struct config_item *df_item;
	struct config_group *tg_pt_gp_cg, *dev_stat_grp;
	int i;

	hba = item_to_hba(&dev->se_hba->hba_group.cg_item);

	mutex_lock(&hba->hba_access_mutex);

	dev_stat_grp = &dev->dev_stat_grps.stat_group;
	for (i = 0; dev_stat_grp->default_groups[i]; i++) {
		df_item = &dev_stat_grp->default_groups[i]->cg_item;
		dev_stat_grp->default_groups[i] = NULL;
		config_item_put(df_item);
	}
	kfree(dev_stat_grp->default_groups);

	tg_pt_gp_cg = &dev->t10_alua.alua_tg_pt_gps_group;
	for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) {
		df_item = &tg_pt_gp_cg->default_groups[i]->cg_item;
		tg_pt_gp_cg->default_groups[i] = NULL;
		config_item_put(df_item);
	}
	kfree(tg_pt_gp_cg->default_groups);
	/*
	 * core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp
	 * directly from target_core_alua_tg_pt_gp_release().
	 */
	dev->t10_alua.default_tg_pt_gp = NULL;

	for (i = 0; dev_cg->default_groups[i]; i++) {
		df_item = &dev_cg->default_groups[i]->cg_item;
		dev_cg->default_groups[i] = NULL;
		config_item_put(df_item);
	}
	/*
	 * se_dev is released from target_core_dev_item_ops->release()
	 */
	config_item_put(item);
	mutex_unlock(&hba->hba_access_mutex);
}

static struct configfs_group_operations target_core_hba_group_ops = {
	.make_group		= target_core_make_subdev,
	.drop_item		= target_core_drop_subdev,
};
CONFIGFS_EATTR_STRUCT(target_core_hba, se_hba);
#define SE_HBA_ATTR(_name, _mode)				\
static struct target_core_hba_attribute				\
		target_core_hba_##_name =			\
	__CONFIGFS_EATTR(_name, _mode,				\
	target_core_hba_show_attr_##_name,			\
	target_core_hba_store_attr_##_name);

#define SE_HBA_ATTR_RO(_name)					\
static struct target_core_hba_attribute				\
		target_core_hba_##_name =			\
	__CONFIGFS_EATTR_RO(_name,				\
	target_core_hba_show_attr_##_name);

static ssize_t target_core_hba_show_attr_hba_info(
	struct se_hba *hba,
	char *page)
{
	return sprintf(page, "HBA Index: %d plugin: %s version: %s\n",
			hba->hba_id, hba->backend->ops->name,
			TARGET_CORE_VERSION);
}

SE_HBA_ATTR_RO(hba_info);

static ssize_t target_core_hba_show_attr_hba_mode(struct se_hba *hba,
				char *page)
{
	int hba_mode = 0;

	if (hba->hba_flags & HBA_FLAGS_PSCSI_MODE)
		hba_mode = 1;

	return sprintf(page, "%d\n", hba_mode);
}

static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba,
				const char *page, size_t count)
{
	unsigned long mode_flag;
	int ret;

	if (hba->backend->ops->pmode_enable_hba == NULL)
		return -EINVAL;

	ret = kstrtoul(page, 0, &mode_flag);
	if (ret < 0) {
		pr_err("Unable to extract hba mode flag: %d\n", ret);
		return ret;
	}

	if (hba->dev_count) {
		pr_err("Unable to set hba_mode with active devices\n");
		return -EINVAL;
	}

	ret = hba->backend->ops->pmode_enable_hba(hba, mode_flag);
	if (ret < 0)
		return -EINVAL;
	if (ret > 0)
		hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
	else if (ret == 0)
		hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;

	return count;
}

SE_HBA_ATTR(hba_mode, S_IRUGO | S_IWUSR);

CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group);
static void target_core_hba_release(struct config_item *item)
{
	struct se_hba *hba = container_of(to_config_group(item),
				struct se_hba, hba_group);
	core_delete_hba(hba);
}

static struct configfs_attribute *target_core_hba_attrs[] = {
	&target_core_hba_hba_info.attr,
	&target_core_hba_hba_mode.attr,
	NULL,
};

static struct configfs_item_operations target_core_hba_item_ops = {
	.release		= target_core_hba_release,
	.show_attribute		= target_core_hba_attr_show,
	.store_attribute	= target_core_hba_attr_store,
};

static struct config_item_type target_core_hba_cit = {
	.ct_item_ops		= &target_core_hba_item_ops,
	.ct_group_ops		= &target_core_hba_group_ops,
	.ct_attrs		= target_core_hba_attrs,
	.ct_owner		= THIS_MODULE,
};
static struct config_group *target_core_call_addhbatotarget(
	struct config_group *group,
	const char *name)
{
	char *se_plugin_str, *str, *str2;
	struct se_hba *hba;
	char buf[TARGET_CORE_NAME_MAX_LEN];
	unsigned long plugin_dep_id = 0;
	int ret;

	memset(buf, 0, TARGET_CORE_NAME_MAX_LEN);
	if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) {
		pr_err("Passed *name strlen(): %d exceeds"
			" TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
			TARGET_CORE_NAME_MAX_LEN);
		return ERR_PTR(-ENAMETOOLONG);
	}
	snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name);

	str = strstr(buf, "_");
	if (!str) {
		pr_err("Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n");
		return ERR_PTR(-EINVAL);
	}
	se_plugin_str = buf;
	/*
	 * Special case for subsystem plugins that have "_" in their names.
	 * Namely rd_direct and rd_mcp..
	 */
	str2 = strstr(str+1, "_");
	if (str2) {
		*str2 = '\0'; /* Terminate for *se_plugin_str */
		str2++; /* Skip to start of plugin dependent ID */
		str = str2;
	} else {
		*str = '\0'; /* Terminate for *se_plugin_str */
		str++; /* Skip to start of plugin dependent ID */
	}

	ret = kstrtoul(str, 0, &plugin_dep_id);
	if (ret < 0) {
		pr_err("kstrtoul() returned %d for"
				" plugin_dep_id\n", ret);
		return ERR_PTR(ret);
	}
	/*
	 * Load up TCM subsystem plugins if they have not already been loaded.
	 */
	transport_subsystem_check_init();

	hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0);
	if (IS_ERR(hba))
		return ERR_CAST(hba);

	config_group_init_type_name(&hba->hba_group, name,
			&target_core_hba_cit);

	return &hba->hba_group;
}
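
/*
 * Examples of names accepted above (hypothetical mkdir arguments): a
 * directory named "iblock_0" selects plugin "iblock" with plugin
 * dependent ID 0, while "rd_mcp_3" hits the double-underscore special
 * case and selects "rd_mcp" with ID 3.
 */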
static void target_core_call_delhbafromtarget(
	struct config_group *group,
	struct config_item *item)
{
	/*
	 * core_delete_hba() is called from target_core_hba_item_ops->release()
	 * -> target_core_hba_release()
	 */
	config_item_put(item);
}

static struct configfs_group_operations target_core_group_ops = {
	.make_group	= target_core_call_addhbatotarget,
	.drop_item	= target_core_call_delhbafromtarget,
};

static struct config_item_type target_core_cit = {
	.ct_item_ops	= NULL,
	.ct_group_ops	= &target_core_group_ops,
	.ct_attrs	= NULL,
	.ct_owner	= THIS_MODULE,
};

/* Stop functions for struct config_item_type target_core_hba_cit */
void target_setup_backend_cits(struct target_backend *tb)
{
	target_core_setup_dev_cit(tb);
	target_core_setup_dev_attrib_cit(tb);
	target_core_setup_dev_pr_cit(tb);
	target_core_setup_dev_wwn_cit(tb);
	target_core_setup_dev_alua_tg_pt_gps_cit(tb);
	target_core_setup_dev_stat_cit(tb);
}
static int __init target_core_init_configfs(void)
{
	struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
	struct config_group *lu_gp_cg = NULL;
	struct configfs_subsystem *subsys = &target_core_fabrics;
	struct t10_alua_lu_gp *lu_gp;
	int ret;

	pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage"
		" Engine: %s on %s/%s on "UTS_RELEASE"\n",
		TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);

	config_group_init(&subsys->su_group);
	mutex_init(&subsys->su_mutex);

	ret = init_se_kmem_caches();
	if (ret < 0)
		return ret;
	/*
	 * Create $CONFIGFS/target/core default group for HBA <-> Storage Object
	 * and ALUA Logical Unit Group and Target Port Group infrastructure.
	 */
	target_cg = &subsys->su_group;
	target_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
				GFP_KERNEL);
	if (!target_cg->default_groups) {
		pr_err("Unable to allocate target_cg->default_groups\n");
		ret = -ENOMEM;
		goto out_global;
	}

	config_group_init_type_name(&target_core_hbagroup,
			"core", &target_core_cit);
	target_cg->default_groups[0] = &target_core_hbagroup;
	target_cg->default_groups[1] = NULL;
	/*
	 * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/
	 */
	hba_cg = &target_core_hbagroup;
	hba_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
				GFP_KERNEL);
	if (!hba_cg->default_groups) {
		pr_err("Unable to allocate hba_cg->default_groups\n");
		ret = -ENOMEM;
		goto out_global;
	}
	config_group_init_type_name(&alua_group,
			"alua", &target_core_alua_cit);
	hba_cg->default_groups[0] = &alua_group;
	hba_cg->default_groups[1] = NULL;
	/*
	 * Add ALUA Logical Unit Group and Target Port Group ConfigFS
	 * groups under /sys/kernel/config/target/core/alua/
	 */
	alua_cg = &alua_group;
	alua_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
			GFP_KERNEL);
	if (!alua_cg->default_groups) {
		pr_err("Unable to allocate alua_cg->default_groups\n");
		ret = -ENOMEM;
		goto out_global;
	}

	config_group_init_type_name(&alua_lu_gps_group,
			"lu_gps", &target_core_alua_lu_gps_cit);
	alua_cg->default_groups[0] = &alua_lu_gps_group;
	alua_cg->default_groups[1] = NULL;
	/*
	 * Add core/alua/lu_gps/default_lu_gp
	 */
	lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1);
	if (IS_ERR(lu_gp)) {
		ret = -ENOMEM;
		goto out_global;
	}

	lu_gp_cg = &alua_lu_gps_group;
	lu_gp_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
			GFP_KERNEL);
	if (!lu_gp_cg->default_groups) {
		pr_err("Unable to allocate lu_gp_cg->default_groups\n");
		ret = -ENOMEM;
		goto out_global;
	}

	config_group_init_type_name(&lu_gp->lu_gp_group, "default_lu_gp",
				&target_core_alua_lu_gp_cit);
	lu_gp_cg->default_groups[0] = &lu_gp->lu_gp_group;
	lu_gp_cg->default_groups[1] = NULL;
	default_lu_gp = lu_gp;
	/*
	 * Register the target_core_mod subsystem with configfs.
	 */
	ret = configfs_register_subsystem(subsys);
	if (ret < 0) {
		pr_err("Error %d while registering subsystem %s\n",
			ret, subsys->su_group.cg_item.ci_namebuf);
		goto out_global;
	}
	pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric"
		" Infrastructure: "TARGET_CORE_VERSION" on %s/%s"
		" on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
	/*
	 * Register built-in RAMDISK subsystem logic for virtual LUN 0
	 */
	ret = rd_module_init();
	if (ret < 0)
		goto out;

	ret = core_dev_setup_virtual_lun0();
	if (ret < 0)
		goto out;

	ret = target_xcopy_setup_pt();
	if (ret < 0)
		goto out;

	return 0;

out:
	configfs_unregister_subsystem(subsys);
	core_dev_release_virtual_lun0();
	rd_module_exit();
out_global:
	if (default_lu_gp) {
		core_alua_free_lu_gp(default_lu_gp);
		default_lu_gp = NULL;
	}
	if (lu_gp_cg)
		kfree(lu_gp_cg->default_groups);
	if (alua_cg)
		kfree(alua_cg->default_groups);
	if (hba_cg)
		kfree(hba_cg->default_groups);
	kfree(target_cg->default_groups);
	release_se_kmem_caches();
	return ret;
}
static void __exit target_core_exit_configfs(void)
{
	struct config_group *hba_cg, *alua_cg, *lu_gp_cg;
	struct config_item *item;
	int i;

	lu_gp_cg = &alua_lu_gps_group;
	for (i = 0; lu_gp_cg->default_groups[i]; i++) {
		item = &lu_gp_cg->default_groups[i]->cg_item;
		lu_gp_cg->default_groups[i] = NULL;
		config_item_put(item);
	}
	kfree(lu_gp_cg->default_groups);
	lu_gp_cg->default_groups = NULL;

	alua_cg = &alua_group;
	for (i = 0; alua_cg->default_groups[i]; i++) {
		item = &alua_cg->default_groups[i]->cg_item;
		alua_cg->default_groups[i] = NULL;
		config_item_put(item);
	}
	kfree(alua_cg->default_groups);
	alua_cg->default_groups = NULL;

	hba_cg = &target_core_hbagroup;
	for (i = 0; hba_cg->default_groups[i]; i++) {
		item = &hba_cg->default_groups[i]->cg_item;
		hba_cg->default_groups[i] = NULL;
		config_item_put(item);
	}
	kfree(hba_cg->default_groups);
	hba_cg->default_groups = NULL;
	/*
	 * We expect subsys->su_group.default_groups to be released
	 * by configfs subsystem provider logic..
	 */
	configfs_unregister_subsystem(&target_core_fabrics);
	kfree(target_core_fabrics.su_group.default_groups);

	core_alua_free_lu_gp(default_lu_gp);
	default_lu_gp = NULL;

	pr_debug("TARGET_CORE[0]: Released ConfigFS Fabric"
			" Infrastructure\n");

	core_dev_release_virtual_lun0();
	rd_module_exit();
	target_xcopy_release_pt();
	release_se_kmem_caches();
}
MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(target_core_init_configfs);
module_exit(target_core_exit_configfs);